Compare commits


No commits in common. "9672c803907b7bebe51cfaf205b04cf76e9de5c2" and "69264a21e63b8467523bb096455b55cb7a473355" have entirely different histories.

1529 changed files with 17953 additions and 32876 deletions


@ -1 +1 @@
106.0.5249.91
105.0.5195.52


@ -43,9 +43,7 @@ Example Caddyfile (replace `user` and `pass` accordingly):
```
{
  servers {
    protocol {
      experimental_http3
    }
    protocols h1 h2 h3
  }
}
:443, example.com
@ -57,7 +55,9 @@ route {
    hide_via
    probe_resistance
  }
  file_server { root /var/www/html }
  file_server {
    root /var/www/html
  }
}
```
`:443` must appear first for this Caddyfile to work. For more advanced usage consider using [JSON for Caddy 2's config](https://caddyserver.com/docs/json/).

src/.gn

@ -58,7 +58,7 @@ default_args = {
# Overwrite default args declared in the Fuchsia sdk
fuchsia_sdk_readelf_exec =
"//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
fuchsia_target_api_level = 9
fuchsia_target_api_level = 8
devtools_visibility = [ "*" ]
}
@ -68,10 +68,11 @@ default_args = {
# their includes checked for proper dependencies when you run either
# "gn check" or "gn gen --check".
no_check_targets = [
"//headless:headless_non_renderer", # 2 errors
"//headless:headless_renderer", # 12 errors
"//headless:headless_shared_sources", # 2 errors
"//headless:headless_shell_browser_lib", # 1 errors
"//headless:headless_non_renderer", # 9 errors
"//headless:headless_renderer", # 13 errors
"//headless:headless_shared_sources", # 4 errors
"//headless:headless_shell_browser_lib", # 10 errors
"//headless:headless_shell_lib", # 10 errors
# //v8, https://crbug.com/v8/7330
"//v8/src/inspector:inspector", # 20 errors
@ -153,7 +154,6 @@ exec_script_whitelist =
"//build_overrides/build.gni",
"//chrome/android/webapk/shell_apk/prepare_upload_dir/BUILD.gn",
"//chrome/version.gni",
# TODO(dgn): Layer violation but breaks the build otherwise, see
# https://crbug.com/474506.


@ -71,7 +71,6 @@ Alexey Korepanov <alexkorep@gmail.com>
Alexey Kuts <kruntuid@gmail.com>
Alexey Kuzmin <alex.s.kuzmin@gmail.com>
Alexey Kuznetsov <saturas2000@gmail.com>
Alexey Terentiev <alexeyter@gmail.com>
Alexis Brenon <brenon.alexis@gmail.com>
Alexis La Goutte <alexis.lagoutte@gmail.com>
Alexis Menard <alexis.menard@intel.com>
@ -168,7 +167,6 @@ Bert Belder <bertbelder@gmail.com>
Bhagirathi Satpathy <bhagirathi.s@samsung.com>
Bhanukrushana Rout <b.rout@samsung.com>
Biljith Jayan <billy.jayan@samsung.com>
Bin Liao <bin.liao@intel.com>
Boaz Sender <boaz@bocoup.com>
Bobby Powers <bobbypowers@gmail.com>
Branden Archer <bma4@zips.uakron.edu>
@ -387,7 +385,6 @@ Gajendra Singh <wxjg68@motorola.com>
Ganesh Borle <ganesh.borle@samsung.com>
Gao Chun <chun.gao@intel.com>
Gao Chun <gaochun.dev@gmail.com>
Gao Sheng <gaosheng08@meituan.com>
Gao Yu <wanggao@tencent.com>
Gaurav Dhol <gaurav.dhol@einfochips.com>
Gautham Banasandra <gautham.bangalore@gmail.com>
@ -424,7 +421,6 @@ Halton Huo <halton.huo@intel.com>
Hans Hillen <hans.hillen@gmail.com>
Hao Li <hao.x.li@intel.com>
Haojian Wu <hokein.wu@gmail.com>
Haoxuan Zhang <zhanghaoxuan.59@bytedance.com>
Hari Singh <hari.singh1@samsung.com>
Harpreet Singh Khurana <harpreet.sk@samsung.com>
Harshikesh Kumar <harshikeshnobug@gmail.com>
@ -541,7 +537,6 @@ Jiadong Zhu <jiadong.zhu@linaro.org>
Jiahe Zhang <jiahe.zhang@intel.com>
Jiajia Qin <jiajia.qin@intel.com>
Jiajie Hu <jiajie.hu@intel.com>
Jiangzhen Hou <houjiangzhen@360.cn>
Jianjun Zhu <jianjun.zhu@intel.com>
Jianneng Zhong <muzuiget@gmail.com>
Jiawei Shao <jiawei.shao@intel.com>
@ -650,7 +645,6 @@ Kaustubh Atrawalkar <kaustubh.a@samsung.com>
Kaustubh Atrawalkar <kaustubh.ra@gmail.com>
Ke He <ke.he@intel.com>
Keeley Hammond <vertedinde@electronjs.org>
Keeling <liqining.keeling@bytedance.com>
Keene Pan <keenepan@linpus.com>
Keiichiro Nagashima <n4ag3a2sh1i@gmail.com>
Keita Suzuki <keitasuzuki.park@gmail.com>
@ -801,7 +795,6 @@ Mayur Kankanwadi <mayurk.vk@samsung.com>
Md Abdullah Al Alamin <a.alamin.cse@gmail.com>
Md. Hasanur Rashid <hasanur.r@samsung.com>
Md Jobed Hossain <jobed.h@samsung.com>
Md Raiyan bin Sayeed <mrbsayee@uwaterloo.ca>
Md Sami Uddin <md.sami@samsung.com>
Micha Hanselmann <micha.hanselmann@gmail.com>
Michael Cirone <mikecirone@gmail.com>
@ -933,7 +926,6 @@ Peter Molnar <pmolnar.u-szeged@partner.samsung.com>
Peter Snyder <snyderp@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
Phan Quang Minh <phanquangminh217@gmail.com>
Philip Hanson <philip.hanson@intel.com>
Philipp Hancke <fippo@andyet.net>
Philipp Hancke <philipp.hancke@googlemail.com>
@ -1060,13 +1052,11 @@ Sathish Kuppuswamy <sathish.kuppuswamy@intel.com>
Satoshi Matsuzaki <satoshi.matsuzaki@gmail.com>
Satyajit Sahu <satyajit.sahu@amd.com>
Sayan Nayak <sayan.nayak@samsung.com>
Sayan Sivakumaran <sivakusayan@gmail.com>
Scott D Phillips <scott.d.phillips@intel.com>
Sean Bryant <sean@cyberwang.net>
Sean DuBois <seaduboi@amazon.com>
Sebastian Amend <sebastian.amend@googlemail.com>
Sebastian Krzyszkowiak <dos@dosowisko.net>
Sebastjan Raspor <sebastjan.raspor1@gmail.com>
Seo Sanghyeon <sanxiyn@gmail.com>
Seokju Kwon <seokju.kwon@gmail.com>
Seokho Song <0xdevssh@gmail.com>
@ -1125,8 +1115,6 @@ Sohan Jyoti Ghosh <sohanjg@chromium.org>
Sohom Datta <sohom.datta@learner.manipal.edu>
Sohom Datta <dattasohom1@gmail.com>
Song Fangzhen <songfangzhen@bytedance.com>
Song Qinglin <songql@dingdao.com>
Song Qinglin <songqinglin@gmail.com>
Song YeWen <ffmpeg@gmail.com>
Sooho Park <sooho1000@gmail.com>
Soojung Choi <crystal2840@gmail.com>
@ -1315,7 +1303,6 @@ Yoav Zilberberg <yoav.zilberberg@gmail.com>
Yong Ling <yongling@tencent.com>
Yong Shin <sy3620@gmail.com>
Yong Wang <ccyongwang@tencent.com>
Yonggang Luo <luoyonggang@gmail.com>
Yongha Lee <yongha78.lee@samsung.com>
Yongseok Choi <yongseok.choi@navercorp.com>
Yongsheng Zhu <yongsheng.zhu@intel.com>
@ -1333,7 +1320,6 @@ Yuki Osaki <yuki.osaki7@gmail.com>
Yuki Tsuchiya <Yuki.Tsuchiya@sony.com>
Yuma Takai <tara20070827@gmail.com>
Yumikiyo Osanai <yumios.art@gmail.com>
Yumin Su <yuminsu.hi@gmail.com>
Yunchao He <yunchao.he@intel.com>
Yupei Lin <yplam@yplam.com>
Yupei Wang <perryuwang@tencent.com>
@ -1358,7 +1344,6 @@ Zhenyu Shan <zhenyu.shan@intel.com>
Zhibo Wang <zhibo1.wang@intel.com>
Zhifei Fang <facetothefate@gmail.com>
Zhiyuan Ye <zhiyuanye@tencent.com>
Zhou Jun <zhoujun@uniontech.com>
Zhuoyu Qian <zhuoyu.qian@samsung.com>
Ziran Sun <ziran.sun@samsung.com>
Zoltan Czirkos <czirkos.zoltan@gmail.com>
@ -1446,9 +1431,7 @@ Venture 3 Systems LLC <*@venture3systems.com>
Vewd Software AS <*@vewd.com>
Vivaldi Technologies AS <*@vivaldi.com>
Wacom <*@wacom.com>
Whist Technologies <*@whist.com>
Xperi Corporation <*@xperi.com>
Yandex LLC <*@yandex-team.ru>
Zuckjet <zuckjet@gmail.com>
# Please DO NOT APPEND here. See comments at the top of the file.
# END organizations section.

src/DEPS

@ -233,7 +233,7 @@ vars = {
#
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-amd64-generic-chrome-skylab
# CQ_INCLUDE_TRYBOTS=luci.chrome.try:lacros-arm-generic-chrome-skylab
'lacros_sdk_version': '15040.0.0',
'lacros_sdk_version': '14977.0.0',
# Generate location tag metadata to include in tests result data uploaded
# to ResultDB. This isn't needed on some configs and the tool that generates
@ -279,7 +279,7 @@ vars = {
'dawn_standalone': False,
# reclient CIPD package version
'reclient_version': 're_client_version:0.76.0.f4c4bc4-gomaip',
'reclient_version': 're_client_version:0.72.0.b874055-gomaip',
# Enable fetching Rust-related packages.
'use_rust': False,
@ -297,34 +297,34 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other.
'skia_revision': 'ba6bc7d02d406a262e155a83e71cf4c19736a312',
'skia_revision': 'f204b137b97b44b7397de173fc54181c37ac6501',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8
# and whatever else without interference from each other.
'v8_revision': '69ffa9cea07d4a4177d0224c2c8fabfd0c314f2f',
'v8_revision': 'b1f56b4a8a7cf9f707f7966104278777f9994b13',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other.
'angle_revision': '428b6788d71120c833f029aa4e24903fa811f77b',
'angle_revision': '2f0d8ab049b10ee41f9b90cea8da8e80db076e38',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other.
'swiftshader_revision': '1c3dfde53353d5e13ca25eb52aa208d450b5e980',
'swiftshader_revision': '16e026a959f1bc80ff237aa81b4a63b52517dec1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other.
'pdfium_revision': 'c5f61025a75118b24885f97db4ecfe7f81db462d',
'pdfium_revision': 'd14da8e682e244127db32490365d1c094243e5f3',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other.
#
# Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': 'adaa322b63d1bfbd1abcf4a308926a9a83a6acbe',
'boringssl_revision': 'b95124305ab15c7523d3e21437309fa5dd717ee8',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:9.20220817.2.1',
'fuchsia_version': 'version:9.20220720.2.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other.
@ -340,19 +340,19 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling lss
# and whatever else without interference from each other.
'lss_revision': 'ce877209e11aa69dcfffbd53ef90ea1d07136521',
'lss_revision': '0d6435b731ef91d5182eaecff82ae96764222c48',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling NaCl
# and whatever else without interference from each other.
'nacl_revision': '6bb46e84a384cfd877bbd741d89b31c547944cfd',
'nacl_revision': '18d9964d47fc44f49a4c19b7ba91197ddca00c6a',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
'freetype_revision': '7cd3f19f21cc9d600e3b765ef2058474d20233e2',
'freetype_revision': '275b116b40c9d183d42242099ea9ff276985855b',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other.
'freetype_testing_revision': 'b4d3251b9921e585b7d565f0edbcbfded0e2daf8',
'freetype_testing_revision': '1ca0c99d25ae3b1e4c70513c1bf74643fc3dee09',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling HarfBuzz
# and whatever else without interference from each other.
@ -368,7 +368,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other.
'catapult_revision': '72946313ec4f9773088a8258f7509fbeaf47465e',
'catapult_revision': 'b6e934ef32e6591ad60636e3fe167d0e3e9aa5d4',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other.
@ -376,7 +376,7 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other.
'devtools_frontend_revision': 'c005e44cd0f38aea3bac8e0c723c6d89d144d7c3',
'devtools_frontend_revision': 'b1ac4239dc5fffa56170e7367a03f35d2eaa223c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other.
@ -384,11 +384,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
# and whatever else without interference from each other.
'android_sdk_build-tools_version': '-VRKr36Uw8L_iFqqo9nevIBgNMggND5iWxjidyjnCgsC',
'android_sdk_build-tools_version': 'tRoD45SCi7UleQqSV7MrMQO1_e5P8ysphkCcj6z_cCQC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_emulator_version
# and whatever else without interference from each other.
'android_sdk_emulator_version': '9lGp8nTUCRRWGMnI_96HcKfzjnxEJKUcfvfwmA3wXNkC',
'android_sdk_emulator_version': 'gMHhUuoQRKfxr-MBn3fNNXZtkAVXtOwMwT7kfx8jkIgC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_extras_version
# and whatever else without interference from each other.
@ -400,11 +400,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platform-tools_version
# and whatever else without interference from each other.
'android_sdk_platform-tools_version': 'RSI3iwryh7URLGRgJHsCvUxj092woTPnKt4pwFcJ6L8C',
'android_sdk_platform-tools_version': 'g7n_-r6yJd_SGRklujGB1wEt8iyr77FZTUJVS9w6O34C',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_platforms_version
# and whatever else without interference from each other.
'android_sdk_platforms_version': 'eo5KvW6UVor92LwZai8Zulc624BQZoCu-yn7wa1z_YcC',
'android_sdk_platforms_version': 'lL3IGexKjYlwjO_1Ga-xwxgwbE_w-lmi2Zi1uOlWUIAC',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_sources_version
# and whatever else without interference from each other.
@ -412,11 +412,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'dawn_revision': '972757dde41304685484a5d344398787cfd69219',
'dawn_revision': '1e98a9ba4a64301e0ab932e22ce989688f6cdf6c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'quiche_revision': '9b5aac9172d4a50b53b557c86ee7715cf70740d1',
'quiche_revision': '53c94d968dd6a0cf748caf42462a0b676f95530c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other.
@ -436,11 +436,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libavif
# and whatever else without interference from each other.
'libavif_revision': 'e0954237c40ff75dbc79991ea4774853ad09bed7',
'libavif_revision': 'dd2d67c5f976038354c0406a253e26dd2abc4632',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other.
'nearby_revision': 'd92f1d47573427e6417e29a3e82ea7d4c34fe0b5',
'nearby_revision': 'd2c401112cc577fe3c5f9a11329bb557048af31a',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other.
@ -456,11 +456,11 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libcxxabi_revision': '039323b945911a54cb7400da8fb14fcbb0348e97',
'libcxxabi_revision': 'b954e3e65634a9e2f7b595598a30c455f5f2eb26',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
'libunwind_revision': '12726c93aa00e47e80dbd332cca8a1b4d67945ce',
'libunwind_revision': '955e2ff5fbb15791fea263c1c80e1ec6b3c5ee61',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed
# and whatever else without interference from each other.
@ -476,14 +476,14 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ffmpeg
# and whatever else without interference from each other.
'ffmpeg_revision': '42601c274d7a39f5ded391024c52ff444f144f75',
'ffmpeg_revision': '880df5ede50a8534c8116d0d50e4bc4f3ef08a06',
# If you change this, also update the libc++ revision in
# //buildtools/deps_revisions.gni.
'libcxx_revision': 'db722166934ebc79a6e65e5fef9a6eae21eacb77',
'libcxx_revision': '88bf4070487fbe9020697a2281743b91e5e29bef',
# GN CIPD package version.
'gn_version': 'git_revision:0bcd37bd2b83f1a9ee17088037ebdfe6eab6d31a',
'gn_version': 'git_revision:9ef321772ecc161937db69acb346397e0ccc484d',
}
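
The `vars` dictionary above is consumed by gclient (depot_tools), which substitutes every later `Var('name')` reference with the pinned value; that is why a single-line revision roll in `vars` is enough to move a whole checkout. A minimal sketch of that substitution, written in C++ purely for illustration (gclient itself is Python; the map contents are copied from the diff above, using the right-hand pin):

```
#include <iostream>
#include <map>
#include <string>

int main() {
  // Pinned values copied from the vars block above.
  std::map<std::string, std::string> vars = {
      {"chromium_git", "https://chromium.googlesource.com"},
      {"skia_revision", "f204b137b97b44b7397de173fc54181c37ac6501"},
  };
  auto Var = [&](const std::string& name) { return vars.at(name); };

  // Mirrors a deps entry such as:
  //   Var('chromium_git') + '/skia.git' + '@' + Var('skia_revision')
  std::cout << Var("chromium_git") + "/skia.git" + "@" + Var("skia_revision")
            << "\n";
}
```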
# Only these hosts are allowed for dependencies in this DEPS file.
@ -634,7 +634,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_mac_universal',
'version': 'jGg_naPD5ghuLToPE3ClG72iDs9GqSPF6M1LM2OhVAwC',
'version': 'RBRqNIwSXHjdZf4BVjWk8enaKIHw58aQGFDVNozlbWIC',
},
],
},
@ -645,7 +645,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86',
'version': 'SEYmLOH4UqiO4plPuX52Olw1fYjReimhu8AGkSu0o6YC',
'version': 'tqlS-vFYsn2LVSJMwipq84EKLmwKa1XJb760NnpQL2gC',
},
],
},
@ -656,7 +656,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chrome_win_x86_64',
'version': 'DlaEyhCmLI4fzB57R1aWxX9QkHXpbYK4ooGqiNLPX5AC',
'version': 'RthX5RzppvSV7uq2P6pm2bnv6-dvoHpUIOsZFk57ZMEC',
},
],
},
@ -668,7 +668,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_amd64',
'version': 'ekvYgaz0Q8adDlksn73Gh-sUisY5m76_4lr_0c1woM0C',
'version': '7MvxvS-pmZP1iAXQoCiLI7nv4UkDiyw8PC1ycwpYWbYC',
},
],
},
@ -680,7 +680,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_mac_arm64',
'version': 'UEHK7zslk3sVEEqwqnRp9i3kNWbv1PiHuTanLbEU64wC',
'version': 'd5lN7fzV07O4-Mu_T8TshrGQtlR9F508p9cdhchcLpYC',
},
],
},
@ -691,7 +691,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86',
'version': 'cotzeTxkTmIASM9Gv4ZgbdXS8yJTvhEm8foupSnIEdkC',
'version': '8zehx-DVmaf_FplPe23acLAStf3Z7anQ3CY9LXBfvD0C',
},
],
},
@ -702,7 +702,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/updater/chromium_win_x86_64',
'version': 'esNCd2d4_kSQlID8kAeNQDJgiTaXuVJ3oiky_Ma2mFUC',
'version': 'KE8JnjZFOyHxUhVdRkm0IMVqlZIaYPnAOI-zxtUD4zUC',
},
],
},
@ -762,7 +762,7 @@ deps = {
'packages': [
{
'package': 'chromium/chrome/test/data/safe_browsing/dmg',
'version': '03TLfNQgc59nHmyWtYWJfFaUrEW8QDJJzXwm-672m-QC',
'version': 'a543ae3f0b3e67dd5a1c75f63317231a1d242912',
},
],
'condition': 'checkout_mac',
@ -773,16 +773,16 @@ deps = {
Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248',
'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '1b05569731b9b2d4f945fa3c87f3fa8a15a732e6',
'url': Var('chromium_git') + '/website.git' + '@' + '3965ba67f8d283378e6c0b64d634b91fb830a378',
},
'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + 'd8ddfdbb1e4d12fb0f92ef1fceb21c6a8538cbfb',
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '53a2982c85ac6cf802719603d037ad3be7091ebb',
'condition': 'checkout_ios',
},
'src/ios/third_party/edo/src': {
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '101cd0562999a9b6fb6cff7c5783c1293a366acf',
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '3d3dcee71993376f3abcf3457b046c1df6c13182',
'condition': 'checkout_ios',
},
@ -792,7 +792,7 @@ deps = {
},
'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '72dcc44075ab9977db264c693eb883212584c1a0',
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '425d641798c86ab809fcb067bbb265958756af98',
'condition': 'checkout_ios',
},
@ -802,7 +802,7 @@ deps = {
},
'src/ios/third_party/material_internationalization_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-foundation/material-internationalization-ios.git' + '@' + '305aa8d276f5137c98c5c1c888efc22e02251ee7',
'url': Var('chromium_git') + '/external/github.com/material-foundation/material-internationalization-ios.git' + '@' + 'ad190b15a5e3f2d84c9b8182a3a0b84428edce68',
'condition': 'checkout_ios',
},
@ -862,7 +862,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/linux-amd64',
'version': 'MHZoKHSLMgxFLr6l4cmXLh5Kl9WvlxPv08vb9doDa9YC',
'version': 'XlzIsX8AH06QHVAMzpKt5aT3nfupjnBr78ztG18pXdsC',
},
],
'dep_type': 'cipd',
@ -873,7 +873,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/mac-amd64',
'version': 'sGVhzaqwgT697qIGKjfghO3mclJdO4I-L5nQoJqYb3sC',
'version': 'CPhzNoasDtJ45F8bwTLs7lIQDiy-PhdReFmXrlL5FDoC',
},
],
'dep_type': 'cipd',
@ -884,7 +884,7 @@ deps = {
'packages': [
{
'package': 'chromium/rts/model/windows-amd64',
'version': 'GUeUYkTnmtlRNdQBo9biZRZks9a3yshCcUu-YqToTbkC',
'version': 'XmZtpYZGxTqwBMgEXpdyfrdCxx79QfYeVlhpviF2OUcC',
},
],
'dep_type': 'cipd',
@ -895,7 +895,7 @@ deps = {
'packages': [
{
'package': 'experimental/google.com/sshrimp/chromium/rts/model/linux-amd64',
'version': '61RZ40XmEInOMhnIJMEqdOjocdCHqodmEJcDxu4x7uIC',
'version': '3K1dz8hGV_xBeEcPKmXfrPYWCwXdRf6KVVLrg7AuJ0sC',
},
],
'dep_type': 'cipd',
@ -952,7 +952,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/androidx',
'version': 'nJRbqGtdXzotJnB8kFaAckjFBKzXW4O9BdApCCC7J8AC',
'version': 'gAsD4l8EoP_W0IH5vzedZ1tyN3-wAP8-fqkaS_mX6rcC',
},
],
'condition': 'checkout_android',
@ -974,7 +974,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_system_sdk/public',
'version': 'RGY8Vyf8jjszRIJRFxZj7beXSUEHTQM90MtYejUvdMgC',
'version': 'oSfDIvHlCa6W0gS79Q5OOfB9E4eBg3uAvi3BEDN21U0C',
},
],
'condition': 'checkout_android',
@ -985,7 +985,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/aapt2',
'version': 'nSnWUNu6ssPA-kPMvFQj4JjDXRWj2iubvvjfT1F6HCMC',
'version': 'hf9C5IyJUUGaBnzqu60xiFJSyfAmjqjc_PiNXNVc9l0C',
},
],
'condition': 'checkout_android',
@ -1007,7 +1007,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/lint',
'version': 'nklp_LHwFqk9tuQm1yHGBy2W16YMz_R7Q7vcnZZnF78C',
'version': 'INnGGTfg5gGJutJiBtWI6-QwusHDDnKvZzI53Q3UiecC',
},
],
'condition': 'checkout_android',
@ -1018,7 +1018,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_build_tools/manifest_merger',
'version': 'CvokX4c6dx7DwQ8VVMQ70CROzyJWg13oOq3feeuTzg8C',
'version': '0WkAedh1tJB8lzisWJRT80UjpacKLltuV7NqP-0tx9gC',
},
],
'condition': 'checkout_android',
@ -1033,7 +1033,7 @@ deps = {
'src/third_party/android_sdk/public': {
'packages': [
{
'package': 'chromium/third_party/android_sdk/public/build-tools/33.0.0',
'package': 'chromium/third_party/android_sdk/public/build-tools/31.0.0',
'version': Var('android_sdk_build-tools_version'),
},
{
@ -1049,7 +1049,7 @@ deps = {
'version': Var('android_sdk_platform-tools_version'),
},
{
'package': 'chromium/third_party/android_sdk/public/platforms/android-33',
'package': 'chromium/third_party/android_sdk/public/platforms/android-31',
'version': Var('android_sdk_platforms_version'),
},
{
@ -1069,7 +1069,7 @@ deps = {
Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'),
'src/third_party/content_analysis_sdk/src':
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + 'fe3c222acb75bdd5248a3339d4255f032e293296',
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + '1d7dd0490808a8a972949521cc314e42d085c69f',
'src/third_party/dav1d/libdav1d':
Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + '87f9a81cd770e49394a45deca7a3df41243de00b',
@ -1078,7 +1078,7 @@ deps = {
Var('dawn_git') + '/dawn.git' + '@' + Var('dawn_revision'),
'src/third_party/libjxl/src':
Var('chromium_git') + '/external/github.com/libjxl/libjxl.git' + '@' + Var('libjxl_revision'),
Var('chromium_git') + '/external/gitlab.com/wg1/jpeg-xl.git' + '@' + Var('libjxl_revision'),
'src/third_party/highway/src':
Var('chromium_git') + '/external/github.com/google/highway.git' + '@' + Var('highway_revision'),
@ -1102,7 +1102,7 @@ deps = {
Var('boringssl_git') + '/boringssl.git' + '@' + Var('boringssl_revision'),
'src/third_party/breakpad/breakpad':
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'e085b3b50bde862d0cf3ce4594e3f391bcf5faec',
Var('chromium_git') + '/breakpad/breakpad.git' + '@' + 'c4c43b80ea8854c57a4374ac32579b577172dc23',
'src/third_party/byte_buddy': {
'packages': [
@ -1127,7 +1127,7 @@ deps = {
},
'src/third_party/cast_core/public/src':
Var('chromium_git') + '/cast_core/public' + '@' + '469e045e514c09701ab674d023cbaa6562866f83',
Var('chromium_git') + '/cast_core/public' + '@' + '8ba5ff47563d0ca8233e8fa009377ed14a560cf4',
'src/third_party/catapult':
Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
@ -1156,7 +1156,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + 'b6181b487cea5df8b340f5ab040d3c8a04c8b518',
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '60350fcfeb0bca00eba2794f53661a1b996a79a5',
'condition': 'checkout_chromeos',
},
@ -1167,14 +1167,14 @@ deps = {
Var('chromium_git') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
'src/third_party/cpuinfo/src':
Var('chromium_git') + '/external/github.com/pytorch/cpuinfo.git' + '@' + 'beb46ca0319882f262e682dd596880c92830687f',
Var('chromium_git') + '/external/github.com/pytorch/cpuinfo.git' + '@' + 'b40bae27785787b6dd70788986fd96434cf90ae2',
'src/third_party/crc32c/src':
Var('chromium_git') + '/external/github.com/google/crc32c.git' + '@' + 'fa5ade41ee480003d9c5af6f43567ba22e4e17e6',
# For Linux and Chromium OS.
'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '354de5ff0d73b10dbd42ecfaf393dbd26ec62bee',
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'f3b1373caf7bd717be4f0d21ab8c738c6bfcf418',
'condition': 'checkout_linux',
},
@ -1184,7 +1184,7 @@ deps = {
},
'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + 'd14f65eba90cc352b76ff0743a307ce22450d823',
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '0ba2fd429dd6db431fcbee6995c1278d2a3657a0',
'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
@ -1193,7 +1193,7 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '34780d8bd13d0af0cf17a22789ef286e8512594d',
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '0e187141679fdb91da33249d18cb79a011c0e2ea',
'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '8de89a7a36cd024dcd30ac9f67f3f02c37a7c8fb',
@ -1236,7 +1236,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/gemmlowp.git' + '@' + '13d57703abca3005d97b19df1f2db731607a7dc2',
'src/third_party/grpc/src': {
'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + 'dd77c67217b10ffeaf766e25eb8b46d2d59de4ff',
'url': Var('chromium_git') + '/external/github.com/grpc/grpc.git' + '@' + '89f7534e43cf73f56c492a9cf7eb85ca6bfbd87a',
},
'src/third_party/freetype/src':
@ -1338,7 +1338,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',
'src/third_party/icu':
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '3e03346162e9bdd41850a1184d2713da4add719e',
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '6e558942cc8b83e525bdabaf987e06af8a377314',
'src/third_party/icu4j': {
'packages': [
@ -1378,6 +1378,17 @@ deps = {
'condition': 'checkout_android or checkout_linux',
'dep_type': 'cipd',
},
'src/third_party/jdk/extras': {
'packages': [
{
'package': 'chromium/third_party/jdk/extras',
'version': '-7m_pvgICYN60yQI3qmTj_8iKjtnT4NXicT0G_jJPqsC',
},
],
# Needed on Linux for use on chromium_presubmit (for checkstyle).
'condition': 'checkout_android or checkout_linux',
'dep_type': 'cipd',
},
'src/third_party/jsoncpp/source':
Var('chromium_git') + '/external/github.com/open-source-parsers/jsoncpp.git'
@ -1398,7 +1409,7 @@ deps = {
Var('chromium_git') + '/external/libaddressinput.git' + '@' + 'df35d6c42da4fa2759e4cfb592afe33817993b89',
'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + '5bd38c95006adc1b3b9523a6980207c5576fd8a5',
Var('aomedia_git') + '/aom.git' + '@' + '8dcdafc6d4a2f9f8ea8104f26eca5d123eefcb7f',
'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@ -1436,7 +1447,7 @@ deps = {
},
'src/third_party/libphonenumber/dist':
Var('chromium_git') + '/external/libphonenumber.git' + '@' + '140dfeb81b753388e8a672900fb7a971e9a0d362',
Var('chromium_git') + '/external/libphonenumber.git' + '@' + '68eba9d6ee8b11fb58ece36b6c46d07965d7f7ff',
'src/third_party/libprotobuf-mutator/src':
Var('chromium_git') + '/external/github.com/google/libprotobuf-mutator.git' + '@' + Var('libprotobuf-mutator'),
@ -1456,7 +1467,7 @@ deps = {
},
'src/third_party/libvpx/source/libvpx':
Var('chromium_git') + '/webm/libvpx.git' + '@' + '8786aee5821801fe6b5a285be009ba67ea7f4e63',
Var('chromium_git') + '/webm/libvpx.git' + '@' + '711bef67400f096416cb1ba7f6560e533871490f',
'src/third_party/libwebm/source':
Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
@ -1465,7 +1476,7 @@ deps = {
Var('chromium_git') + '/webm/libwebp.git' + '@' + '7366f7f394af26de814296152c50e673ed0a832f',
'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '65e7c9d5706a77d1949da59bfcb0817c252ef8d6',
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + 'd248929c059ff7629a85333699717d7a677d8d96',
'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@ -1477,28 +1488,6 @@ deps = {
'condition': 'checkout_android or checkout_linux',
},
'src/third_party/lzma_sdk/bin/host_platform': {
'packages': [
{
'package': 'infra/3pp/tools/7z/${{platform}}',
'version': 'version:2@22.01',
},
],
'condition': 'checkout_win',
'dep_type': 'cipd',
},
'src/third_party/lzma_sdk/bin/win64': {
'packages': [
{
'package': 'infra/3pp/tools/7z/windows-amd64',
'version': 'version:2@22.01',
},
],
'condition': 'checkout_win',
'dep_type': 'cipd',
},
'src/third_party/material_design_icons/src': {
'url': Var('chromium_git') + '/external/github.com/google/material-design-icons.git' + '@' +
'5ab428852e35dc177a8c37a2df9dc9ccf768c65a',
@ -1578,7 +1567,7 @@ deps = {
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'fac04ceb3e966f613ed17e98178e9d690280bba6',
'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + '7f795b08c7e41abdecab742d1577cf22c40dadd7',
Var('chromium_git') + '/openscreen' + '@' + '6be6b78224a276e908b8272542d125e133c40f3f',
'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@ -1595,7 +1584,7 @@ deps = {
},
'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + 'c16f31c6da7523528ae09808c3427fb395142c94',
Var('android_git') + '/platform/external/perfetto.git' + '@' + '361efbf9aab595e4dfa79ec48f242d9e722393c9',
'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
@ -1605,6 +1594,17 @@ deps = {
'src/third_party/pthreadpool/src':
Var('chromium_git') + '/external/github.com/Maratyszcza/pthreadpool.git' + '@' + '1787867f6183f056420e532eec640cba25efafea',
'src/third_party/proguard': {
'packages': [
{
'package': 'chromium/third_party/proguard',
'version': 'Fd91BJFVlmiO6c46YMTsdy7n2f5Sk2hVVGlzPLvqZPsC',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
# Dependency of skia.
'src/third_party/pyelftools': {
'url': Var('chromium_git') + '/chromiumos/third_party/pyelftools.git' + '@' + '19b3e610c86fcadb837d252c794cb5e8008826ae',
@ -1629,13 +1629,13 @@ deps = {
},
'src/third_party/re2/src':
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '2013ae0f998304d69fe36a0164845dca2643a718',
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + 'ba2e7c2f25047a6848f2baa9bc4cb700ea9dda84',
'src/third_party/r8': {
'packages': [
{
'package': 'chromium/third_party/r8',
'version': 'qvL35O3yU1ZbOWHVZBedmVtdaav1qKquii4RJyUh-PgC',
'version': 'auReXfxxD74XGdPdi-rYsKrp4sRwYwgNjh_W0PT7vNcC',
},
],
'condition': 'checkout_android',
@ -1651,7 +1651,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/robolectric',
'version': '6OaDTPaXu0VZoMwWllgaXTeiaJR5jQkZb1_aooRa2GUC',
'version': 'WZ96VJuhBM63xzHb-_E72Tf46M9yIbfia6basI1YG4EC',
},
],
'condition': 'checkout_android',
@ -1659,7 +1659,7 @@ deps = {
},
'src/third_party/ruy/src':
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '841ea4172ba904fe3536789497f9565f2ef64129',
Var('chromium_git') + '/external/github.com/google/ruy.git' + '@' + '72155b3185246e9143f4c6a3a7f283d2ebba8524',
'src/third_party/skia':
Var('skia_git') + '/skia.git' + '@' + Var('skia_revision'),
@ -1671,7 +1671,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',
'src/third_party/sqlite/src':
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + '5fb64c1a111a36e7530a8d2132e6ce0c46607942',
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'e6b63421941617bf5ccac6b4a62d7a7b4a2c3fef',
'src/third_party/sqlite4java': {
'packages': [
@ -1702,7 +1702,7 @@ deps = {
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f',
'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '20bdb50879599c0e7a62036ee3fd8a644ced97f1',
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + 'ac31ffa987c14665062c00f98ec025a3fdc185ab',
'src/third_party/turbine': {
'packages': [
@ -1715,7 +1715,7 @@ deps = {
'dep_type': 'cipd',
},
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@31a5e38f3e48d6706f5ccb2d2b74eeda1de6a21a',
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@c42337d9ef75170244486b580bad7dfe78447bfd',
'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@ -1728,7 +1728,7 @@ deps = {
# Wayland protocols that add functionality not available in the core protocol.
'src/third_party/wayland-protocols/src': {
'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland-protocols.git' + '@' + '83866f19d3d61b28e94d71781646466b3a6623d8',
'url': Var('chromium_git') + '/external/anongit.freedesktop.org/git/wayland/wayland-protocols.git' + '@' + 'd324986823519c15b2162fc3e0a720f349e43b0c',
'condition': 'checkout_linux',
},
@ -1751,10 +1751,10 @@ deps = {
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + '44e4c8770158c505b03ee7feafa4859d083b0912',
'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '4e8df07a22cac3a9a4b606c7458ecb4646fabf73',
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'c4eb1df3f306c0ee3e43ba2446eb3616e42d6855',
'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + '17f085c1d7f6f0cad9e6041aa55a22594925eca5',
Var('webrtc_git') + '/src.git' + '@' + 'dc5cf31cad576376abd3aa6306169453cfd85ba5',
'src/third_party/libgifcodec':
Var('skia_git') + '/libgifcodec' + '@'+ Var('libgifcodec_revision'),
@ -1775,7 +1775,7 @@ deps = {
},
'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + '8e3d3359f9bec608e09fac1f7054a2a14b1bd73c',
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'a33b227047def29b79853ef688b6dda6c6fc5386',
'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@ -1784,7 +1784,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/linux-amd64',
'version': 'ebDbf3X2jdAICDlXMXUr7yp4muhSvYoREDLdZZoJzuAC',
'version': 'BquSeorcTU84O2_A8IoWetGrcfLWxLfZCo9sve1Wt2IC',
},
],
'dep_type': 'cipd',
@ -1794,7 +1794,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/windows-amd64',
'version': '58nNno6pNLLSJaZknYmuijuo5gy2tfRBKNI1iCldDlcC',
'version': 'AOoQr1u4-cOIEYJDAgVxGWoTiPaRcjrSsjjAaB-u_ggC',
},
],
'dep_type': 'cipd',
@ -1805,7 +1805,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-amd64',
'version': 'yE_XL6pbQ8M0WuI236ObRlkSxu0XMdWs_AnUeo21wa8C',
'version': '-t3YY_sZ-jtMAYZ2PlhjudFnEUgk4m-HjlIwSip4tOAC',
},
],
'dep_type': 'cipd',
@ -1816,7 +1816,7 @@ deps = {
'packages': [
{
'package': 'skia/tools/goldctl/mac-arm64',
'version': 'l46gUmkc-2-OsEMo-oEbpXiBAYg48KtXbtF1lyBh0u8C',
'version': 'x_xKUnqrgizoTO8mxX4RkyhpQ-nUp_x_go9YH-tc--QC',
},
],
'dep_type': 'cipd',
@ -1827,7 +1827,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': {
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@fa84529d9b58900f7476a217edf9dfbf8ac19b95',
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@c29563eac12bc062b66805a4766673729ce7d4ef',
'condition': 'checkout_src_internal',
},
@ -1846,7 +1846,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/eche_app/app',
'version': 'uWK-hoCeXK1wUicxHb7YlPKmGDebjbXvVHcCkFbCnFkC',
'version': 'PEjYa5GVISxpuqCZfq9pZ3QeSWhNtWSdQ6gmJ8bizQ0C',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@ -1857,7 +1857,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/help_app/app',
'version': '_JqbtCltJUQrGFSIw_3hlkjdvrFVrqIKXh9aSzVJ0e8C',
'version': 'MqUROEBmHZCBRsEY3abQ7JOvoDr5wZ_MTK3vAN-901wC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@ -1868,7 +1868,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/media_app/app',
'version': 'OUGohHk5YvTdODZGYUatmAIbkUeI9qX41jPUFsn8kFAC',
'version': 'tV1aN61vvzGiDSJgQxN_namEG8pvO6RTuO-qbQMC51IC',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@ -1879,7 +1879,7 @@ deps = {
'packages': [
{
'package': 'chromeos_internal/apps/projector_app/app',
'version': 'xbs9VsAB3uK9gNyQtNWmfxYtOUrdR28ynajJYzrV6tAC',
'version': 'Eeqz2JXdGXA3-P7iu9xSzSc3iyUuAruoN1W3-FplxR4C',
},
],
'condition': 'checkout_chromeos and checkout_src_internal',
@ -2671,7 +2671,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_material_material',
'version': 'version:2@1.7.0-alpha02.cr1',
'version': 'version:2@1.6.0-alpha01.cr1',
},
],
'condition': 'checkout_android',
@ -3309,7 +3309,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib',
'version': 'version:2@1.7.10.cr1',
'version': 'version:2@1.7.0.cr1',
},
],
'condition': 'checkout_android',
@ -3320,7 +3320,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common',
'version': 'version:2@1.7.10.cr1',
'version': 'version:2@1.7.0.cr1',
},
],
'condition': 'checkout_android',
@ -3463,7 +3463,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_annotations',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3474,7 +3474,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_junit',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3485,7 +3485,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_nativeruntime',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3496,7 +3496,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_pluginapi',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3507,7 +3507,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_plugins_maven_dependency_resolver',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3518,7 +3518,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_resources',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3529,7 +3529,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_robolectric',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3540,7 +3540,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_sandbox',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3551,7 +3551,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadowapi',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3562,7 +3562,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadows_framework',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3573,7 +3573,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadows_multidex',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3584,7 +3584,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_shadows_playservices',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3595,7 +3595,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_utils',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3606,7 +3606,7 @@ deps = {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_robolectric_utils_reflector',
'version': 'version:2@4.8.1.cr1',
'version': 'version:2@4.7.3.cr1',
},
],
'condition': 'checkout_android',
@ -3686,20 +3686,6 @@ skip_child_includes = [
hooks = [
# Download and initialize "vpython" VirtualEnv environment packages for
# Python2. We do this before running any other hooks so that any other
# hooks that might use vpython don't trip over unexpected issues and
# don't run slower than they might otherwise need to.
{
'name': 'vpython_common',
'pattern': '.',
# TODO(https://crbug.com/1205263): Run this on mac/arm too once it works.
'condition': 'not (host_os == "mac" and host_cpu == "arm64") and enable_vpython_common_crbug_1329052',
'action': [ 'vpython',
'-vpython-spec', 'src/.vpython',
'-vpython-tool', 'install',
],
},
# Download and initialize "vpython" VirtualEnv environment packages for
# Python3. We do this before running any other hooks so that any other
# hooks that might use vpython don't trip over unexpected issues and
@ -4378,6 +4364,18 @@ hooks = [
'--gs_url_base=chromeos-prebuilt/afdo-job/llvm',
],
},
{
'name': 'gvr_static_shim_android_arm_1',
'pattern': '\\.sha1',
'condition': 'checkout_android',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-gvr-static-shim',
'-s', 'src/third_party/gvr-android-sdk/libgvr_shim_static_arm_1.a.sha1',
],
},
{
'name': 'gvr_static_shim_android_arm_Cr',
'pattern': '\\.sha1',
@ -4390,6 +4388,18 @@ hooks = [
'-s', 'src/third_party/gvr-android-sdk/libgvr_shim_static_arm_Cr.a.sha1',
],
},
{
'name': 'gvr_static_shim_android_arm64_1',
'pattern': '\\.sha1',
'condition': 'checkout_android',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-gvr-static-shim',
'-s', 'src/third_party/gvr-android-sdk/libgvr_shim_static_arm64_1.a.sha1',
],
},
{
'name': 'gvr_static_shim_android_arm64_Cr',
'pattern': '\\.sha1',


@ -187,17 +187,10 @@ mixed_component("base") {
"allocator/allocator_check.h",
"allocator/allocator_extension.cc",
"allocator/allocator_extension.h",
"allocator/dispatcher/configuration.h",
"allocator/dispatcher/dispatcher.cc",
"allocator/dispatcher/dispatcher.h",
"allocator/dispatcher/initializer.h",
"allocator/dispatcher/internal/dispatch_data.cc",
"allocator/dispatcher/internal/dispatch_data.h",
"allocator/dispatcher/internal/dispatcher_internal.h",
"allocator/dispatcher/internal/tools.h",
"allocator/dispatcher/reentry_guard.cc",
"allocator/dispatcher/reentry_guard.h",
"allocator/dispatcher/subsystem.h",
"as_const.h",
"at_exit.cc",
"at_exit.h",
@ -355,7 +348,6 @@ mixed_component("base") {
"files/scoped_temp_dir.cc",
"files/scoped_temp_dir.h",
"format_macros.h",
"functional/function_ref.h",
"functional/identity.h",
"functional/invoke.h",
"functional/not_fn.h",
@ -548,10 +540,6 @@ mixed_component("base") {
"power_monitor/power_monitor_source.cc",
"power_monitor/power_monitor_source.h",
"power_monitor/power_observer.h",
"power_monitor/sampling_event_source.cc",
"power_monitor/sampling_event_source.h",
"power_monitor/timer_sampling_event_source.cc",
"power_monitor/timer_sampling_event_source.h",
"process/environment_internal.cc",
"process/environment_internal.h",
"process/kill.cc",
@ -629,7 +617,6 @@ mixed_component("base") {
"sequence_checker_impl.h",
"sequence_token.cc",
"sequence_token.h",
"state_transitions.h",
"stl_util.h",
"strings/abseil_string_conversions.cc",
"strings/abseil_string_conversions.h",
@ -701,8 +688,6 @@ mixed_component("base") {
"task/common/checked_lock.h",
"task/common/checked_lock_impl.cc",
"task/common/checked_lock_impl.h",
"task/common/lazy_now.cc",
"task/common/lazy_now.h",
"task/common/operations_controller.cc",
"task/common/operations_controller.h",
"task/common/scoped_defer_task_posting.cc",
@ -736,9 +721,9 @@ mixed_component("base") {
"task/sequence_manager/enqueue_order_generator.h",
"task/sequence_manager/fence.cc",
"task/sequence_manager/fence.h",
"task/sequence_manager/hierarchical_timing_wheel.cc",
"task/sequence_manager/hierarchical_timing_wheel.h",
"task/sequence_manager/lazily_deallocated_deque.h",
"task/sequence_manager/lazy_now.cc",
"task/sequence_manager/lazy_now.h",
"task/sequence_manager/sequence_manager.cc",
"task/sequence_manager/sequence_manager.h",
"task/sequence_manager/sequence_manager_impl.cc",
@ -767,8 +752,6 @@ mixed_component("base") {
"task/sequence_manager/thread_controller_with_message_pump_impl.h",
"task/sequence_manager/time_domain.cc",
"task/sequence_manager/time_domain.h",
"task/sequence_manager/timing_wheel.cc",
"task/sequence_manager/timing_wheel.h",
"task/sequence_manager/wake_up_queue.cc",
"task/sequence_manager/wake_up_queue.h",
"task/sequence_manager/work_deduplicator.cc",
@ -1064,12 +1047,6 @@ mixed_component("base") {
"message_loop/message_pump_win.cc",
"message_loop/message_pump_win.h",
"native_library_win.cc",
"power_monitor/battery_level_provider.cc",
"power_monitor/battery_level_provider.h",
"power_monitor/battery_level_provider_win.cc",
"power_monitor/power_monitor_device_source_win.cc",
"power_monitor/speed_limit_observer_win.cc",
"power_monitor/speed_limit_observer_win.h",
"process/kill_win.cc",
"process/launch_win.cc",
"process/memory_win.cc",
@ -1120,8 +1097,6 @@ mixed_component("base") {
"win/core_winrt_util.cc",
"win/core_winrt_util.h",
"win/current_module.h",
"win/dark_mode_support.cc",
"win/dark_mode_support.h",
"win/default_apps_util.cc",
"win/default_apps_util.h",
"win/embedded_i18n/language_selector.cc",
@ -1294,14 +1269,6 @@ mixed_component("base") {
"message_loop/message_pump_mac.h",
"message_loop/message_pump_mac.mm",
"native_library_mac.mm",
"power_monitor/battery_level_provider.cc",
"power_monitor/battery_level_provider.h",
"power_monitor/battery_level_provider_mac.mm",
"power_monitor/iopm_power_source_sampling_event_source.cc",
"power_monitor/iopm_power_source_sampling_event_source.h",
"power_monitor/power_monitor_device_source_mac.mm",
"power_monitor/thermal_state_observer_mac.h",
"power_monitor/thermal_state_observer_mac.mm",
"process/kill_mac.cc",
"process/launch_mac.cc",
"process/memory_mac.mm",
@ -1523,7 +1490,6 @@ mixed_component("base") {
":logging_buildflags",
":orderfile_buildflags",
":parsing_buildflags",
":power_monitor_buildflags",
":profiler_buildflags",
":sanitizer_buildflags",
":synchronization_buildflags",
@ -2001,6 +1967,9 @@ mixed_component("base") {
"files/file_enumerator_win.cc",
"memory/platform_shared_memory_mapper_win.cc",
"memory/platform_shared_memory_region_win.cc",
"power_monitor/power_monitor_device_source_win.cc",
"power_monitor/speed_limit_observer_win.cc",
"power_monitor/speed_limit_observer_win.h",
"profiler/win32_stack_frame_unwinder.cc",
"profiler/win32_stack_frame_unwinder.h",
"rand_util_win.cc",
@ -2055,6 +2024,9 @@ mixed_component("base") {
"memory/platform_shared_memory_region_mac.cc",
"message_loop/message_pump_kqueue.cc",
"message_loop/message_pump_kqueue.h",
"power_monitor/power_monitor_device_source_mac.mm",
"power_monitor/thermal_state_observer_mac.h",
"power_monitor/thermal_state_observer_mac.mm",
"system/sys_info_mac.mm",
"time/time_conversion_posix.cc",
"time/time_exploded_posix.cc",
@ -2302,6 +2274,8 @@ mixed_component("base") {
if (enable_base_tracing) {
sources += [
"trace_event/auto_open_close_event.h",
"trace_event/blame_context.cc",
"trace_event/blame_context.h",
"trace_event/builtin_categories.cc",
"trace_event/builtin_categories.h",
"trace_event/category_registry.cc",
@ -2460,7 +2434,6 @@ buildflag_header("debugging_buildflags") {
is_debug && strip_absolute_paths_from_debug_symbols && is_mac
flags = [
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"ENABLE_LOCATION_SOURCE=$enable_location_source",
"FROM_HERE_USES_LOCATION_BUILTINS=$from_here_uses_location_builtins",
"ENABLE_PROFILING=$enable_profiling",
@ -2570,14 +2543,6 @@ buildflag_header("profiler_buildflags") {
]
}
buildflag_header("power_monitor_buildflags") {
header = "power_monitor_buildflags.h"
header_dir = "base/power_monitor"
_has_battery_provider_impl = is_win || is_mac
flags = [ "HAS_BATTERY_LEVEL_PROVIDER_IMPL=$_has_battery_provider_impl" ]
}
# This is the subset of files from base that should not be used with a dynamic
# library. Note that this library cannot depend on base because base depends on
# base_static.


@ -39,6 +39,8 @@ buildflag_header("buildflags") {
# Not to be used directly - see `partition_alloc_config.h`.
"USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",
"USE_FAKE_BINARY_EXPERIMENT=$use_fake_binary_experiment",
]
}
@ -68,9 +70,6 @@ config("wrap_malloc_symbols") {
"-Wl,-wrap,realloc",
"-Wl,-wrap,valloc",
# Not allocating memory, but part of the API
"-Wl,-wrap,malloc_usable_size",
# <stdlib.h> functions
"-Wl,-wrap,realpath",


@ -84,20 +84,12 @@ if (is_win && use_allocator_shim) {
_is_brp_supported = (is_win || is_android || is_linux || is_mac ||
is_chromeos) && use_allocator == "partition"
_is_mcp_supported = is_win && use_allocator == "partition"
declare_args() {
# We jam MTECheckedPtr off by default, but can set it to
# `_is_mcp_supported` to activate it.
use_mte_checked_ptr = false
}
declare_args() {
# Set use_backup_ref_ptr true to use BackupRefPtr (BRP) as the implementation
# of raw_ptr<T>, and enable PartitionAlloc support for it.
# We also disable BRP in the presence of MTECheckedPtr, which is almost
# never enabled.
use_backup_ref_ptr = _is_brp_supported && !use_mte_checked_ptr
use_backup_ref_ptr = _is_brp_supported
use_mte_checked_ptr = false
}
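
For context, `use_backup_ref_ptr` selects BackupRefPtr (BRP) as the implementation behind Chromium's `raw_ptr<T>`, as the comment above says. The core idea is a per-allocation reference count of outstanding protected pointers: the slot is only truly released once the last one is gone, so a use-after-free touches quarantined rather than recycled memory. A self-contained sketch of that idea (not Chromium's implementation; all names here are illustrative):

```
#include <cstddef>
#include <cstdlib>
#include <new>

// Per-allocation header holding a BRP-style reference count.
struct SlotHeader {
  size_t ref_count = 0;
  bool freed = false;
};

SlotHeader* HeaderOf(void* ptr) { return static_cast<SlotHeader*>(ptr) - 1; }

void* BrpAlloc(size_t size) {
  void* raw = std::malloc(sizeof(SlotHeader) + size);
  auto* header = new (raw) SlotHeader();
  return header + 1;  // user data starts right after the header
}

void BrpAcquire(void* ptr) {  // a protected pointer starts referencing ptr
  ++HeaderOf(ptr)->ref_count;
}

void BrpRelease(void* ptr) {  // a protected pointer is destroyed
  SlotHeader* h = HeaderOf(ptr);
  if (--h->ref_count == 0 && h->freed)
    std::free(h);  // last reference gone: release the quarantined slot
}

void BrpFree(void* ptr) {  // the owner frees the allocation
  SlotHeader* h = HeaderOf(ptr);
  h->freed = true;
  if (h->ref_count == 0)
    std::free(h);  // no protected pointers outstanding: release immediately
}
```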
assert(!(use_backup_ref_ptr && use_mte_checked_ptr),
@ -116,6 +108,11 @@ declare_args() {
enable_backup_ref_ptr_slow_checks = false
enable_dangling_raw_ptr_checks = false
# Registers the binary for a fake binary A/B experiment. The binaries built
# with this flag have no behavior difference, except for setting a synthetic
# Finch.
use_fake_binary_experiment = false
# The supported platforms are supposed to match `_is_brp_supported`, but we
# enable the feature on Linux early because it's most widely used for security
# research


@ -22,7 +22,6 @@ void* __real_calloc(size_t, size_t);
void* __real_realloc(void*, size_t);
void* __real_memalign(size_t, size_t);
void __real_free(void*);
size_t __real_malloc_usable_size(void*);
} // extern "C"
namespace {
@ -58,12 +57,6 @@ void RealFree(const AllocatorDispatch*, void* address, void* context) {
__real_free(address);
}
size_t RealSizeEstimate(const AllocatorDispatch*,
void* address,
void* context) {
return __real_malloc_usable_size(address);
}
} // namespace
const AllocatorDispatch AllocatorDispatch::default_dispatch = {
@ -73,7 +66,7 @@ const AllocatorDispatch AllocatorDispatch::default_dispatch = {
&RealMemalign, /* alloc_aligned_function */
&RealRealloc, /* realloc_function */
&RealFree, /* free_function */
&RealSizeEstimate, /* get_size_estimate_function */
nullptr, /* get_size_estimate_function */
nullptr, /* batch_malloc_function */
nullptr, /* batch_free_function */
nullptr, /* free_definite_size_function */


@ -54,10 +54,6 @@ SHIM_ALWAYS_EXPORT void* __wrap_valloc(size_t size) {
return ShimValloc(size, nullptr);
}
SHIM_ALWAYS_EXPORT size_t __wrap_malloc_usable_size(void* address) {
return ShimGetSizeEstimate(address, nullptr);
}
const size_t kPathMaxSize = 8192;
static_assert(kPathMaxSize >= PATH_MAX, "");
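The removed `__wrap_malloc_usable_size` above belongs to the linker-wrapping shim: each `-Wl,-wrap,<symbol>` flag in the `wrap_malloc_symbols` config earlier makes the linker redirect every reference to `<symbol>` to `__wrap_<symbol>`, while `__real_<symbol>` resolves to the original definition. A minimal standalone illustration of the mechanism (build with `-Wl,-wrap,malloc`; the logging is illustrative):

```
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Provided by the linker under -Wl,-wrap,malloc: callers' references to
// malloc are rewritten to __wrap_malloc, and __real_malloc resolves to
// the original allocator.
extern "C" void* __real_malloc(size_t size);

extern "C" void* __wrap_malloc(size_t size) {
  void* ptr = __real_malloc(size);  // forward to the real malloc
  std::fprintf(stderr, "malloc(%zu) -> %p\n", size, ptr);
  return ptr;
}

int main() {
  void* p = std::malloc(32);  // routed through __wrap_malloc
  std::free(p);
}
```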


@ -1,24 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
#define BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_
#include <cstddef>
namespace base::allocator::dispatcher::configuration {
// The maximum number of optional observers that may be present depending on
// command line parameters.
constexpr size_t kMaximumNumberOfOptionalObservers = 4;
// The total number of observers including mandatory and optional observers.
// Primarily the number of observers affects the performance at allocation time.
// The current value of 4 doesn't have hard evidence. Keep in mind that
// also a single observer can severely impact performance.
constexpr size_t kMaximumNumberOfObservers = 4;
} // namespace base::allocator::dispatcher::configuration
#endif // BASE_ALLOCATOR_DISPATCHER_CONFIGURATION_H_


@ -6,18 +6,10 @@
#include "base/allocator/allocator_shim.h"
#include "base/allocator/buildflags.h"
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/no_destructor.h"
#include "base/sampling_heap_profiler/poisson_allocation_sampler.h"
#if DCHECK_IS_ON()
#include <atomic>
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
namespace base::allocator::dispatcher::allocator_shim_details {
namespace {
@ -245,97 +237,3 @@ void RemoveStandardAllocatorHooksForTesting() {
}
} // namespace base::allocator::dispatcher
namespace base::allocator::dispatcher {
// The private implementation of Dispatcher.
struct Dispatcher::Impl {
void Initialize(const internal::DispatchData& dispatch_data) {
#if DCHECK_IS_ON()
DCHECK(!is_initialized_check_flag_.test_and_set());
#endif
dispatch_data_ = dispatch_data;
ConnectToEmitters(dispatch_data_);
}
void Reset() {
#if DCHECK_IS_ON()
DCHECK([&]() {
auto const was_set = is_initialized_check_flag_.test();
is_initialized_check_flag_.clear();
return was_set;
}());
#endif
DisconnectFromEmitters(dispatch_data_);
dispatch_data_ = {};
}
private:
// Connect the hooks to the memory subsystem. In some cases, most notably when
// we have no observers at all, the hooks will be invalid and must NOT be
// connected. This way we avoid sending notifications when no observers are
// present.
static void ConnectToEmitters(const internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
allocator::InsertAllocatorDispatch(allocator_dispatch);
}
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC)
{
auto* const allocation_hook = dispatch_data.GetAllocationObserverHook();
auto* const free_hook = dispatch_data.GetFreeObserverHook();
if (allocation_hook && free_hook) {
partition_alloc::PartitionAllocHooks::SetObserverHooks(allocation_hook,
free_hook);
}
}
#endif
}
static void DisconnectFromEmitters(internal::DispatchData& dispatch_data) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
if (auto* const allocator_dispatch = dispatch_data.GetAllocatorDispatch()) {
allocator::RemoveAllocatorDispatchForTesting(
allocator_dispatch); // IN-TEST
}
#endif
#if BUILDFLAG(USE_PARTITION_ALLOC)
partition_alloc::PartitionAllocHooks::SetObserverHooks(nullptr, nullptr);
#endif
}
// Information on the hooks.
internal::DispatchData dispatch_data_;
#if DCHECK_IS_ON()
// Indicates whether the dispatcher has been initialized before.
#if !defined(__cpp_lib_atomic_value_initialization) || \
__cpp_lib_atomic_value_initialization < 201911L
std::atomic_flag is_initialized_check_flag_ = ATOMIC_FLAG_INIT;
#else
std::atomic_flag is_initialized_check_flag_;
#endif
#endif
};
Dispatcher::Dispatcher() : impl_(std::make_unique<Impl>()) {}
Dispatcher::~Dispatcher() = default;
Dispatcher& Dispatcher::GetInstance() {
static base::NoDestructor<Dispatcher> instance;
return *instance;
}
void Dispatcher::Initialize(const internal::DispatchData& dispatch_data) {
impl_->Initialize(dispatch_data);
}
void Dispatcher::ResetForTesting() {
impl_->Reset();
}
} // namespace base::allocator::dispatcher

View File

@ -5,74 +5,13 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h"
#include <memory>
namespace base::allocator::dispatcher {
void BASE_EXPORT InstallStandardAllocatorHooks();
void BASE_EXPORT RemoveStandardAllocatorHooksForTesting();
namespace internal {
struct DispatchData;
}
// Dispatcher serves as the top level instance for managing the dispatch
// mechanism. The class instance manages connections to the various memory
// subsystems such as PartitionAlloc. To keep the public interface as lean as
// possible, it uses a pimpl pattern.
class BASE_EXPORT Dispatcher {
public:
static Dispatcher& GetInstance();
Dispatcher();
// Initialize the dispatch mechanism with the given tuple of observers. The
// observers must be valid (it is only DCHECKed internally at initialization,
// but not verified further).
// If Initialize is called multiple times, the first one wins. All later
// invocations are silently ignored. Initialization is protected from
// concurrent invocations. In case of concurrent accesses, the first one to
// get the lock wins.
// The dispatcher invokes the following functions on the observers:
// void OnAllocation(void* address,
// size_t size,
// AllocationSubsystem sub_system,
// const char* type_name);
// void OnFree(void* address);
//
// Note: The dispatcher mechanism does NOT provide systematic protection against
// recursive invocations. That is, observers which allocate memory on the
// heap, e.g. through dynamically allocated containers or by using the
// CHECK macro, are responsible for breaking these recursions!
template <typename... ObserverTypes>
void Initialize(const std::tuple<ObserverTypes...>& observers) {
// Get the hooks for running these observers and pass them to further
// initialization
Initialize(internal::GetNotificationHooks(observers));
}
// The following functions provide an interface to set up and tear down the
// dispatcher when testing. This must NOT be used from production code since
// the hooks cannot be removed reliably under all circumstances.
template <typename ObserverType>
void InitializeForTesting(ObserverType* observer) {
Initialize(std::make_tuple(observer));
}
void ResetForTesting();
private:
// Structure and pointer to the private implementation.
struct Impl;
std::unique_ptr<Impl> const impl_;
~Dispatcher();
void Initialize(const internal::DispatchData& dispatch_data);
};
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
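
To make the observer contract above concrete, here is a minimal sketch of an observer type that matches the documented `OnAllocation`/`OnFree` interface. `CountingObserver` and the wiring are hypothetical and only illustrate the shape the dispatcher expects:

```
// Minimal sketch (hypothetical type, not part of this diff). Observer
// callbacks must not allocate on the heap themselves, since the dispatcher
// does not protect against recursive invocations.
#include <atomic>
#include <cstddef>
#include <tuple>

#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/subsystem.h"

class CountingObserver {
 public:
  void OnAllocation(void* address,
                    size_t size,
                    base::allocator::dispatcher::AllocationSubsystem sub_system,
                    const char* type_name) {
    allocated_bytes_.fetch_add(size, std::memory_order_relaxed);
  }
  void OnFree(void* address) {}

 private:
  std::atomic<size_t> allocated_bytes_{0};
};

void InstallObserver() {
  // The first Initialize call wins; later invocations are silently ignored.
  static CountingObserver observer;
  base::allocator::dispatcher::Dispatcher::GetInstance().Initialize(
      std::make_tuple(&observer));
}
```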

View File

@ -1,206 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#define BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/dispatcher.h"
#include "base/allocator/dispatcher/internal/tools.h"
#include <tuple>
#include <utility>
namespace base::allocator::dispatcher {
namespace internal {
// Filter the passed observers and perform initialization of the passed
// dispatcher.
template <size_t CurrentIndex,
typename DispatcherType,
typename CheckObserverPredicate,
typename VerifiedObservers,
typename UnverifiedObservers,
size_t... IndicesToSelect>
inline void DoInitialize(DispatcherType& dispatcher,
CheckObserverPredicate check_observer,
const VerifiedObservers& verified_observers,
const UnverifiedObservers& unverified_observers,
std::index_sequence<IndicesToSelect...> indices) {
if constexpr (CurrentIndex < std::tuple_size<UnverifiedObservers>::value) {
// We still have some items left to handle.
if (check_observer(std::get<CurrentIndex>(unverified_observers))) {
// The current observer is valid. Hence, append the index of the current
// item to the set of indices and head on to the next item.
DoInitialize<CurrentIndex + 1>(
dispatcher, check_observer, verified_observers, unverified_observers,
std::index_sequence<IndicesToSelect..., CurrentIndex>{});
} else {
// The current observer is not valid. Hence, head on to the next item with
// an unaltered list of indices.
DoInitialize<CurrentIndex + 1>(dispatcher, check_observer,
verified_observers, unverified_observers,
indices);
}
} else if constexpr (CurrentIndex ==
std::tuple_size<UnverifiedObservers>::value) {
// We have reached the end of the tuple of observers to verify.
// Hence, we extract the additional valid observers, append them to the tuple
// of already verified observers, and hand everything over to the dispatcher.
auto observers = std::tuple_cat(
verified_observers,
std::make_tuple(std::get<IndicesToSelect>(unverified_observers)...));
// Do a final check that neither the maximum total number of observers nor
// the maximum number of optional observers is exceeded.
static_assert(std::tuple_size<decltype(observers)>::value <=
configuration::kMaximumNumberOfObservers);
static_assert(sizeof...(IndicesToSelect) <=
configuration::kMaximumNumberOfOptionalObservers);
dispatcher.Initialize(std::move(observers));
}
}
} // namespace internal
// The result of concatenating two tuple-types.
template <typename... tuples>
using TupleCat = decltype(std::tuple_cat(std::declval<tuples>()...));
// Initializer collects mandatory and optional observers and initializes the
// passed Dispatcher with only the enabled observers.
//
// In some situations, the presence of observers depends on runtime conditions,
// e.g. command line parameters or CPU features. With 3 optional observers we
// already have 8 different combinations. Initializer takes over the job of
// dealing with all combinations from the user. It allows users to pass all
// observers (including nullptr for disabled optional observers) and initializes
// the Dispatcher with only the enabled observers.
//
// Since this process results in a combinatorial explosion, Initializer
// distinguishes between optional and mandatory observers. Mandatory observers
// are not included in the filtering process and must always be enabled (not
// nullptr).
//
// To allow the Initializer to track the number and exact type of observers, it
// is implemented as a templated class which holds information on the types in
// the std::tuples passed as template parameters. Therefore, whenever any
// observer is set, the Initializer changes its type to reflect this.
template <typename MandatoryObservers = std::tuple<>,
typename OptionalObservers = std::tuple<>>
struct BASE_EXPORT Initializer {
Initializer() = default;
Initializer(MandatoryObservers mandatory_observers,
OptionalObservers optional_observers)
: mandatory_observers_(std::move(mandatory_observers)),
optional_observers_(std::move(optional_observers)) {}
// Set the mandatory observers. The number of observers that can be set is
// limited by configuration::kMaximumNumberOfObservers.
template <typename... NewMandatoryObservers,
std::enable_if_t<
internal::LessEqual((sizeof...(NewMandatoryObservers) +
std::tuple_size<OptionalObservers>::value),
configuration::kMaximumNumberOfObservers),
bool> = true>
Initializer<std::tuple<NewMandatoryObservers*...>, OptionalObservers>
SetMandatoryObservers(NewMandatoryObservers*... mandatory_observers) const {
return {std::make_tuple(mandatory_observers...), GetOptionalObservers()};
}
// Add mandatory observers. The number of observers that can be added is
// limited by the current number of observers, see
// configuration::kMaximumNumberOfObservers.
template <typename... AdditionalMandatoryObservers,
std::enable_if_t<internal::LessEqual(
std::tuple_size<MandatoryObservers>::value +
sizeof...(AdditionalMandatoryObservers) +
std::tuple_size<OptionalObservers>::value,
configuration::kMaximumNumberOfObservers),
bool> = true>
Initializer<TupleCat<MandatoryObservers,
std::tuple<AdditionalMandatoryObservers*...>>,
OptionalObservers>
AddMandatoryObservers(
AdditionalMandatoryObservers*... additional_mandatory_observers) const {
return {std::tuple_cat(GetMandatoryObservers(),
std::make_tuple(additional_mandatory_observers...)),
GetOptionalObservers()};
}
// Set the optional observers. The number of observers that can be set is
// limited by configuration::kMaximumNumberOfOptionalObservers as well as
// configuration::kMaximumNumberOfObservers.
template <
typename... NewOptionalObservers,
std::enable_if_t<
internal::LessEqual(
sizeof...(NewOptionalObservers),
configuration::kMaximumNumberOfOptionalObservers) &&
internal::LessEqual((sizeof...(NewOptionalObservers) +
std::tuple_size<MandatoryObservers>::value),
configuration::kMaximumNumberOfObservers),
bool> = true>
Initializer<MandatoryObservers, std::tuple<NewOptionalObservers*...>>
SetOptionalObservers(NewOptionalObservers*... optional_observers) const {
return {GetMandatoryObservers(), std::make_tuple(optional_observers...)};
}
// Add optional observers. The number of observers that can be added is
// limited by the current number of optional observers,
// configuration::kMaximumNumberOfOptionalObservers, as well as
// configuration::kMaximumNumberOfObservers.
template <
typename... AdditionalOptionalObservers,
std::enable_if_t<
internal::LessEqual(
std::tuple_size<OptionalObservers>::value +
sizeof...(AdditionalOptionalObservers),
configuration::kMaximumNumberOfOptionalObservers) &&
internal::LessEqual((std::tuple_size<OptionalObservers>::value +
sizeof...(AdditionalOptionalObservers) +
std::tuple_size<MandatoryObservers>::value),
configuration::kMaximumNumberOfObservers),
bool> = true>
Initializer<
MandatoryObservers,
TupleCat<OptionalObservers, std::tuple<AdditionalOptionalObservers*...>>>
AddOptionalObservers(
AdditionalOptionalObservers*... additional_optional_observers) const {
return {GetMandatoryObservers(),
std::tuple_cat(GetOptionalObservers(),
std::make_tuple(additional_optional_observers...))};
}
// Perform the actual initialization on the passed dispatcher.
// The dispatcher is passed as a template only to provide better testability.
template <typename DispatcherType>
void DoInitialize(DispatcherType& dispatcher) const {
internal::DoInitialize<0>(dispatcher, internal::IsValidObserver{},
GetMandatoryObservers(), GetOptionalObservers(),
{});
}
const MandatoryObservers& GetMandatoryObservers() const {
return mandatory_observers_;
}
const OptionalObservers& GetOptionalObservers() const {
return optional_observers_;
}
private:
MandatoryObservers mandatory_observers_;
OptionalObservers optional_observers_;
};
// Convenience function for creating an empty Initializer.
inline Initializer<> CreateInitializer() {
return {};
}
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_INITIALIZER_H_
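
A hedged usage sketch of the builder above: `MyProfiler`, `MyTracer`, and `tracing_enabled` are hypothetical stand-ins, and only non-null optional observers survive the filtering performed by `internal::DoInitialize`:

```
// Sketch only; the observer types and the runtime flag are assumptions.
void InstallObservers(bool tracing_enabled) {
  static MyProfiler profiler;  // mandatory observer, must never be null
  static MyTracer tracer;      // optional observer, may be disabled
  MyTracer* const maybe_tracer = tracing_enabled ? &tracer : nullptr;

  base::allocator::dispatcher::CreateInitializer()
      .SetMandatoryObservers(&profiler)
      .AddOptionalObservers(maybe_tracer)
      .DoInitialize(base::allocator::dispatcher::Dispatcher::GetInstance());
}
```

If `maybe_tracer` is null, it is dropped while the final observer tuple is assembled, so the Dispatcher is initialized with exactly the enabled observers and pays nothing for the disabled one.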

View File

@ -1,41 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/dispatcher/internal/dispatch_data.h"
namespace base::allocator::dispatcher::internal {
#if BUILDFLAG(USE_PARTITION_ALLOC)
DispatchData& DispatchData::SetAllocationObserverHooks(
AllocationObserverHook* allocation_observer_hook,
FreeObserverHook* free_observer_hook) {
allocation_observer_hook_ = allocation_observer_hook;
free_observer_hook_ = free_observer_hook;
return *this;
}
DispatchData::AllocationObserverHook* DispatchData::GetAllocationObserverHook()
const {
return allocation_observer_hook_;
}
DispatchData::FreeObserverHook* DispatchData::GetFreeObserverHook() const {
return free_observer_hook_;
}
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
DispatchData& DispatchData::SetAllocatorDispatch(
AllocatorDispatch* allocator_dispatch) {
allocator_dispatch_ = allocator_dispatch;
return *this;
}
AllocatorDispatch* DispatchData::GetAllocatorDispatch() const {
return allocator_dispatch_;
}
#endif
} // namespace base::allocator::dispatcher::internal

View File

@ -1,54 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCH_DATA_H_
#include "base/allocator/buildflags.h"
#include "base/base_export.h"
#include "build/build_config.h"
#if BUILDFLAG(USE_PARTITION_ALLOC)
#include "base/allocator/partition_allocator/partition_alloc.h"
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "base/allocator/allocator_shim.h"
#endif
namespace base::allocator::dispatcher::internal {
// A simple utility class to pass all the information required to properly hook
// into the memory allocation subsystems from DispatcherImpl to the Dispatcher.
struct BASE_EXPORT DispatchData {
#if BUILDFLAG(USE_PARTITION_ALLOC)
using AllocationObserverHook =
partition_alloc::PartitionAllocHooks::AllocationObserverHook;
using FreeObserverHook =
partition_alloc::PartitionAllocHooks::FreeObserverHook;
DispatchData& SetAllocationObserverHooks(AllocationObserverHook*,
FreeObserverHook*);
AllocationObserverHook* GetAllocationObserverHook() const;
FreeObserverHook* GetFreeObserverHook() const;
private:
AllocationObserverHook* allocation_observer_hook_ = nullptr;
FreeObserverHook* free_observer_hook_ = nullptr;
public:
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
DispatchData& SetAllocatorDispatch(AllocatorDispatch* allocator_dispatch);
AllocatorDispatch* GetAllocatorDispatch() const;
private:
AllocatorDispatch* allocator_dispatch_ = nullptr;
#endif
};
} // namespace base::allocator::dispatcher::internal
#endif
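
A short sketch of the builder-style interface above; the hook functions and dispatch table are hypothetical placeholders:

```
// Sketch only; OnAllocHook, OnFreeHook, and g_allocator_dispatch are assumed.
base::allocator::dispatcher::internal::DispatchData MakeDispatchData() {
  base::allocator::dispatcher::internal::DispatchData dispatch_data;
#if BUILDFLAG(USE_PARTITION_ALLOC)
  dispatch_data.SetAllocationObserverHooks(&OnAllocHook, &OnFreeHook);
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  dispatch_data.SetAllocatorDispatch(&g_allocator_dispatch);
#endif
  return dispatch_data;
}
```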

View File

@ -1,351 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/dispatcher/configuration.h"
#include "base/allocator/dispatcher/internal/dispatch_data.h"
#include "base/allocator/dispatcher/internal/tools.h"
#include "base/allocator/dispatcher/reentry_guard.h"
#include "base/allocator/dispatcher/subsystem.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if BUILDFLAG(USE_PARTITION_ALLOC)
#include "base/allocator/partition_allocator/partition_alloc.h"
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
#include "base/allocator/allocator_shim.h"
#endif
#include <tuple>
namespace base::allocator::dispatcher::internal {
template <typename CheckObserverPredicate,
typename... ObserverTypes,
size_t... Indices>
void inline PerformObserverCheck(const std::tuple<ObserverTypes...>& observers,
std::index_sequence<Indices...>,
CheckObserverPredicate check_observer) {
((DCHECK(check_observer(std::get<Indices>(observers)))), ...);
}
template <typename... ObserverTypes, size_t... Indices>
ALWAYS_INLINE void PerformAllocationNotification(
const std::tuple<ObserverTypes...>& observers,
std::index_sequence<Indices...>,
void* address,
size_t size,
AllocationSubsystem subSystem,
const char* type_name) {
((std::get<Indices>(observers)->OnAllocation(address, size, subSystem,
type_name)),
...);
}
template <typename... ObserverTypes, size_t... Indices>
ALWAYS_INLINE void PerformFreeNotification(
const std::tuple<ObserverTypes...>& observers,
std::index_sequence<Indices...>,
void* address) {
((std::get<Indices>(observers)->OnFree(address)), ...);
}
// DispatcherImpl provides hooks into the various memory subsystems. These hooks
// are responsible for dispatching any notification to the observers.
// In order to provide as much information as possible on the exact type of the
// observers and prevent any conditional jumps in the hot allocation path,
// observers are stored in a std::tuple. DispatcherImpl DCHECKs at
// initialization time that they are valid.
template <typename... ObserverTypes>
struct DispatcherImpl {
using AllObservers = std::index_sequence_for<ObserverTypes...>;
template <std::enable_if_t<
internal::LessEqual(sizeof...(ObserverTypes),
configuration::kMaximumNumberOfObservers),
bool> = true>
static DispatchData GetNotificationHooks(
std::tuple<ObserverTypes*...> observers) {
s_observers = std::move(observers);
PerformObserverCheck(s_observers, AllObservers{}, IsValidObserver{});
return CreateDispatchData();
}
private:
static DispatchData CreateDispatchData() {
return DispatchData()
#if BUILDFLAG(USE_PARTITION_ALLOC)
.SetAllocationObserverHooks(&PartitionAllocatorAllocationHook,
&PartitionAllocatorFreeHook)
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
.SetAllocatorDispatch(&allocator_dispatch_)
#endif
;
}
#if BUILDFLAG(USE_PARTITION_ALLOC)
static void PartitionAllocatorAllocationHook(void* address,
size_t size,
const char* type_name) {
DoNotifyAllocation(address, size, AllocationSubsystem::kPartitionAllocator,
type_name);
}
static void PartitionAllocatorFreeHook(void* address) {
DoNotifyFree(address);
}
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
static void* AllocFn(const AllocatorDispatch* self,
size_t size,
void* context) {
ReentryGuard guard;
void* const address = self->next->alloc_function(self->next, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* AllocUncheckedFn(const AllocatorDispatch* self,
size_t size,
void* context) {
ReentryGuard guard;
void* const address =
self->next->alloc_unchecked_function(self->next, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* AllocZeroInitializedFn(const AllocatorDispatch* self,
size_t n,
size_t size,
void* context) {
ReentryGuard guard;
void* const address = self->next->alloc_zero_initialized_function(
self->next, n, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, n * size,
AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* AllocAlignedFn(const AllocatorDispatch* self,
size_t alignment,
size_t size,
void* context) {
ReentryGuard guard;
void* const address = self->next->alloc_aligned_function(
self->next, alignment, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* ReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
// Note: ReentryGuard prevents recursions introduced by malloc and the
// initialization of thread local storage, which happen in the allocation
// path only (please see the docs of ReentryGuard for full details). Therefore,
// DoNotifyFree doesn't need to be guarded. Leaving it unguarded
// also ensures proper notification.
DoNotifyFree(address);
void* const reallocated_address =
self->next->realloc_function(self->next, address, size, context);
if (LIKELY(guard)) {
DoNotifyAllocation(reallocated_address, size,
AllocationSubsystem::kAllocatorShim);
}
return reallocated_address;
}
static void FreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
// Note: DoNotifyFree should be called before free_function (here and in
// other places). That is because observers need to handle the allocation
// being freed before calling free_function, as once the latter is executed
// the address becomes available and can be allocated by another thread.
// That would be racy otherwise.
// Note: The code doesn't need to protect from recursions using
// ReentryGuard, see ReallocFn for details.
DoNotifyFree(address);
self->next->free_function(self->next, address, context);
}
static size_t GetSizeEstimateFn(const AllocatorDispatch* self,
void* address,
void* context) {
return self->next->get_size_estimate_function(self->next, address, context);
}
static unsigned BatchMallocFn(const AllocatorDispatch* self,
size_t size,
void** results,
unsigned num_requested,
void* context) {
ReentryGuard guard;
unsigned const num_allocated = self->next->batch_malloc_function(
self->next, size, results, num_requested, context);
if (LIKELY(guard)) {
for (unsigned i = 0; i < num_allocated; ++i) {
DoNotifyAllocation(results[i], size,
AllocationSubsystem::kAllocatorShim);
}
}
return num_allocated;
}
static void BatchFreeFn(const AllocatorDispatch* self,
void** to_be_freed,
unsigned num_to_be_freed,
void* context) {
// Note: The code doesn't need to protect from recursions using
// ReentryGuard, see ReallocFn for details.
for (unsigned i = 0; i < num_to_be_freed; ++i) {
DoNotifyFree(to_be_freed[i]);
}
self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
context);
}
static void FreeDefiniteSizeFn(const AllocatorDispatch* self,
void* address,
size_t size,
void* context) {
// Note: The code doesn't need to protect from recursions using
// ReentryGuard, see ReallocFn for details.
DoNotifyFree(address);
self->next->free_definite_size_function(self->next, address, size, context);
}
static void* AlignedMallocFn(const AllocatorDispatch* self,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
void* const address = self->next->aligned_malloc_function(
self->next, size, alignment, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void* AlignedReallocFn(const AllocatorDispatch* self,
void* address,
size_t size,
size_t alignment,
void* context) {
ReentryGuard guard;
// Note: size == 0 actually performs free.
// Note: DoNotifyFree doesn't need protection from recursions via
// ReentryGuard; see ReallocFn for details.
// Leaving it unguarded also ensures proper notification of the freed
// portion.
DoNotifyFree(address);
address = self->next->aligned_realloc_function(self->next, address, size,
alignment, context);
if (LIKELY(guard)) {
DoNotifyAllocation(address, size, AllocationSubsystem::kAllocatorShim);
}
return address;
}
static void AlignedFreeFn(const AllocatorDispatch* self,
void* address,
void* context) {
// Note: The code doesn't need to protect from recursions using
// ReentryGuard, see ReallocFn for details.
DoNotifyFree(address);
self->next->aligned_free_function(self->next, address, context);
}
static AllocatorDispatch allocator_dispatch_;
#endif
static ALWAYS_INLINE void DoNotifyAllocation(
void* address,
size_t size,
AllocationSubsystem subSystem,
const char* type_name = nullptr) {
PerformAllocationNotification(s_observers, AllObservers{}, address, size,
subSystem, type_name);
}
static ALWAYS_INLINE void DoNotifyFree(void* address) {
PerformFreeNotification(s_observers, AllObservers{}, address);
}
static std::tuple<ObserverTypes*...> s_observers;
};
template <typename... ObserverTypes>
std::tuple<ObserverTypes*...> DispatcherImpl<ObserverTypes...>::s_observers;
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
template <typename... ObserverTypes>
AllocatorDispatch DispatcherImpl<ObserverTypes...>::allocator_dispatch_ = {
&AllocFn,
&AllocUncheckedFn,
&AllocZeroInitializedFn,
&AllocAlignedFn,
&ReallocFn,
&FreeFn,
&GetSizeEstimateFn,
&BatchMallocFn,
&BatchFreeFn,
&FreeDefiniteSizeFn,
&AlignedMallocFn,
&AlignedReallocFn,
&AlignedFreeFn,
nullptr};
#endif
// Specialization of DispatcherImpl in case we have no observers to notify. In
// this special case we return a set of null pointers as the Dispatcher must not
// install any hooks at all.
template <>
struct DispatcherImpl<> {
static DispatchData GetNotificationHooks(std::tuple<> /*observers*/) {
return DispatchData()
#if BUILDFLAG(USE_PARTITION_ALLOC)
.SetAllocationObserverHooks(nullptr, nullptr)
#endif
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
.SetAllocatorDispatch(nullptr)
#endif
;
}
};
// A small utility function that helps with using DispatcherImpl by providing
// automatic template type deduction.
template <typename... ObserverTypes>
inline DispatchData GetNotificationHooks(
std::tuple<ObserverTypes*...> observers) {
return DispatcherImpl<ObserverTypes...>::GetNotificationHooks(
std::move(observers));
}
} // namespace base::allocator::dispatcher::internal
#endif // BASE_ALLOCATOR_DISPATCHER_INTERNAL_DISPATCHER_INTERNAL_H_
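
The notification helpers at the top of this file rely on `std::index_sequence` and fold expressions so that every observer is called without loops or virtual dispatch. A self-contained sketch of the same pattern:

```
// Standalone illustration of the fold-expression dispatch; any type with an
// OnFree(void*) member works as an observer here.
#include <cstddef>
#include <tuple>
#include <utility>

template <typename... Observers, size_t... Is>
void NotifyAllImpl(const std::tuple<Observers*...>& observers,
                   std::index_sequence<Is...>,
                   void* address) {
  // Unary fold over the comma operator: expands to one call per observer.
  ((std::get<Is>(observers)->OnFree(address)), ...);
}

template <typename... Observers>
void NotifyAll(const std::tuple<Observers*...>& observers, void* address) {
  NotifyAllImpl(observers, std::index_sequence_for<Observers...>{}, address);
}
```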

View File

@ -1,29 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_
#include <cstddef>
namespace base::allocator::dispatcher::internal {
constexpr bool LessEqual(size_t lhs, size_t rhs) {
return lhs <= rhs;
}
constexpr bool Equal(size_t lhs, size_t rhs) {
return lhs == rhs;
}
struct IsValidObserver {
template <typename T>
constexpr bool operator()(T const* ptr) const noexcept {
return ptr != nullptr;
}
};
} // namespace base::allocator::dispatcher::internal
#endif  // BASE_ALLOCATOR_DISPATCHER_INTERNAL_TOOLS_H_

View File

@ -9,26 +9,12 @@
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
#include <pthread.h>
#endif
namespace base::allocator::dispatcher {
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_ANDROID)
pthread_key_t ReentryGuard::entered_key_ = 0;
void ReentryGuard::InitTLSSlot() {
if (entered_key_ == 0) {
int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error);
}
DCHECK(entered_key_ != 0);
}
#else
void ReentryGuard::InitTLSSlot() {}
#endif
} // namespace base::allocator::dispatcher
#endif

View File

@ -25,7 +25,7 @@ namespace base::allocator::dispatcher {
// Besides that, the implementations of thread_local on macOS and Android
// seem to allocate memory lazily on the first access to thread_local variables.
// Make use of pthread TLS instead of C++ thread_local there.
struct BASE_EXPORT ReentryGuard {
struct ReentryGuard {
ReentryGuard() : allowed_(!pthread_getspecific(entered_key_)) {
pthread_setspecific(entered_key_, reinterpret_cast<void*>(true));
}
@ -41,7 +41,10 @@ struct BASE_EXPORT ReentryGuard {
// order to acquire a low TLS slot number because glibc TLS implementation
// will require a malloc call to allocate storage for a higher slot number
// (>= PTHREAD_KEY_2NDLEVEL_SIZE == 32). c.f. heap_profiling::InitTLSSlot.
static void InitTLSSlot();
static void Init() {
int error = pthread_key_create(&entered_key_, nullptr);
CHECK(!error);
}
private:
static pthread_key_t entered_key_;
@ -55,7 +58,7 @@ struct BASE_EXPORT ReentryGuard {
struct [[maybe_unused]] BASE_EXPORT ReentryGuard {
constexpr explicit operator bool() const noexcept { return true; }
static void InitTLSSlot();
static void Init() {}
};
#endif
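
The intended call pattern for the guard, as a sketch (the hook and both helper functions are hypothetical; `Init()` is the spelling used on the right-hand side of the hunk above):

```
#include <cstddef>

#include "base/allocator/dispatcher/reentry_guard.h"

void* RealAllocate(size_t size);                   // hypothetical
void NotifyObservers(void* address, size_t size);  // hypothetical

void EarlyStartup() {
  // Call early, before any hook can fire, to acquire a low TLS slot number.
  base::allocator::dispatcher::ReentryGuard::Init();
}

void* MyAllocHook(size_t size) {
  base::allocator::dispatcher::ReentryGuard guard;
  void* const address = RealAllocate(size);
  // Only notify when this thread has not re-entered the hook; observer code
  // may itself allocate (e.g. lazy TLS initialization) and would otherwise
  // recurse forever.
  if (guard) {
    NotifyObservers(address, size);
  }
  return address;
}
```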

View File

@ -1,21 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
#define BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
namespace base::allocator::dispatcher {
// Identifiers for the memory subsystem handling the allocation. Some observers
// require more detailed information on who is performing the allocation, e.g.
// SamplingHeapProfiler.
enum class AllocationSubsystem {
// Allocation is handled by PartitionAllocator.
kPartitionAllocator = 1,
// Allocation is handled by AllocatorShims.
kAllocatorShim = 2
};
} // namespace base::allocator::dispatcher
#endif // BASE_ALLOCATOR_DISPATCHER_SUBSYSTEM_H_
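
A sketch of how an observer might branch on this identifier, e.g. to sample only PartitionAlloc allocations (the observer body is hypothetical):

```
#include <cstddef>

#include "base/allocator/dispatcher/subsystem.h"

using base::allocator::dispatcher::AllocationSubsystem;

void OnAllocation(void* address,
                  size_t size,
                  AllocationSubsystem sub_system,
                  const char* type_name) {
  if (sub_system == AllocationSubsystem::kPartitionAllocator) {
    // Record PartitionAlloc allocations only; shim-routed ones are skipped.
  }
}
```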

View File

@ -1,27 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
#include "testing/gtest/include/gtest/gtest.h"
namespace base::allocator::dispatcher::testing {
// DispatcherTest provides some common initialization which most of the
// unittests of the dispatcher require. DispatcherTest should not be used
// directly. Instead, derive your test fixture from it.
struct DispatcherTest : public ::testing::Test {
// Perform some commonly required initialization, at the moment
// - Initialize the TLS slot for the ReentryGuard
DispatcherTest();
protected:
// Protected d'tor only to prevent direct usage of this class.
~DispatcherTest() override;
};
} // namespace base::allocator::dispatcher::testing
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_DISPATCHER_TEST_H_
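
As the comment advises, test suites derive their fixture from DispatcherTest instead of using it directly; a minimal hypothetical sketch:

```
// Sketch only; fixture and test names are hypothetical.
#include "base/allocator/dispatcher/testing/dispatcher_test.h"

namespace base::allocator::dispatcher::testing {

struct MyDispatcherTest : public DispatcherTest {
  // Per-suite setup can go here; the base constructor has already
  // initialized the TLS slot for the ReentryGuard.
};

TEST_F(MyDispatcherTest, DoesNotCrashOnAllocation) {
  // ... exercise the dispatcher ...
}

}  // namespace base::allocator::dispatcher::testing
```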

View File

@ -1,32 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
#include "base/allocator/dispatcher/subsystem.h"
#include "testing/gmock/include/gmock/gmock.h"
#include <cstddef>
namespace base::allocator::dispatcher::testing {
// ObserverMock is a small mock class based on GoogleMock.
// It complies with the interface enforced by the dispatcher. The template
// parameter serves only to create distinct types of observers if required.
template <typename T = void>
struct ObserverMock {
MOCK_METHOD(void,
OnAllocation,
(void* address,
size_t size,
AllocationSubsystem sub_system,
const char* type_name),
());
MOCK_METHOD(void, OnFree, (void* address), ());
};
} // namespace base::allocator::dispatcher::testing
#endif // BASE_ALLOCATOR_DISPATCHER_TESTING_OBSERVER_MOCK_H_
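
A hedged sketch of ObserverMock in a test; the expectations are hypothetical, while the mocked methods are exactly the ones declared above:

```
#include "base/allocator/dispatcher/testing/observer_mock.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

using ::base::allocator::dispatcher::testing::ObserverMock;
using ::testing::_;

TEST(ObserverMockSketch, NotifiesOnAllocationAndFree) {
  ObserverMock<> observer;
  EXPECT_CALL(observer, OnAllocation(_, 42u, _, _)).Times(1);
  EXPECT_CALL(observer, OnFree(_)).Times(1);
  // ... route one 42-byte allocation and its free through the dispatcher ...
}
```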

View File

@ -1,50 +0,0 @@
// Copyright (c) 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
#define BASE_ALLOCATOR_DISPATCHER_TESTING_TOOLS_H_
#include <array>
#include <tuple>
#include <utility>
namespace base::allocator::dispatcher::testing {
namespace internal {
template <size_t Size, typename Type, typename... AppendedTypes>
struct DefineTupleFromSingleType {
using type = typename DefineTupleFromSingleType<Size - 1,
Type,
AppendedTypes...,
Type>::type;
};
template <typename Type, typename... AppendedTypes>
struct DefineTupleFromSingleType<0, Type, AppendedTypes...> {
using type = std::tuple<AppendedTypes...>;
};
} // namespace internal
template <size_t Size, typename Type>
struct DefineTupleFromSingleType {
using type = typename internal::DefineTupleFromSingleType<Size, Type>::type;
};
template <typename Type, size_t Size, size_t... Indices>
typename internal::DefineTupleFromSingleType<Size, Type*>::type
CreateTupleOfPointers(std::array<Type, Size>& items,
std::index_sequence<Indices...>) {
return std::make_tuple((&items[Indices])...);
}
template <typename Type, size_t Size>
typename internal::DefineTupleFromSingleType<Size, Type*>::type
CreateTupleOfPointers(std::array<Type, Size>& items) {
return CreateTupleOfPointers(items, std::make_index_sequence<Size>{});
}
} // namespace base::allocator::dispatcher::testing
#endif
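
A sketch of the two helpers above: `DefineTupleFromSingleType` repeats one type N times, and `CreateTupleOfPointers` turns an array into the tuple of pointers that the dispatcher's Initialize expects (the use of `ObserverMock` and the include paths are assumptions for illustration):

```
#include <array>
#include <tuple>
#include <type_traits>

#include "base/allocator/dispatcher/testing/observer_mock.h"
#include "base/allocator/dispatcher/testing/tools.h"

using base::allocator::dispatcher::testing::CreateTupleOfPointers;
using base::allocator::dispatcher::testing::DefineTupleFromSingleType;
using base::allocator::dispatcher::testing::ObserverMock;

// The recursion appends Type to the parameter pack Size times.
static_assert(std::is_same_v<DefineTupleFromSingleType<3, int*>::type,
                             std::tuple<int*, int*, int*>>);

void Example() {
  std::array<ObserverMock<>, 3> observers;
  // pointers has type
  // std::tuple<ObserverMock<>*, ObserverMock<>*, ObserverMock<>*>.
  auto pointers = CreateTupleOfPointers(observers);
  (void)pointers;  // silence unused-variable warnings in this sketch
}
```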

View File

@ -9,10 +9,12 @@
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
namespace base {
struct Feature;
namespace features {
// See /docs/dangling_ptr.md

View File

@ -359,6 +359,14 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
trials.emplace("PCScan_Effective_Fallback", pcscan_group_name_fallback);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
trials.emplace("FakeBinaryExperiment",
#if BUILDFLAG(USE_FAKE_BINARY_EXPERIMENT)
"Enabled"
#else
"Disabled"
#endif
);
return trials;
}

View File

@ -44,11 +44,7 @@ if (is_fuchsia) {
if (make_partition_alloc_standalone) {
partition_alloc_target_type = "component"
} else {
if (is_component_build) {
partition_alloc_target_type = "source_set"
} else {
partition_alloc_target_type = "static_library"
}
}
target(partition_alloc_target_type, "partition_alloc") {
@ -131,7 +127,6 @@ target(partition_alloc_target_type, "partition_alloc") {
"partition_alloc_base/time/time.h",
"partition_alloc_base/time/time_override.cc",
"partition_alloc_base/time/time_override.h",
"partition_alloc_base/types/strong_alias.h",
"partition_alloc_base/win/windows_types.h",
"partition_alloc_check.h",
"partition_alloc_config.h",
@ -158,7 +153,6 @@ target(partition_alloc_target_type, "partition_alloc") {
"partition_stats.h",
"partition_tag.h",
"partition_tag_bitmap.h",
"partition_tag_types.h",
"partition_tls.h",
"random.cc",
"random.h",
@ -283,13 +277,17 @@ target(partition_alloc_target_type, "partition_alloc") {
":debugging_buildflags",
":logging_buildflags",
":partition_alloc_buildflags",
"//build:branding_buildflags",
"//build/config/compiler:compiler_buildflags",
]
# TODO(https://crbug.com/1151236): Remove this dependency on Abseil once PA
# no longer includes any headers directly from base/.
deps = [ "//third_party/abseil-cpp:absl" ]
configs += [
":partition_alloc_implementation",
":memory_tagging",
]
deps = []
public_configs = []
if (is_android) {
# tagging.cc requires __arm_mte_set_* functions.
@ -347,7 +345,7 @@ buildflag_header("partition_alloc_buildflags") {
_record_alloc_info = false
# TODO(crbug.com/1151236): Need to refactor the following buildflags.
# The buildflags (except RECORD_ALLOC_INFO) are used by both chrome and
# The buildflags (expect RECORD_ALLOC_INFO) are used by both chrome and
# partition alloc. For partition alloc,
# gen/base/allocator/partition_allocator/partition_alloc_buildflags.h
# defines and partition alloc includes the header file. For chrome,
@ -363,7 +361,6 @@ buildflag_header("partition_alloc_buildflags") {
"USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",
"RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
]
}
@ -399,7 +396,6 @@ buildflag_header("debugging_buildflags") {
flags = [
"PA_DCHECK_IS_ON=$_dcheck_is_on",
"PA_EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"PA_DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
]
}
@ -412,6 +408,8 @@ group("buildflags") {
":partition_alloc_buildflags",
]
}
# TODO(crbug.com/1151236): After making partition_alloc a standalone library,
# move test code here. i.e. test("partition_alloc_tests") { ... } and
# test("partition_alloc_perftests").

View File

@ -4,19 +4,16 @@ This document describes PartitionAlloc at a high level, with some architectural
details. For implementation details, see the comments in
`partition_alloc_constants.h`.
## Quick Links
* [Glossary](./glossary.md): Definitions of terms commonly used in
PartitionAlloc. The present document largely avoids defining terms.
* [Build Config](./build_config.md): Pertinent GN args, buildflags, and
macros.
## Overview
PartitionAlloc is a memory allocator optimized for space efficiency,
allocation latency, and security.
*** note
This document largely avoids defining terms; consult the
[glossary](./glossary.md) for a complete reference.
***
### Performance
PartitionAlloc is designed to be extremely fast in its fast paths. The fast

View File

@ -9,12 +9,6 @@ each term depends mainly upon previously defined ones.
* **Partition**: A heap that is separated and protected both from other
partitions and from non-PartitionAlloc memory. Each partition holds
multiple buckets.
*** promo
**NOTE**: In code (and comments), "partition," "root," and even
"allocator" are all conceptually the same thing.
***
* **Bucket**: A collection of regions in a partition that contains
similar-sized objects. For example, one bucket may hold objects of
size (224,&nbsp;256], another (256,&nbsp;320], etc. Bucket size
@ -41,9 +35,6 @@ Buckets consist of slot spans, organized as linked lists (see below).
which are also commonly 2MiB. These have to be fully committed /
uncommitted in memory, whereas super pages can be partially committed
with system page granularity.
* **Extent**: An extent is a run of consecutive super pages (belonging
to a single partition). Extents are to super pages what slot spans are
to slots (see below).
## Slots and Spans
@ -107,22 +98,6 @@ Buckets consist of slot spans, organized as linked lists (see below).
other metadata (e.g. StarScan bitmaps) can bump the starting offset
forward. While this term is entrenched in the code, the team
considers it suboptimal and is actively looking for a replacement.
* **Allocation Fast Path**: A path taken during an allocation that is
considered fast. Usually means that an allocation request can be
immediately satisfied by grabbing a slot from the freelist of the
first active slot span in the bucket.
* **Allocation Slow Path**: Anything which is not fast (see above).
Can involve
* finding another active slot span in the list,
* provisioning more slots in a slot span,
* bringing back a free (or decommitted) slot span,
* allocating a new slot span, or even
* allocating a new super page.
*** aside
By "slow" we may mean something as simple as extra logic (`if`
statements etc.), or something as costly as system calls.
***
## PartitionAlloc-Everywhere

View File

@ -7,7 +7,6 @@
#include <stddef.h>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "build/build_config.h"
@ -46,7 +45,6 @@ struct PageCharacteristics {
std::atomic<size_t> size;
std::atomic<size_t> shift;
};
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
extern PageCharacteristics page_characteristics;
} // namespace partition_alloc::internal

View File

@ -25,10 +25,7 @@
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
#include <VersionHelpers.h> // For IsWindows8Point1OrGreater().
#endif
#endif // BUILDFLAG(IS_WIN)
namespace partition_alloc::internal {
@ -120,12 +117,10 @@ PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
}
#else
PA_ALWAYS_INLINE size_t PartitionAddressSpace::RegularPoolSize() {
return IsWindows8Point1OrGreater() ? kRegularPoolSize
: kRegularPoolSizeForLegacyWindows;
return kRegularPoolSize;
}
PA_ALWAYS_INLINE size_t PartitionAddressSpace::BRPPoolSize() {
return IsWindows8Point1OrGreater() ? kBRPPoolSize
: kBRPPoolSizeForLegacyWindows;
return kBRPPoolSize;
}
#endif // BUILDFLAG(IS_IOS)
#endif // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)

View File

@ -200,16 +200,6 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static constexpr size_t kBRPPoolSize = kPoolMaxSize;
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSize) &&
base::bits::IsPowerOfTwo(kBRPPoolSize));
#if defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
// We can't afford pool sizes as large as kPoolMaxSize on Windows <8.1 (see
// crbug.com/1101421 and crbug.com/1217759).
static constexpr size_t kRegularPoolSizeForLegacyWindows = 4 * kGiB;
static constexpr size_t kBRPPoolSizeForLegacyWindows = 4 * kGiB;
static_assert(kRegularPoolSizeForLegacyWindows < kRegularPoolSize);
static_assert(kBRPPoolSizeForLegacyWindows < kBRPPoolSize);
static_assert(base::bits::IsPowerOfTwo(kRegularPoolSizeForLegacyWindows) &&
base::bits::IsPowerOfTwo(kBRPPoolSizeForLegacyWindows));
#endif // defined(PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE)
static constexpr size_t kConfigurablePoolMaxSize = kPoolMaxSize;
static constexpr size_t kConfigurablePoolMinSize = 1 * kGiB;
static_assert(kConfigurablePoolMinSize <= kConfigurablePoolMaxSize);

View File

@ -4,7 +4,6 @@
declare_args() {
make_partition_alloc_standalone = false
use_freeslot_bitmap = false
}
# TODO(): move partition allocator's own args defined by

View File

@ -4,7 +4,17 @@
#include "base/allocator/partition_allocator/partition_alloc_base/check.h"
#include "build/build_config.h"
// check.h is a widely included header and its size has significant impact on
// build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 17000
#endif
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
#include "build/build_config.h"
namespace partition_alloc::internal::logging {

View File

@ -4,10 +4,20 @@
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
#include "build/build_config.h"
// file_path.h is a widely included header and its size has significant impact
// on build time. Try not to raise this limit unless necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 370000
#endif
#include <string.h>
#include <algorithm>
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h>

View File

@ -42,6 +42,7 @@
#include <ostream>
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
@ -83,13 +84,13 @@ void WriteToFd(int fd, const char* data, size_t length) {
} // namespace
#if BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
#if defined(DCHECK_IS_CONFIGURABLE)
// In DCHECK-enabled Chrome builds, allow the meaning of LOGGING_DCHECK to be
// determined at run-time. We default it to INFO, to avoid it triggering
// crashes before the run-time has explicitly chosen the behaviour.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
logging::LogSeverity LOGGING_DCHECK = LOGGING_INFO;
#endif // BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
#endif // defined(DCHECK_IS_CONFIGURABLE)
// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to have
// an object of the correct type on the LHS of the unused part of the ternary

View File

@ -401,11 +401,11 @@ PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern std::ostream* g_swallow_stream;
// Definitions for DCHECK et al.
#if BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
#if defined(DCHECK_IS_CONFIGURABLE)
PA_COMPONENT_EXPORT(PARTITION_ALLOC) extern LogSeverity LOGGING_DCHECK;
#else
constexpr LogSeverity LOGGING_DCHECK = LOGGING_FATAL;
#endif // BUILDFLAG(PA_DCHECK_IS_CONFIGURABLE)
#endif // defined(DCHECK_IS_CONFIGURABLE)
// Redefine the standard assert to use our nice log files
#undef assert

View File

@ -4,6 +4,15 @@
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_LINUX)
// time.h is a widely included header and its size impacts build time.
// Try not to raise this limit unless necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#pragma clang max_tokens_here 490000
#endif // BUILDFLAG(IS_LINUX)
#include <atomic>
#include <cmath>
#include <limits>
@ -12,6 +21,7 @@
#include <utility>
#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
#include "build/build_config.h"
namespace partition_alloc::internal::base {

View File

@ -18,6 +18,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/time/time_override.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "build/build_config.h"
// Ensure the Fuchsia and Mac builds do not include this module. Instead, a
// non-POSIX implementation is used for sampling the system clocks.

View File

@ -1,141 +0,0 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_
#include <type_traits>
#include <utility>
namespace partition_alloc::internal::base {
// A type-safe alternative for a typedef or a 'using' directive.
//
// C++ currently does not support type-safe typedefs, despite multiple proposals
// (ex. http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3515.pdf). The
// next best thing is to try and emulate them in library code.
//
// The motivation is to disallow several classes of errors:
//
// using Orange = int;
// using Apple = int;
// Apple apple(2);
// Orange orange = apple; // Orange should not be able to become an Apple.
// Orange x = orange + apple; // Shouldn't add Oranges and Apples.
// if (orange > apple); // Shouldn't compare Apples to Oranges.
// void foo(Orange);
// void foo(Apple); // Redefinition.
// etc.
//
// StrongAlias may instead be used as follows:
//
// using Orange = StrongAlias<class OrangeTag, int>;
// using Apple = StrongAlias<class AppleTag, int>;
// using Banana = StrongAlias<class BananaTag, std::string>;
// Apple apple(2);
// Banana banana("Hello");
// Orange orange = apple; // Does not compile.
// Orange other_orange = orange; // Compiles, types match.
// Orange x = orange + apple; // Does not compile.
// Orange y = Orange(orange.value() + apple.value()); // Compiles.
// Orange z = Orange(banana->size() + *other_orange); // Compiles.
// if (orange > apple); // Does not compile.
// if (orange > other_orange); // Compiles.
// void foo(Orange);
// void foo(Apple); // Compiles into separate overload.
//
// StrongAlias is a zero-cost abstraction, it's compiled away.
//
// TagType is an empty tag class (also called "phantom type") that only serves
// the type system to differentiate between different instantiations of the
// template.
// UnderlyingType may be almost any value type. Note that some methods of the
// StrongAlias may be unavailable (i.e. produce elaborate compilation errors when
// used) if UnderlyingType doesn't support them.
//
// StrongAlias only directly exposes comparison operators (for convenient use in
// ordered containers) and a Hasher struct (for unordered_map/set). It's
// impossible, without reflection, to expose all methods of the UnderlyingType
// in StrongAlias's interface. It's also potentially unwanted (ex. you don't
// want to be able to add two StrongAliases that represent socket handles).
// A getter and dereference operators are provided in case you need to access
// the UnderlyingType.
//
// See also
// - //styleguide/c++/blink-c++.md which provides recommendation and examples of
// using StrongAlias<Tag, bool> instead of a bare bool.
// - IdType<...> which provides helpers for specializing StrongAlias to be
// used as an id.
// - TokenType<...> which provides helpers for specializing StrongAlias to be
// used as a wrapper of base::UnguessableToken.
template <typename TagType, typename UnderlyingType>
class StrongAlias {
public:
constexpr StrongAlias() = default;
constexpr explicit StrongAlias(const UnderlyingType& v) : value_(v) {}
constexpr explicit StrongAlias(UnderlyingType&& v) noexcept
: value_(std::move(v)) {}
constexpr UnderlyingType* operator->() { return &value_; }
constexpr const UnderlyingType* operator->() const { return &value_; }
constexpr UnderlyingType& operator*() & { return value_; }
constexpr const UnderlyingType& operator*() const& { return value_; }
constexpr UnderlyingType&& operator*() && { return std::move(value_); }
constexpr const UnderlyingType&& operator*() const&& {
return std::move(value_);
}
constexpr UnderlyingType& value() & { return value_; }
constexpr const UnderlyingType& value() const& { return value_; }
constexpr UnderlyingType&& value() && { return std::move(value_); }
constexpr const UnderlyingType&& value() const&& { return std::move(value_); }
constexpr explicit operator const UnderlyingType&() const& { return value_; }
constexpr bool operator==(const StrongAlias& other) const {
return value_ == other.value_;
}
constexpr bool operator!=(const StrongAlias& other) const {
return value_ != other.value_;
}
constexpr bool operator<(const StrongAlias& other) const {
return value_ < other.value_;
}
constexpr bool operator<=(const StrongAlias& other) const {
return value_ <= other.value_;
}
constexpr bool operator>(const StrongAlias& other) const {
return value_ > other.value_;
}
constexpr bool operator>=(const StrongAlias& other) const {
return value_ >= other.value_;
}
// Hasher to use in std::unordered_map, std::unordered_set, etc.
//
// Example usage:
// using MyType = base::StrongAlias<...>;
// using MySet = std::unordered_set<MyType, typename MyType::Hasher>;
//
// https://google.github.io/styleguide/cppguide.html#std_hash asks to avoid
// defining specializations of `std::hash` - this is why the hasher needs to
// be explicitly specified and why the following code will *not* work:
// using MyType = base::StrongAlias<...>;
// using MySet = std::unordered_set<MyType>; // This won't work.
struct Hasher {
using argument_type = StrongAlias;
using result_type = std::size_t;
result_type operator()(const argument_type& id) const {
return std::hash<UnderlyingType>()(id.value());
}
};
protected:
UnderlyingType value_;
};
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_TYPES_STRONG_ALIAS_H_

View File

@ -18,7 +18,7 @@
static_assert(sizeof(void*) == 8, "");
#else
static_assert(sizeof(void*) != 8, "");
#endif // defined(ARCH_CPU_64_BITS) && !BUILDFLAG(IS_NACL)
#endif
// PCScan supports 64 bits only.
#if defined(PA_HAS_64_BITS_POINTERS)
@ -30,21 +30,13 @@ static_assert(sizeof(void*) != 8, "");
#define PA_STARSCAN_NEON_SUPPORTED
#endif
#if defined(PA_HAS_64_BITS_POINTERS) && (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN))
#if BUILDFLAG(IS_IOS)
// Use dynamically sized GigaCage. This allows to query the size at run-time,
// before initialization, instead of using a hardcoded constexpr.
//
// This is needed on iOS because iOS test processes can't handle a large cage
// (see crbug.com/1250788).
//
// This is needed on Windows, because OS versions <8.1 incur commit charge even
// on reserved address space, thus don't handle large cage well (see
// crbug.com/1101421 and crbug.com/1217759).
//
// This setting is specific to 64-bit, as 32-bit has a different implementation.
// before initialization, instead of using a hardcoded constexpr. This is needed
// on iOS because iOS test processes can't handle a large cage (see
// crbug.com/1250788).
#define PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE
#endif // defined(PA_HAS_64_BITS_POINTERS) &&
// (BUILDFLAG(IS_IOS) || BUILDFLAG(IS_WIN))
#endif
#if defined(PA_HAS_64_BITS_POINTERS) && \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID))
@ -98,7 +90,7 @@ static_assert(sizeof(void*) != 8, "");
#define PA_HAS_FAST_MUTEX
#endif
// If defined, enables zeroing memory on Free() with roughly 1% probability.
// If set to 1, enables zeroing memory on Free() with roughly 1% probability.
// This applies only to normal buckets, as direct-map allocations are always
// decommitted.
// TODO(bartekn): Re-enable once PartitionAlloc-Everywhere evaluation is done.
@ -237,7 +229,7 @@ constexpr bool kUseLazyCommit = false;
// larger slot spans.
#if BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64))
#define PA_PREFER_SMALLER_SLOT_SPANS
#endif // BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64))
#endif // BUILDFLAG(IS_LINUX)
// Build MTECheckedPtr code.
//

View File

@ -146,21 +146,6 @@ MaxRegularSlotSpanSize() {
return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
}
// The maximum size that is used in an alternate bucket distribution. After this
// threshold, we only have 1 slot per slot-span, so external fragmentation
// doesn't matter. So, using the alternate bucket distribution after this
// threshold has no benefit, and only increases internal fragmentation.
//
// We would like this to be |MaxRegularSlotSpanSize()| on all platforms, but
// it is not constexpr everywhere, so on those platforms we hardcode it, even
// though this may be too low, e.g. on systems with a page size >4KiB.
constexpr size_t kHighThresholdForAlternateDistribution =
#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
MaxRegularSlotSpanSize();
#else
1 << 16;
#endif
// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
// These chunks are called *super pages*. We do this so that we can store
// metadata in the first few pages of each 2 MiB-aligned section. This makes
@ -257,23 +242,22 @@ constexpr size_t kSuperPageAlignment = kSuperPageSize;
constexpr size_t kSuperPageOffsetMask = kSuperPageAlignment - 1;
constexpr size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
// GigaCage is generally split into two pools, one which supports BackupRefPtr
// (BRP) and one that doesn't.
// GigaCage is split into two pools, one which supports BackupRefPtr (BRP) and
// one that doesn't.
#if defined(PA_HAS_64_BITS_POINTERS)
// The 3rd, Configurable Pool is only available in 64-bit mode.
// The Configurable Pool is only available in 64-bit mode.
constexpr size_t kNumPools = 3;
// Maximum GigaCage pool size. With the exception of the Configurable Pool, it
// is also the actual size, unless PA_USE_DYNAMICALLY_SIZED_GIGA_CAGE is set,
// which allows choosing a different size at initialization time for certain
// configurations.
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
// Special-case macOS. Contrary to other platforms, there is no sandbox limit
// there, meaning that a single renderer could "happily" consume >8GiB. So the
// 8GiB pool size is a regression. Make the limit higher on this platform only
// to be consistent with previous behavior. See crbug.com/1232567 for details.
//
// Special-case Android and iOS, which incur test failures with larger
// GigaCage. Regardless, allocating >8GiB with malloc() on these platforms is
// unrealistic as of 2022.
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS)
constexpr size_t kPoolMaxSize = 8 * kGiB;
#else
// On Linux, reserving memory is not costly, and we have cases where heaps can
// grow to more than 8GiB without being a memory leak.
constexpr size_t kPoolMaxSize = 16 * kGiB;
#else
constexpr size_t kPoolMaxSize = 8 * kGiB;
#endif
#else // defined(PA_HAS_64_BITS_POINTERS)
constexpr size_t kNumPools = 2;
@ -383,10 +367,8 @@ constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// fails. This is a security choice in Chrome, to help making size_t vs int bugs
// harder to exploit.
// The definition of MaxDirectMapped depends only on constants that are
// unconditionally constexpr. Therefore it is not necessary to use
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
constexpr PA_ALWAYS_INLINE size_t MaxDirectMapped() {
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MaxDirectMapped() {
// Subtract kSuperPageSize to account for granularity inside
// PartitionRoot::GetDirectMapReservationSize.
return (1UL << 31) - kSuperPageSize;

View File

@ -176,6 +176,8 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
unsigned int flags,
size_t raw_size,
size_t slot_span_alignment) {
using ::partition_alloc::internal::ScopedUnlockGuard;
PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
base::bits::IsPowerOfTwo(slot_span_alignment));
@ -215,10 +217,6 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
PartitionDirectMapExtent<thread_safe>* map_extent = nullptr;
PartitionPage<thread_safe>* page = nullptr;
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
const PartitionTag tag = root->GetNewPartitionTag();
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
{
// Getting memory for direct-mapped allocations doesn't interact with the
// rest of the allocator, but takes a long time, as it involves several
@ -412,10 +410,6 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
map_extent->reservation_size = reservation_size;
map_extent->padding_for_alignment = padding_for_alignment;
map_extent->bucket = &metadata->bucket;
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
DirectMapPartitionTagSetValue(slot_start, tag);
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
}
root->lock_.AssertAcquired();
@ -693,7 +687,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::AllocNewSuperPage(
return 0;
// Didn't manage to get a new uncommitted super page -> address space issue.
ScopedUnlockGuard unlock{root->lock_};
::partition_alloc::internal::ScopedUnlockGuard unlock{root->lock_};
PartitionOutOfMemoryMappingFailure(root, kSuperPageSize);
}
@ -849,13 +843,14 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
PA_DCHECK(!slot_span->get_freelist_head());
PA_DCHECK(!slot_span->is_full());
size_t size = slot_size;
uintptr_t slot_span_start =
SlotSpanMetadata<thread_safe>::ToSlotSpanStart(slot_span);
// If we got here, the first unallocated slot is either partially or fully on
// an uncommitted page. If the latter, it must be at the start of that page.
uintptr_t return_slot =
slot_span_start + (slot_size * slot_span->num_allocated_slots);
uintptr_t next_slot = return_slot + slot_size;
slot_span_start + (size * slot_span->num_allocated_slots);
uintptr_t next_slot = return_slot + size;
uintptr_t commit_start = base::bits::AlignUp(return_slot, SystemPageSize());
PA_DCHECK(next_slot > commit_start);
uintptr_t commit_end = base::bits::AlignUp(next_slot, SystemPageSize());
@ -869,7 +864,7 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
slot_span->num_allocated_slots++;
// Round down, because a slot that doesn't fully fit in the new page(s) isn't
// provisioned.
size_t slots_to_provision = (commit_end - return_slot) / slot_size;
size_t slots_to_provision = (commit_end - return_slot) / size;
slot_span->num_unprovisioned_slots -= slots_to_provision;
PA_DCHECK(slot_span->num_allocated_slots +
slot_span->num_unprovisioned_slots <=
@ -887,33 +882,31 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
PageAccessibilityDisposition::kRequireUpdate);
}
if (PA_LIKELY(slot_size <= kMaxMemoryTaggingSize)) {
if (PA_LIKELY(size <= kMaxMemoryTaggingSize)) {
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
TagMemoryRangeRandomly(return_slot, slot_size);
TagMemoryRangeRandomly(return_slot, size);
}
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(return_slot, slot_size,
root->GetNewPartitionTag());
PartitionTagSetValue(return_slot, size, root->GetNewPartitionTag());
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
// Add all slots that fit within so far committed pages to the free list.
PartitionFreelistEntry* prev_entry = nullptr;
uintptr_t next_slot_end = next_slot + slot_size;
uintptr_t next_slot_end = next_slot + size;
size_t free_list_entries_added = 0;
while (next_slot_end <= commit_end) {
void* next_slot_ptr;
if (PA_LIKELY(slot_size <= kMaxMemoryTaggingSize)) {
if (PA_LIKELY(size <= kMaxMemoryTaggingSize)) {
// Ensure the MTE-tag of the memory pointed by other provisioned slot is
// unguessable. They will be returned to the app as is, and the MTE-tag
// will only change upon calling Free().
next_slot_ptr = TagMemoryRangeRandomly(next_slot, slot_size);
next_slot_ptr = TagMemoryRangeRandomly(next_slot, size);
} else {
// No MTE-tagging for larger slots, just cast.
next_slot_ptr = reinterpret_cast<void*>(next_slot);
}
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(next_slot, slot_size,
root->GetNewPartitionTag());
PartitionTagSetValue(next_slot, size, root->GetNewPartitionTag());
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
if (!slot_span->get_freelist_head()) {
@ -925,7 +918,7 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
prev_entry->SetNext(entry);
}
next_slot = next_slot_end;
next_slot_end = next_slot + slot_size;
next_slot_end = next_slot + size;
prev_entry = entry;
#if BUILDFLAG(PA_DCHECK_IS_ON)
free_list_entries_added++;
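A standalone sketch of the provisioning arithmetic above, with made-up numbers (4 KiB system pages assumed): the commit range is page-aligned around the first unallocated slot, and the slot count is rounded down so that a slot that doesn't fully fit in the committed range isn't provisioned.

```
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uintptr_t AlignUp(uintptr_t x, uintptr_t a) {
  return (x + a - 1) & ~(a - 1);
}

int main() {
  constexpr uintptr_t kSystemPageSize = 4096;  // assumption
  const size_t slot_size = 96;
  uintptr_t return_slot = 0x101FC0;  // first unallocated slot (made up)
  uintptr_t next_slot = return_slot + slot_size;

  uintptr_t commit_start = AlignUp(return_slot, kSystemPageSize);  // 0x102000
  uintptr_t commit_end = AlignUp(next_slot, kSystemPageSize);      // 0x103000
  assert(next_slot > commit_start);  // the slot straddles the page boundary

  // Round down: the last partial slot is left unprovisioned.
  size_t slots_to_provision = (commit_end - return_slot) / slot_size;
  assert(slots_to_provision == 43);
}
```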
@ -1337,7 +1330,7 @@ uintptr_t PartitionBucket<thread_safe>::SlowPathAlloc(
if (flags & AllocFlags::kReturnNull)
return 0;
// See comment in PartitionDirectMap() for unlocking.
ScopedUnlockGuard unlock{root->lock_};
::partition_alloc::internal::ScopedUnlockGuard unlock{root->lock_};
root->OutOfMemory(raw_size);
PA_IMMEDIATE_CRASH(); // Not required, kept as documentation.
}

View File

@ -241,7 +241,7 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndex(size_t size) {
//
// So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
// Distribution A, but to the 2^11 bucket under Distribution B.
if (1 << 8 < size && size < kHighThresholdForAlternateDistribution)
if (1 << 8 < size && size < 1 << 19)
return BucketIndexLookup::GetIndexForDenserBuckets(RoundUpSize(size));
return BucketIndexLookup::GetIndexForDenserBuckets(size);
}
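A toy model of the two distributions described in the comment (not Chromium's actual RoundUpSize or bucket tables): Distribution A uses quarter-spaced buckets between powers of two, while Distribution B first rounds the request up to the next power of two.

```
#include <cassert>
#include <cstddef>

size_t NextPowerOfTwo(size_t n) {
  size_t p = 1;
  while (p < n)
    p <<= 1;
  return p;
}

// Quarter-spaced buckets: 1.25*2^k, 1.5*2^k, 1.75*2^k, 2^(k+1), ...
size_t BucketA(size_t size) {
  size_t step = NextPowerOfTwo(size) / 8;
  return ((size + step - 1) / step) * step;
}

size_t BucketB(size_t size) {
  return BucketA(NextPowerOfTwo(size));
}

int main() {
  size_t request = 1434;             // ~1.4 * 2^10
  assert(BucketA(request) == 1536);  // 1.5 * 2^10 under Distribution A
  assert(BucketB(request) == 2048);  // 2^11 under Distribution B
}
```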

View File

@ -35,6 +35,8 @@ void UnmapNow(uintptr_t reservation_start,
template <bool thread_safe>
PA_ALWAYS_INLINE void PartitionDirectUnmap(
SlotSpanMetadata<thread_safe>* slot_span) {
using ::partition_alloc::internal::ScopedUnlockGuard;
auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
root->lock_.AssertAcquired();
auto* extent = PartitionDirectMapExtent<thread_safe>::FromSlotSpan(slot_span);

View File

@ -25,7 +25,6 @@
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h"
@ -222,10 +221,6 @@ struct SlotSpanMetadata {
PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
PA_ALWAYS_INLINE size_t GetRawSize() const;
// Only meaningful when `this` refers to a slot span in a direct map
// bucket.
PA_ALWAYS_INLINE PartitionTag* DirectMapMTETag();
PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
return freelist_head;
}
@ -339,13 +334,6 @@ struct SubsequentPageMetadata {
// the first one is used to store slot information, but the second one is
// available for extra information)
size_t raw_size;
// Specific to when `this` is used in a direct map bucket. Since direct
// maps don't have as many tags as the typical normal bucket slot span,
// we can get away with just hiding the sole tag in here.
//
// See `//base/memory/mtecheckedptr.md` for details.
PartitionTag direct_map_tag;
};
// Each partition page has metadata associated with it. The metadata of the
@ -419,16 +407,6 @@ PA_ALWAYS_INLINE PartitionPage<thread_safe>* PartitionSuperPageToMetadataArea(
SystemPageSize());
}
PA_ALWAYS_INLINE const SubsequentPageMetadata* GetSubsequentPageMetadata(
const PartitionPage<ThreadSafe>* page) {
return &(page + 1)->subsequent_page_metadata;
}
PA_ALWAYS_INLINE SubsequentPageMetadata* GetSubsequentPageMetadata(
PartitionPage<ThreadSafe>* page) {
return &(page + 1)->subsequent_page_metadata;
}
template <bool thread_safe>
PA_ALWAYS_INLINE PartitionSuperPageExtentEntry<thread_safe>*
PartitionSuperPageToExtent(uintptr_t super_page) {
@ -688,26 +666,16 @@ template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetRawSize(
size_t raw_size) {
PA_DCHECK(CanStoreRawSize());
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
reinterpret_cast<PartitionPage<thread_safe>*>(this));
subsequent_page_metadata->raw_size = raw_size;
auto* the_next_page = reinterpret_cast<PartitionPage<thread_safe>*>(this) + 1;
the_next_page->subsequent_page_metadata.raw_size = raw_size;
}
template <bool thread_safe>
PA_ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
PA_DCHECK(CanStoreRawSize());
const auto* subsequent_page_metadata = GetSubsequentPageMetadata(
reinterpret_cast<const PartitionPage<thread_safe>*>(this));
return subsequent_page_metadata->raw_size;
}
template <bool thread_safe>
PA_ALWAYS_INLINE PartitionTag*
SlotSpanMetadata<thread_safe>::DirectMapMTETag() {
PA_DCHECK(bucket->is_direct_mapped());
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
reinterpret_cast<PartitionPage<thread_safe>*>(this));
return &subsequent_page_metadata->direct_map_tag;
auto* the_next_page =
reinterpret_cast<const PartitionPage<thread_safe>*>(this) + 1;
return the_next_page->subsequent_page_metadata.raw_size;
}
template <bool thread_safe>

View File

@ -278,14 +278,6 @@ void PartitionAllocMallocHookOnAfterForkInChild() {
namespace internal {
namespace {
constexpr size_t kMaxPurgeableSlotsPerSystemPage = 2;
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MaxPurgeableSlotSize() {
return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage;
}
} // namespace
template <bool thread_safe>
static size_t PartitionPurgeSlotSpan(
internal::SlotSpanMetadata<thread_safe>* slot_span,
@ -293,11 +285,7 @@ static size_t PartitionPurgeSlotSpan(
auto* root = PartitionRoot<thread_safe>::FromSlotSpan(slot_span);
const internal::PartitionBucket<thread_safe>* bucket = slot_span->bucket;
size_t slot_size = bucket->slot_size;
// We will do nothing if slot_size is smaller than SystemPageSize() / 2
// because |kMaxSlotCount| will be too large in that case, which leads to
// |slot_usage| using up too much memory.
if (slot_size < MaxPurgeableSlotSize() || !slot_span->num_allocated_slots)
if (slot_size < SystemPageSize() || !slot_span->num_allocated_slots)
return 0;
size_t bucket_num_slots = bucket->get_slots_per_span();
@ -320,7 +308,7 @@ static size_t PartitionPurgeSlotSpan(
#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
constexpr size_t kMaxSlotCount =
(PartitionPageSize() * kMaxPartitionPagesPerRegularSlotSpan) /
MaxPurgeableSlotSize();
SystemPageSize();
#elif BUILDFLAG(IS_APPLE) || (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
// It's better for slot_usage to be stack-allocated and fixed-size, which
// demands that its size be constexpr. On IS_APPLE and Linux on arm64,
@ -328,11 +316,10 @@ static size_t PartitionPurgeSlotSpan(
// what the run time page size is, kMaxSlotCount can always be simplified
// to this expression.
constexpr size_t kMaxSlotCount =
4 * kMaxPurgeableSlotsPerSystemPage *
internal::kMaxPartitionPagesPerRegularSlotSpan;
4 * internal::kMaxPartitionPagesPerRegularSlotSpan;
PA_CHECK(kMaxSlotCount == (PartitionPageSize() *
internal::kMaxPartitionPagesPerRegularSlotSpan) /
MaxPurgeableSlotSize());
SystemPageSize());
#endif
PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
PA_DCHECK(slot_span->num_unprovisioned_slots < bucket_num_slots);
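The constexpr and fallback expressions above agree whenever a partition page is four system pages, which is what the PA_CHECK enforces. A standalone sketch with assumed sizes:

```
#include <cstddef>

constexpr size_t kSystemPageSize = 4096;                    // assumption
constexpr size_t kPartitionPageSize = 4 * kSystemPageSize;  // assumption
constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;  // assumption

static_assert(kPartitionPageSize * kMaxPartitionPagesPerRegularSlotSpan /
                      kSystemPageSize ==
                  4 * kMaxPartitionPagesPerRegularSlotSpan,
              "the two kMaxSlotCount expressions match");
```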
@ -350,10 +337,10 @@ static size_t PartitionPurgeSlotSpan(
// slots are not in use.
for (PartitionFreelistEntry* entry = slot_span->get_freelist_head(); entry;
/**/) {
size_t slot_number =
bucket->GetSlotNumber(SlotStartPtr2Addr(entry) - slot_span_start);
PA_DCHECK(slot_number < num_slots);
slot_usage[slot_number] = 0;
size_t slot_index =
(SlotStartPtr2Addr(entry) - slot_span_start) / slot_size;
PA_DCHECK(slot_index < num_slots);
slot_usage[slot_index] = 0;
#if !BUILDFLAG(IS_WIN)
// If we have a slot where the encoded next pointer is 0, we can actually
// discard that entry because touching a discarded page is guaranteed to
@ -361,7 +348,7 @@ static size_t PartitionPurgeSlotSpan(
// effective on big-endian machines because the masking function is
// negation.)
if (entry->IsEncodedNextPtrZero())
last_slot = slot_number;
last_slot = slot_index;
#endif
entry = entry->GetNext(slot_size);
}
@ -380,20 +367,7 @@ static size_t PartitionPurgeSlotSpan(
size_t unprovisioned_bytes = 0;
uintptr_t begin_addr = slot_span_start + (num_slots * slot_size);
uintptr_t end_addr = begin_addr + (slot_size * truncated_slots);
// The slots that do not contain discarded pages should not be included in
// |truncated_slots|. Detect those slots and fix |truncated_slots| and
// |num_slots| accordingly.
uintptr_t rounded_up_begin_addr = RoundUpToSystemPage(begin_addr);
for (size_t i = 0; i < kMaxPurgeableSlotsPerSystemPage; ++i) {
begin_addr += slot_size;
if (RoundUpToSystemPage(begin_addr) != rounded_up_begin_addr)
break;
--truncated_slots;
++num_slots;
}
begin_addr = rounded_up_begin_addr;
begin_addr = RoundUpToSystemPage(begin_addr);
// We round the end address here up and not down because we're at the end of
// a slot span, so we "own" all the way up the page boundary.
end_addr = RoundUpToSystemPage(end_addr);
@ -441,55 +415,25 @@ static size_t PartitionPurgeSlotSpan(
}
}
if (slot_size < SystemPageSize()) {
return discardable_bytes;
}
// Next, walk the slots and for any not in use, consider which system pages
// are no longer needed. We can release any system pages back to the system as
// long as we don't interfere with a freelist pointer or an adjacent used
// slot.
// Next, walk the slots and for any not in use, consider where the system page
// boundaries occur. We can release any system pages back to the system as
// long as we don't interfere with a freelist pointer or an adjacent slot.
for (size_t i = 0; i < num_slots; ++i) {
if (slot_usage[i]) {
if (slot_usage[i])
continue;
}
// The first address we can safely discard is just after the freelist
// pointer. There's one quirk: if the freelist pointer is actually nullptr,
// we can discard that pointer value too.
uintptr_t begin_addr = slot_span_start + (i * slot_size);
uintptr_t end_addr = begin_addr + slot_size;
bool can_discard_free_list_pointer = false;
#if !BUILDFLAG(IS_WIN)
if (i != last_slot) {
if (i != last_slot)
begin_addr += sizeof(internal::PartitionFreelistEntry);
} else {
can_discard_free_list_pointer = true;
}
#else
begin_addr += sizeof(internal::PartitionFreelistEntry);
#endif
uintptr_t rounded_up_begin_addr = RoundUpToSystemPage(begin_addr);
uintptr_t rounded_down_begin_addr = RoundDownToSystemPage(begin_addr);
begin_addr = RoundUpToSystemPage(begin_addr);
end_addr = RoundDownToSystemPage(end_addr);
// |rounded_up_begin_addr| could be greater than |end_addr| only if slot
// size was less than system page size, or if free list pointer crossed the
// page boundary. Neither is possible here.
PA_DCHECK(rounded_up_begin_addr <= end_addr);
if (rounded_down_begin_addr < rounded_up_begin_addr && i != 0 &&
!slot_usage[i - 1] && can_discard_free_list_pointer) {
// This slot contains a partial page in the beginning. The rest of that
// page is contained in the slot[i-1], which is also discardable.
// Therefore we can discard this page.
begin_addr = rounded_down_begin_addr;
} else {
begin_addr = rounded_up_begin_addr;
}
if (begin_addr < end_addr) {
size_t partial_slot_bytes = end_addr - begin_addr;
discardable_bytes += partial_slot_bytes;
@ -499,7 +443,6 @@ static size_t PartitionPurgeSlotSpan(
}
}
}
return discardable_bytes;
}
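A standalone sketch of the per-slot rounding above, with made-up numbers (4 KiB pages and an 8-byte freelist pointer assumed): only whole system pages strictly inside the free slot, past the freelist pointer, can be discarded.

```
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uintptr_t kPage = 4096;  // assumption: 4 KiB system pages
constexpr uintptr_t RoundUpToPage(uintptr_t a) {
  return (a + kPage - 1) & ~(kPage - 1);
}
constexpr uintptr_t RoundDownToPage(uintptr_t a) {
  return a & ~(kPage - 1);
}

int main() {
  const size_t kFreelistPointerSize = 8;  // assumption
  uintptr_t slot_start = 0x200800;        // free slot (made-up address)
  size_t slot_size = 3 * kPage;

  // Keep the freelist pointer at the start of the slot intact.
  uintptr_t begin = RoundUpToPage(slot_start + kFreelistPointerSize);
  uintptr_t end = RoundDownToPage(slot_start + slot_size);
  assert(begin < end);
  assert(end - begin == 2 * kPage);  // two whole pages can be discarded
}
```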
@ -1001,7 +944,7 @@ bool PartitionRoot<thread_safe>::TryReallocInPlaceForNormalBuckets(
// new size is a significant percentage smaller. We could do the same if we
// determine it is a win.
if (AllocationCapacityFromRequestedSize(new_size) !=
AllocationCapacityFromSlotStart(slot_start))
AllocationCapacityFromPtr(object))
return false;
// Trying to allocate |new_size| would use the same amount of underlying
@ -1145,7 +1088,7 @@ void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
if (bucket.slot_size == internal::kInvalidBucketSize)
continue;
if (bucket.slot_size >= internal::MaxPurgeableSlotSize())
if (bucket.slot_size >= internal::SystemPageSize())
internal::PartitionPurgeBucket(&bucket);
else
bucket.SortSlotSpanFreelists();

View File

@ -64,7 +64,6 @@
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/state_bitmap.h"
@ -92,9 +91,7 @@ template <typename Z>
static constexpr bool offset_type =
std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
// We want this size to be big enough that we have time to start up other
// scripts _before_ we wrap around.
static constexpr size_t kAllocInfoSize = 1 << 24;
static constexpr size_t kAllocInfoSize = 1 << 20;
struct AllocInfo {
std::atomic<size_t> index{0};
@ -510,8 +507,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PA_ALWAYS_INLINE static size_t GetUsableSize(void* ptr);
PA_ALWAYS_INLINE size_t
AllocationCapacityFromSlotStart(uintptr_t slot_start) const;
PA_ALWAYS_INLINE size_t AllocationCapacityFromPtr(void* ptr) const;
PA_ALWAYS_INLINE size_t
AllocationCapacityFromRequestedSize(size_t size) const;
@ -1191,8 +1187,10 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
if (!root->IsDirectMappedBucket(slot_span->bucket)) {
size_t slot_size_less_extras =
root->AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
partition_alloc::internal::PartitionTagIncrementValue(
slot_start, slot_span->bucket->slot_size);
object, slot_size_less_extras);
}
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
@ -1591,7 +1589,6 @@ PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::TryRecommitSystemPagesForData(
}
// static
//
// Returns the size available to the app. It can be equal or higher than the
// requested size. If higher, the overage won't exceed what's actually usable
// by the app without a risk of running out of an allocated region or into
@ -1617,8 +1614,8 @@ PA_ALWAYS_INLINE size_t PartitionRoot<thread_safe>::GetUsableSize(void* ptr) {
// the same amount of underlying memory.
template <bool thread_safe>
PA_ALWAYS_INLINE size_t
PartitionRoot<thread_safe>::AllocationCapacityFromSlotStart(
uintptr_t slot_start) const {
PartitionRoot<thread_safe>::AllocationCapacityFromPtr(void* object) const {
uintptr_t slot_start = ObjectToSlotStart(object);
auto* slot_span = SlotSpan::FromSlotStart(slot_start);
return AdjustSizeForExtrasSubtract(slot_span->bucket->slot_size);
}

View File

@ -13,12 +13,10 @@
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
@ -27,11 +25,19 @@ namespace partition_alloc {
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
// Use 8 bits for the partition tag.
// TODO(tasak): add a description about the partition tag.
using PartitionTag = uint8_t;
static_assert(
sizeof(PartitionTag) == internal::tag_bitmap::kPartitionTagSize,
"sizeof(PartitionTag) must be equal to bitmap::kPartitionTagSize.");
PA_ALWAYS_INLINE PartitionTag* NormalBucketPartitionTagPointer(uintptr_t addr) {
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
// TODO(crbug.com/1307514): Add direct map support. For now, just assume
// that direct maps don't have tags.
PA_DCHECK(internal::IsManagedByNormalBuckets(addr));
uintptr_t bitmap_base =
internal::SuperPageTagBitmapAddr(addr & internal::kSuperPageBaseMask);
const size_t bitmap_end_offset =
@ -46,23 +52,6 @@ PA_ALWAYS_INLINE PartitionTag* NormalBucketPartitionTagPointer(uintptr_t addr) {
return reinterpret_cast<PartitionTag*>(bitmap_base + offset_in_bitmap);
}
PA_ALWAYS_INLINE PartitionTag* DirectMapPartitionTagPointer(uintptr_t addr) {
uintptr_t first_super_page = internal::GetDirectMapReservationStart(addr);
PA_DCHECK(first_super_page) << "not managed by a direct map: " << addr;
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
internal::PartitionSuperPageToMetadataArea<internal::ThreadSafe>(
first_super_page));
return &subsequent_page_metadata->direct_map_tag;
}
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
// UNLIKELY because direct maps are far less common than normal buckets.
if (PA_UNLIKELY(internal::IsManagedByDirectMap(addr))) {
return DirectMapPartitionTagPointer(addr);
}
return NormalBucketPartitionTagPointer(addr);
}
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
// Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
// from the pointer. Whereas, PartitionTagPointer relates to software MTE
@ -72,18 +61,12 @@ PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
namespace internal {
PA_ALWAYS_INLINE void DirectMapPartitionTagSetValue(uintptr_t addr,
PartitionTag value) {
*DirectMapPartitionTagPointer(addr) = value;
}
PA_ALWAYS_INLINE void NormalBucketPartitionTagSetValue(uintptr_t slot_start,
PA_ALWAYS_INLINE void PartitionTagSetValue(uintptr_t addr,
size_t size,
PartitionTag value) {
PA_DCHECK((size % tag_bitmap::kBytesPerPartitionTag) == 0);
PA_DCHECK((slot_start % tag_bitmap::kBytesPerPartitionTag) == 0);
size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
PartitionTag* tag_ptr = NormalBucketPartitionTagPointer(slot_start);
PartitionTag* tag_ptr = PartitionTagPointer(addr);
if (sizeof(PartitionTag) == 1) {
memset(tag_ptr, value, tag_count);
} else {
@ -92,34 +75,50 @@ PA_ALWAYS_INLINE void NormalBucketPartitionTagSetValue(uintptr_t slot_start,
}
}
PA_ALWAYS_INLINE void PartitionTagSetValue(void* ptr,
size_t size,
PartitionTag value) {
// Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
// from the pointer. Whereas, PartitionTagSetValue relates to software MTE
// (i.e. MTECheckedPtr) and it sets the in-memory tag.
PartitionTagSetValue(UntagPtr(ptr), size, value);
}
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void* ptr) {
return *PartitionTagPointer(ptr);
}
PA_ALWAYS_INLINE void PartitionTagIncrementValue(uintptr_t slot_start,
size_t size) {
PartitionTag tag = *PartitionTagPointer(slot_start);
PA_ALWAYS_INLINE void PartitionTagClearValue(void* ptr, size_t size) {
size_t tag_region_size = size >> tag_bitmap::kBytesPerPartitionTagShift
<< tag_bitmap::kPartitionTagSizeShift;
PA_DCHECK(!memchr(PartitionTagPointer(ptr), 0, tag_region_size));
memset(PartitionTagPointer(ptr), 0, tag_region_size);
}
PA_ALWAYS_INLINE void PartitionTagIncrementValue(void* ptr, size_t size) {
PartitionTag tag = PartitionTagGetValue(ptr);
PartitionTag new_tag = tag;
++new_tag;
new_tag += !new_tag; // Avoid 0.
#if BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
// This verifies that tags for the entire slot have the same value and that
// |size| doesn't exceed the slot size.
size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
PartitionTag* tag_ptr = PartitionTagPointer(slot_start);
PartitionTag* tag_ptr = PartitionTagPointer(ptr);
while (tag_count-- > 0) {
PA_DCHECK(tag == *tag_ptr);
tag_ptr++;
}
#endif
NormalBucketPartitionTagSetValue(slot_start, size, new_tag);
PartitionTagSetValue(ptr, size, new_tag);
}
} // namespace internal
#else // No-op versions
using PartitionTag = uint8_t;
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(void* ptr) {
PA_NOTREACHED();
return nullptr;
@ -127,12 +126,15 @@ PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(void* ptr) {
namespace internal {
PA_ALWAYS_INLINE void PartitionTagSetValue(void*, size_t, PartitionTag) {}
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void*) {
return 0;
}
PA_ALWAYS_INLINE void PartitionTagIncrementValue(uintptr_t slot_start,
size_t size) {}
PA_ALWAYS_INLINE void PartitionTagClearValue(void* ptr, size_t) {}
PA_ALWAYS_INLINE void PartitionTagIncrementValue(void* ptr, size_t size) {}
} // namespace internal
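The increment logic above steers around zero with `new_tag += !new_tag;`. A standalone sketch of just that trick (assuming, as in the header, an 8-bit tag):

```
#include <cassert>
#include <cstdint>

uint8_t NextTag(uint8_t tag) {
  uint8_t new_tag = tag;
  ++new_tag;
  new_tag += !new_tag;  // skip 0 on wraparound
  return new_tag;
}

int main() {
  assert(NextTag(1) == 2);
  assert(NextTag(254) == 255);
  assert(NextTag(255) == 1);  // 255 wraps to 0, then bumps to 1
}
```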

View File

@ -1,25 +0,0 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
#include <cstdint>
// This header defines the types for MTECheckedPtr. Canonical
// documentation available at `//base/memory/raw_ptr_mtecheckedptr.md`.
namespace partition_alloc {
// Use 8 bits for the partition tag. This is the "lower" byte of the
// two top bytes in a 64-bit pointer. The "upper" byte of the same
// is reserved for true ARM MTE.
//
// MTECheckedPtr is not yet compatible with ARM MTE, but it is a
// distant goal to have them coexist.
using PartitionTag = uint8_t;
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_

View File

@ -568,13 +568,24 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
// Everything below requires this alignment.
static_assert(internal::kAlignment == 16, "");
#if PA_HAS_BUILTIN(__builtin_assume_aligned)
// Cast back to uintptr_t, because we need it for pointer arithmetic. Make
// sure it gets MTE-tagged, as we cast it later to a pointer and dereference.
uintptr_t address_tagged =
reinterpret_cast<uintptr_t>(__builtin_assume_aligned(
internal::SlotStartAddr2Ptr(slot_start), internal::kAlignment));
#else
uintptr_t address_tagged =
reinterpret_cast<uintptr_t>(internal::SlotStartAddr2Ptr(slot_start));
#endif
// The pointer is always 16 bytes aligned, so its start address is always == 0
// % 16. Its distance to the next cacheline is
// `64 - ((slot_start & 63) / 16) * 16`
// % 16. Its distance to the next cacheline is `64 - ((address_tagged & 63) /
// 16) * 16`.
static_assert(
internal::kPartitionCachelineSize == 64,
"The computation below assumes that cache lines are 64 bytes long.");
int distance_to_next_cacheline_in_16_bytes = 4 - ((slot_start >> 4) & 3);
int distance_to_next_cacheline_in_16_bytes = 4 - ((address_tagged >> 4) & 3);
int slot_size_remaining_in_16_bytes =
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// When BRP is on in the "previous slot" mode, this slot may have a BRP
@ -590,14 +601,8 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
static const uint32_t poison_16_bytes[4] = {0xbadbad00, 0xbadbad00,
0xbadbad00, 0xbadbad00};
// Give a hint to the compiler in hope it'll vectorize the loop.
#if PA_HAS_BUILTIN(__builtin_assume_aligned)
void* slot_start_tagged = __builtin_assume_aligned(
internal::SlotStartAddr2Ptr(slot_start), internal::kAlignment);
#else
void* slot_start_tagged = internal::SlotStartAddr2Ptr(slot_start);
#endif
uint32_t* address_aligned = static_cast<uint32_t*>(slot_start_tagged);
// Already MTE-tagged above, so safe to dereference.
uint32_t* address_aligned = reinterpret_cast<uint32_t*>(address_tagged);
for (int i = 0; i < slot_size_remaining_in_16_bytes; i++) {
// Clang will expand the memcpy to a 16-byte write (movups on x86).
memcpy(address_aligned, poison_16_bytes, sizeof(poison_16_bytes));
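A standalone check of the cacheline arithmetic used above: for 16-byte-aligned addresses and 64-byte cache lines, `4 - ((addr >> 4) & 3)` is the number of 16-byte chunks up to the next cacheline boundary.

```
#include <cassert>
#include <cstdint>

int main() {
  for (uintptr_t addr = 0; addr < 256; addr += 16) {
    int distance_in_16_bytes = 4 - ((addr >> 4) & 3);
    uintptr_t next_boundary = (addr / 64) * 64 + 64;
    assert(addr + 16 * distance_in_16_bytes == next_boundary);
  }
}
```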

View File

@ -9,13 +9,12 @@ namespace base {
namespace android {
// Defines the state of bindings with the child process. See ChildProcessConnection
// to see what the bindings are.
// to see what the bindings are. Note these values are used as array indices.
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
enum class ChildBindingState {
UNBOUND,
WAIVED,
NOT_PERCEPTIBLE,
VISIBLE,
MODERATE,
STRONG,
MAX_VALUE = STRONG
};

View File

@ -104,12 +104,12 @@ static void JNI_EarlyTraceEvent_RecordEarlyAsyncBeginEvent(
jlong id,
jlong time_ns) {
std::string name = ConvertJavaStringToUTF8(env, jname);
TRACE_EVENT_BEGIN(internal::kJavaTraceCategory, nullptr,
perfetto::Track(static_cast<uint64_t>(id)),
TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0(
internal::kJavaTraceCategory, name.c_str(),
TRACE_ID_LOCAL(static_cast<uint64_t>(id)),
TimeTicks::FromJavaNanoTime(time_ns),
[&](::perfetto::EventContext& ctx) {
ctx.event()->set_name(name.c_str());
});
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
static void JNI_EarlyTraceEvent_RecordEarlyAsyncEndEvent(
@ -118,8 +118,12 @@ static void JNI_EarlyTraceEvent_RecordEarlyAsyncEndEvent(
jlong id,
jlong time_ns) {
std::string name = ConvertJavaStringToUTF8(env, jname);
TRACE_EVENT_END(internal::kJavaTraceCategory,
perfetto::Track(static_cast<uint64_t>(id)));
TRACE_EVENT_NESTABLE_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(
internal::kJavaTraceCategory, name.c_str(),
TRACE_ID_LOCAL(static_cast<uint64_t>(id)),
TimeTicks::FromJavaNanoTime(time_ns),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
bool GetBackgroundStartupTracingFlag() {

View File

@ -319,10 +319,6 @@ def _StripGenerics(value):
return ''.join(out)
def _NameIsTestOnly(name):
return name.endswith('ForTest') or name.endswith('ForTesting')
class JniParams(object):
"""Get JNI related parameters."""
@ -685,8 +681,7 @@ RE_SCOPED_JNI_TYPES = re.compile('jobject|jclass|jstring|jthrowable|.*Array')
# Regex to match a string like "@CalledByNative public void foo(int bar)".
RE_CALLED_BY_NATIVE = re.compile(
r'@CalledByNative((?P<Unchecked>(?:Unchecked)?|ForTesting))'
r'(?:\("(?P<annotation>.*)"\))?'
r'@CalledByNative(?P<Unchecked>(?:Unchecked)?)(?:\("(?P<annotation>.*)"\))?'
r'(?:\s+@\w+(?:\(.*\))?)*' # Ignore any other annotations.
r'\s+(?P<prefix>('
r'(private|protected|public|static|abstract|final|default|synchronized)'
@ -897,20 +892,8 @@ class ProxyHelpers(object):
if not isinstance(hash_b64, str):
hash_b64 = hash_b64.decode()
long_hash = ('M' + hash_b64).rstrip('=')
hashed_name = long_hash[:ProxyHelpers.MAX_CHARS_FOR_HASHED_NATIVE_METHODS]
# If the method is a test-only method, we don't care about saving size on
# the method name, since it shouldn't show up in the binary. Additionally,
# if we just hash the name, our checkers which enforce that we have no
# "ForTesting" methods by checking for the suffix "ForTesting" will miss
# these. We could preserve the name entirely and not hash anything, but
# that risks collisions. So, instead, we just append "ForTesting" to any
# test-only hashes, to ensure we catch any test-only methods that
# shouldn't be in our final binary.
if _NameIsTestOnly(method_name):
return hashed_name + '_ForTesting'
return hashed_name
hashed_name = ('M' + hash_b64).rstrip('=')
return hashed_name[0:ProxyHelpers.MAX_CHARS_FOR_HASHED_NATIVE_METHODS]
@staticmethod
def CreateProxyMethodName(fully_qualified_class, old_name, use_hash=False):
@ -928,18 +911,12 @@ class ProxyHelpers(object):
return EscapeClassName(fully_qualified_class + '/' + old_name)
@staticmethod
def ExtractStaticProxyNatives(fully_qualified_class,
contents,
ptr_type,
include_test_only=True):
def ExtractStaticProxyNatives(fully_qualified_class, contents, ptr_type):
methods = []
for match in _NATIVE_PROXY_EXTRACTION_REGEX.finditer(contents):
interface_body = match.group('interface_body')
for method in _EXTRACT_METHODS_REGEX.finditer(interface_body):
name = method.group('name')
if not include_test_only and _NameIsTestOnly(name):
continue
params = JniParams.Parse(method.group('params'), use_proxy_types=True)
return_type = JavaTypeToProxyCast(method.group('return_type'))
proxy_name = ProxyHelpers.CreateProxyMethodName(fully_qualified_class,
@ -1637,9 +1614,6 @@ See SampleForTests.java for more details.
parser.add_argument('--unchecked_exceptions',
action='store_true',
help='Do not check that no exceptions were thrown.')
parser.add_argument('--include_test_only',
action='store_true',
help='Whether to maintain ForTesting JNI methods.')
parser.add_argument(
'--use_proxy_hash',
action='store_true',

View File

@ -62,7 +62,6 @@ class TestOptions(object):
self.always_mangle = False
self.unchecked_exceptions = False
self.split_name = None
self.include_test_only = True
class BaseTest(unittest.TestCase):
@ -584,9 +583,6 @@ class TestGenerator(BaseTest):
@CalledByNative
public List<Bitmap.CompressFormat> getCompressFormatList();
@CalledByNativeForTesting
public int[] returnIntArrayForTesting();
"""
jni_params = jni_generator.JniParams('org/chromium/Foo')
jni_params.ExtractImportsAndInnerClasses(test_data)
@ -826,17 +822,6 @@ class TestGenerator(BaseTest):
env_call=('Void', ''),
unchecked=False,
),
CalledByNative(
return_type='int[]',
system_class=False,
static=False,
name='returnIntArrayForTesting',
method_id_var_name='returnIntArrayForTesting',
java_class_name='',
params=[],
env_call=('Void', ''),
unchecked=False,
),
]
self.AssertListEquals(golden_called_by_natives, called_by_natives)
h = jni_generator.InlHeaderFileGenerator('', 'org/chromium/TestJni', [],
@ -1401,59 +1386,6 @@ class ProxyTestGenerator(BaseTest):
self.AssertListEquals(_RemoveHashedNames(natives), golden_natives)
def testForTestingKept(self):
test_data = """
class SampleProxyJni {
@NativeMethods
interface Natives {
void fooForTesting();
void fooForTest();
}
}
"""
qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long', True)
golden_natives = [
NativeMethod(
return_type='void',
static=True,
name='fooForTesting',
params=[],
java_class_name=None,
is_proxy=True,
proxy_name='org_chromium_example_SampleProxyJni_fooForTesting'),
NativeMethod(
return_type='void',
static=True,
name='fooForTest',
params=[],
java_class_name=None,
is_proxy=True,
proxy_name='org_chromium_example_SampleProxyJni_fooForTest'),
]
self.AssertListEquals(_RemoveHashedNames(natives), golden_natives)
def testForTestingRemoved(self):
test_data = """
class SampleProxyJni {
@NativeMethods
interface Natives {
void fooForTesting();
void fooForTest();
}
}
"""
qualified_clazz = 'org/chromium/example/SampleProxyJni'
natives = jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
qualified_clazz, test_data, 'long', False)
self.AssertListEquals(_RemoveHashedNames(natives), [])
def testProxyNativesMainDex(self):
test_data = """
@MainDex

View File

@ -41,8 +41,7 @@ def _Generate(java_file_paths,
srcjar_path,
proxy_opts,
header_path=None,
namespace='',
include_test_only=True):
namespace=''):
"""Generates files required to perform JNI registration.
Generates a srcjar containing a single class, GEN_JNI, that contains all
@ -67,8 +66,7 @@ def _Generate(java_file_paths,
_DictForPath,
use_proxy_hash=proxy_opts.use_hash,
enable_jni_multiplexing=proxy_opts.enable_jni_multiplexing,
namespace=namespace,
include_test_only=include_test_only), java_file_paths):
namespace=namespace), java_file_paths):
if d:
results.append(d)
@ -137,8 +135,7 @@ def _Generate(java_file_paths,
def _DictForPath(path,
use_proxy_hash=False,
enable_jni_multiplexing=False,
namespace='',
include_test_only=True):
namespace=''):
with open(path) as f:
contents = jni_generator.RemoveComments(f.read())
if '@JniIgnoreNatives' in contents:
@ -151,8 +148,7 @@ def _DictForPath(path,
natives += jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
fully_qualified_class=fully_qualified_class,
contents=contents,
ptr_type='long',
include_test_only=include_test_only)
ptr_type='long')
if len(natives) == 0:
return None
# The namespace for the content is separate from the namespace for the
@ -922,9 +918,6 @@ def main(argv):
'--manual_jni_registration',
action='store_true',
help='Manually do JNI registration - required for crazy linker')
arg_parser.add_argument('--include_test_only',
action='store_true',
help='Whether to maintain ForTesting JNI methods.')
args = arg_parser.parse_args(build_utils.ExpandFileArgs(argv[1:]))
if not args.enable_proxy_mocks and args.require_mocks:
@ -955,8 +948,7 @@ def main(argv):
args.srcjar_path,
proxy_opts=proxy_opts,
header_path=args.header_path,
namespace=args.namespace,
include_test_only=args.include_test_only)
namespace=args.namespace)
if args.depfile:
build_utils.WriteDepfile(args.depfile, args.srcjar_path,

View File

@ -92,10 +92,13 @@ void RunJavaTask(base::android::ScopedJavaGlobalRef<jobject> task,
// JNIEnv is thread specific, but we don't know which thread we'll be run on
// so we must look it up.
std::string event_name = base::StrCat({"JniPostTask: ", runnable_class_name});
TRACE_EVENT("toplevel", nullptr, [&](::perfetto::EventContext& ctx) {
ctx.event()->set_name(event_name.c_str());
});
TRACE_EVENT_BEGIN_WITH_FLAGS0(
"toplevel", event_name.c_str(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
JNI_Runnable::Java_Runnable_run(base::android::AttachCurrentThread(), task);
TRACE_EVENT_END_WITH_FLAGS0(
"toplevel", event_name.c_str(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
} // namespace

View File

@ -187,7 +187,7 @@ class TraceEventDataConverter {
// Return saved values to pass to TRACE_EVENT macros.
const char* name() { return name_.c_str(); }
const char* arg_name() { return has_arg_ ? "arg" : nullptr; }
const std::string& arg() { return arg_; }
const char* arg() { return has_arg_ ? arg_.c_str() : nullptr; }
private:
std::string name_;
@ -201,17 +201,17 @@ static void JNI_TraceEvent_Instant(JNIEnv* env,
const JavaParamRef<jstring>& jname,
const JavaParamRef<jstring>& jarg) {
TraceEventDataConverter converter(env, jname, jarg);
if (converter.arg_name()) {
TRACE_EVENT_INSTANT(internal::kJavaTraceCategory, nullptr,
converter.arg_name(), converter.arg(),
[&](::perfetto::EventContext& ctx) {
ctx.event()->set_name(converter.name());
});
if (converter.arg()) {
TRACE_EVENT_INSTANT_WITH_FLAGS1(
internal::kJavaTraceCategory, converter.name(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY |
TRACE_EVENT_SCOPE_THREAD,
converter.arg_name(), converter.arg());
} else {
TRACE_EVENT_INSTANT(internal::kJavaTraceCategory, nullptr,
[&](::perfetto::EventContext& ctx) {
ctx.event()->set_name(converter.name());
});
TRACE_EVENT_INSTANT_WITH_FLAGS0(
internal::kJavaTraceCategory, converter.name(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY |
TRACE_EVENT_SCOPE_THREAD);
}
}
@ -233,17 +233,15 @@ static void JNI_TraceEvent_Begin(JNIEnv* env,
const JavaParamRef<jstring>& jname,
const JavaParamRef<jstring>& jarg) {
TraceEventDataConverter converter(env, jname, jarg);
if (converter.arg_name()) {
TRACE_EVENT_BEGIN(internal::kJavaTraceCategory, nullptr,
converter.arg_name(), converter.arg(),
[&](::perfetto::EventContext& ctx) {
ctx.event()->set_name(converter.name());
});
if (converter.arg()) {
TRACE_EVENT_BEGIN_WITH_FLAGS1(
internal::kJavaTraceCategory, converter.name(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY,
converter.arg_name(), converter.arg());
} else {
TRACE_EVENT_BEGIN(internal::kJavaTraceCategory, nullptr,
[&](::perfetto::EventContext& ctx) {
ctx.event()->set_name(converter.name());
});
TRACE_EVENT_BEGIN_WITH_FLAGS0(
internal::kJavaTraceCategory, converter.name(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
}
@ -251,46 +249,52 @@ static void JNI_TraceEvent_End(JNIEnv* env,
const JavaParamRef<jstring>& jname,
const JavaParamRef<jstring>& jarg) {
TraceEventDataConverter converter(env, jname, jarg);
if (converter.arg_name()) {
TRACE_EVENT_END(internal::kJavaTraceCategory, converter.arg_name(),
converter.arg());
if (converter.arg()) {
TRACE_EVENT_END_WITH_FLAGS1(
internal::kJavaTraceCategory, converter.name(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY,
converter.arg_name(), converter.arg());
} else {
TRACE_EVENT_END(internal::kJavaTraceCategory);
TRACE_EVENT_END_WITH_FLAGS0(
internal::kJavaTraceCategory, converter.name(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
}
static void JNI_TraceEvent_BeginToplevel(JNIEnv* env,
const JavaParamRef<jstring>& jtarget) {
std::string target = ConvertJavaStringToUTF8(env, jtarget);
TRACE_EVENT_BEGIN(internal::kToplevelTraceCategory, nullptr,
[&](::perfetto::EventContext& ctx) {
ctx.event()->set_name(target.c_str());
});
TRACE_EVENT_BEGIN_WITH_FLAGS0(
internal::kToplevelTraceCategory, target.c_str(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
static void JNI_TraceEvent_EndToplevel(JNIEnv* env,
const JavaParamRef<jstring>& jtarget) {
std::string target = ConvertJavaStringToUTF8(env, jtarget);
TRACE_EVENT_END(internal::kToplevelTraceCategory);
TRACE_EVENT_END_WITH_FLAGS0(
internal::kToplevelTraceCategory, target.c_str(),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
static void JNI_TraceEvent_StartAsync(JNIEnv* env,
const JavaParamRef<jstring>& jname,
jlong jid) {
TraceEventDataConverter converter(env, jname, nullptr);
TRACE_EVENT_BEGIN(internal::kJavaTraceCategory, nullptr,
perfetto::Track(static_cast<uint64_t>(jid)),
[&](::perfetto::EventContext& ctx) {
ctx.event()->set_name(converter.name());
});
TRACE_EVENT_NESTABLE_ASYNC_BEGIN_WITH_FLAGS0(
internal::kJavaTraceCategory, converter.name(),
TRACE_ID_LOCAL(static_cast<uint64_t>(jid)),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
static void JNI_TraceEvent_FinishAsync(JNIEnv* env,
const JavaParamRef<jstring>& jname,
jlong jid) {
TraceEventDataConverter converter(env, jname, nullptr);
TRACE_EVENT_END(internal::kJavaTraceCategory,
perfetto::Track(static_cast<uint64_t>(jid)));
TRACE_EVENT_NESTABLE_ASYNC_END_WITH_FLAGS0(
internal::kJavaTraceCategory, converter.name(),
TRACE_ID_LOCAL(static_cast<uint64_t>(jid)),
TRACE_EVENT_FLAG_JAVA_STRING_LITERALS | TRACE_EVENT_FLAG_COPY);
}
} // namespace android

View File

@ -139,26 +139,11 @@ inline internal::UnretainedWrapper<T> Unretained(T* o) {
return internal::UnretainedWrapper<T>(o);
}
template <typename T, typename I>
inline internal::UnretainedWrapper<T> Unretained(const raw_ptr<T, I>& o) {
template <typename T, typename O>
inline internal::UnretainedWrapper<T> Unretained(const raw_ptr<T, O>& o) {
return internal::UnretainedWrapper<T>(o);
}
template <typename T, typename I>
inline internal::UnretainedWrapper<T> Unretained(raw_ptr<T, I>&& o) {
return internal::UnretainedWrapper<T>(std::move(o));
}
template <typename T, typename I>
inline auto Unretained(const raw_ref<T, I>& o) {
return internal::UnretainedRefWrapper(o);
}
template <typename T, typename I>
inline auto Unretained(raw_ref<T, I>&& o) {
return internal::UnretainedRefWrapper(std::move(o));
}
// RetainedRef() accepts a ref counted object and retains a reference to it.
// When the callback is called, the object is passed as a raw pointer.
//

View File

@ -19,13 +19,10 @@
#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ref.h"
#include "base/memory/raw_scoped_refptr_mismatch_checker.h"
#include "base/memory/weak_ptr.h"
#include "base/notreached.h"
#include "base/types/always_false.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/functional/function_ref.h"
#if BUILDFLAG(IS_APPLE) && !HAS_FEATURE(objc_arc)
#include "base/mac/scoped_block.h"
@ -78,9 +75,6 @@ struct BindUnwrapTraits;
template <typename Functor, typename BoundArgsTuple, typename SFINAE = void>
struct CallbackCancellationTraits;
template <typename Signature>
class FunctionRef;
namespace internal {
template <typename Functor, typename SFINAE = void>
@ -91,16 +85,13 @@ class UnretainedWrapper {
public:
explicit UnretainedWrapper(T* o) : ptr_(o) {}
// Trick to only instantiate these constructors if they are used. Otherwise,
// Trick to only instantiate this constructor if it is used. Otherwise,
// instantiating UnretainedWrapper with a T that is not supported by
// raw_ptr would trigger raw_ptr<T>'s static_assert.
template <typename U = T, typename I>
template <typename U = T, typename Option>
// Avoids having a raw_ptr<T> -> T* -> raw_ptr<T> round trip, which
// would trigger the raw_ptr error detector if T* was dangling.
explicit UnretainedWrapper(const raw_ptr<U, I>& o) : ptr_(o) {}
template <typename U = T, typename I>
explicit UnretainedWrapper(raw_ptr<U, I>&& o) : ptr_(std::move(o)) {}
explicit UnretainedWrapper(const raw_ptr<U, Option>& o) : ptr_(o) {}
T* get() const { return ptr_; }
private:
@ -129,46 +120,26 @@ class UnretainedWrapper {
// std::reference_wrapper<T> and T& do not work, since the reference lifetime is
// not safely protected by MiraclePtr.
//
// UnretainedWrapper<T> and raw_ptr<T> do not work, since BindUnwrapTraits would
// try to pass by T* rather than T&.
//
// raw_ref<T> is not used to differentiate between storing a `raw_ref<T>`
// explicitly versus storing a `T&` or `std::ref()`.
template <typename T, bool = raw_ptr_traits::IsSupportedType<T>::value>
// UnretainedWrapper<T> and raw_ptr<T> do not work, since BindUnwrapTraits
// would try to pass by T* rather than T&.
template <typename T>
class UnretainedRefWrapper {
public:
explicit UnretainedRefWrapper(T& o) : ref_(o) {}
T& get() const { return ref_; }
explicit UnretainedRefWrapper(T& o) : ptr_(std::addressof(o)) {}
T& get() const { return *ptr_; }
private:
T& ref_;
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
// As above.
using ImplType = T*;
#else
using ImplType = std::conditional_t<raw_ptr_traits::IsSupportedType<T>::value,
raw_ptr<T, DanglingUntriaged>,
T*>;
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
ImplType const ptr_;
};
#if !defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
// Implementation of UnretainedRefWrapper for `T` where raw_ref<T> is supported.
template <typename T>
class UnretainedRefWrapper<T, true> {
public:
explicit UnretainedRefWrapper(T& o) : ref_(o) {}
T& get() const { return *ref_; }
private:
const raw_ref<T, DanglingUntriaged> ref_;
};
// Implementation of UnretainedRefWrapper for `raw_ref<T>`.
template <typename T, typename I, bool b>
class UnretainedRefWrapper<raw_ref<T, I>, b> {
public:
explicit UnretainedRefWrapper(const raw_ref<T, I>& ref) : ref_(ref) {}
explicit UnretainedRefWrapper(raw_ref<T, I>&& ref) : ref_(std::move(ref)) {}
T& get() const { return *ref_; }
private:
const raw_ref<T, I> ref_;
};
#endif
template <typename T>
class RetainedRefWrapper {
public:
@ -237,7 +208,8 @@ class OwnedRefWrapper {
template <typename T>
class PassedWrapper {
public:
explicit PassedWrapper(T&& scoper) : scoper_(std::move(scoper)) {}
explicit PassedWrapper(T&& scoper)
: is_valid_(true), scoper_(std::move(scoper)) {}
PassedWrapper(PassedWrapper&& other)
: is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
T Take() const {
@ -247,7 +219,7 @@ class PassedWrapper {
}
private:
mutable bool is_valid_ = true;
mutable bool is_valid_;
mutable T scoper_;
};
@ -1076,11 +1048,6 @@ struct MakeBindStateTypeImpl<true, Functor, Receiver, BoundArgs...> {
static_assert(!std::is_array_v<std::remove_reference_t<Receiver>>,
"First bound argument to a method cannot be an array.");
static_assert(
!IsRawRefV<DecayedReceiver>,
"Receivers may not be raw_ref<T>. If using a raw_ref<T> here is safe"
" and has no lifetime concerns, use base::Unretained() and document why"
" it's safe.");
static_assert(
!IsPointerV<DecayedReceiver> ||
IsRefCountedType<RemovePointerT<DecayedReceiver>>::value,
@ -1389,26 +1356,6 @@ RepeatingCallback<Signature> BindImpl(RepeatingCallback<Signature> callback) {
return callback;
}
template <template <typename> class CallbackT, typename Signature>
auto BindImpl(absl::FunctionRef<Signature>, ...) {
static_assert(
AlwaysFalse<Signature>,
"base::Bind{Once,Repeating} require strong ownership: non-owning "
"function references may not bound as the functor due to potential "
"lifetime issues.");
return nullptr;
}
template <template <typename> class CallbackT, typename Signature>
auto BindImpl(FunctionRef<Signature>, ...) {
static_assert(
AlwaysFalse<Signature>,
"base::Bind{Once,Repeating} require strong ownership: non-owning "
"function references may not bound as the functor due to potential "
"lifetime issues.");
return nullptr;
}
} // namespace internal
// An injection point to control |this| pointer behavior on a method invocation.

View File

@ -13,12 +13,12 @@
#include <utility>
#include "base/bind.h"
#include "base/callback_forward.h" // IWYU pragma: export
#include "base/callback_forward.h"
#include "base/callback_internal.h"
#include "base/check.h"
#include "base/functional/function_ref.h"
#include "base/notreached.h"
#include "base/types/always_false.h"
#include "third_party/abseil-cpp/absl/functional/function_ref.h"
// -----------------------------------------------------------------------------
// Usage documentation
@ -180,19 +180,19 @@ class OnceCallback<R(Args...)> : public internal::CallbackBase {
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() & {
operator absl::FunctionRef<Signature>() & {
static_assert(
AlwaysFalse<Signature>,
"need to convert a base::OnceCallback to base::FunctionRef? "
"need to convert a base::OnceCallback to absl::FunctionRef? "
"Please bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() && {
operator absl::FunctionRef<Signature>() && {
static_assert(
AlwaysFalse<Signature>,
"using base::BindOnce() is not necessary with base::FunctionRef; is it "
"using base::BindOnce() is not necessary with absl::FunctionRef; is it "
"possible to use a capturing lambda directly? If not, please bring up "
"this use case on #cxx (Slack) or cxx@chromium.org.");
}
@ -310,19 +310,19 @@ class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() & {
operator absl::FunctionRef<Signature>() & {
static_assert(
AlwaysFalse<Signature>,
"need to convert a base::RepeatingCallback to base::FunctionRef? "
"need to convert a base::RepeatingCallback to absl::FunctionRef? "
"Please bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}
template <typename Signature>
// NOLINTNEXTLINE(google-explicit-constructor)
operator FunctionRef<Signature>() && {
operator absl::FunctionRef<Signature>() && {
static_assert(
AlwaysFalse<Signature>,
"using base::BindRepeating() is not necessary with base::FunctionRef; "
"using base::BindRepeating() is not necessary with absl::FunctionRef; "
"is it possible to use a capturing lambda directly? If not, please "
"bring up this use case on #cxx (Slack) or cxx@chromium.org.");
}

View File

@ -4,19 +4,26 @@
#include "base/check.h"
#include "build/build_config.h"
// check.h is a widely included header and its size has significant impact on
// build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 17000
#endif
#include "base/check_op.h"
#include "base/debug/alias.h"
#include "base/debug/debugging_buildflags.h"
#if !BUILDFLAG(IS_NACL)
#include "base/debug/crash_logging.h"
#endif // !BUILDFLAG(IS_NACL)
#include "base/debug/dump_without_crashing.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"
#if !BUILDFLAG(IS_NACL)
#include "base/debug/crash_logging.h"
#endif // !BUILDFLAG(IS_NACL)
#include <atomic>
namespace logging {
@ -25,19 +32,24 @@ namespace {
// DCHECK_IS_CONFIGURABLE and ENABLE_LOG_ERROR_NOT_REACHED are both interested
// in non-FATAL DCHECK()/NOTREACHED() reports.
#if BUILDFLAG(DCHECK_IS_CONFIGURABLE) || BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED)
void DumpOnceWithoutCrashing(LogMessage* log_message) {
#if defined(DCHECK_IS_CONFIGURABLE) || BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED)
void DCheckDumpOnceWithoutCrashing(LogMessage* log_message) {
// Best-effort gate to prevent multiple DCHECKs from being dumped. This will
// race if multiple threads DCHECK at the same time, but we'll eventually stop
// reporting and at most report once per thread.
static std::atomic<bool> has_dumped = false;
if (!has_dumped.load(std::memory_order_relaxed)) {
const std::string str = log_message->BuildCrashString();
// Copy the LogMessage message to stack memory to make sure it can be
// recovered in crash dumps.
// TODO(pbos): Do we need this for NACL builds or is the crash key set in
// the caller sufficient?
DEBUG_ALIAS_FOR_CSTR(log_message_str,
log_message->BuildCrashString().c_str(), 1024);
// TODO(pbos): Surface DCHECK_MESSAGE well in crash reporting to make this
// redundant, then remove it.
DEBUG_ALIAS_FOR_CSTR(log_message_str, str.c_str(), 1024);
#if !BUILDFLAG(IS_NACL)
// Report the log message as DCHECK_MESSAGE in the dump we're about to do.
SCOPED_CRASH_KEY_STRING1024("Logging", "DCHECK_MESSAGE", str);
#endif // !BUILDFLAG(IS_NACL)
// Note that dumping may fail if the crash handler hasn't been set yet. In
// that case we want to try again on the next failing DCHECK.
@ -46,36 +58,20 @@ void DumpOnceWithoutCrashing(LogMessage* log_message) {
}
}
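A standalone sketch of the best-effort once-gate pattern above (not the Chromium implementation): the relaxed atomic may race, so a few early reports can slip through, and a failed dump deliberately leaves the flag unset so the next failure retries.

```
#include <atomic>

bool TryDump() { return true; }  // stand-in for the actual dumping call

void ReportOnce() {
  static std::atomic<bool> has_dumped{false};
  if (!has_dumped.load(std::memory_order_relaxed)) {
    // Only mark as done when the dump succeeded; e.g. the crash handler
    // may not be installed yet, in which case we want to try again later.
    if (TryDump())
      has_dumped.store(true, std::memory_order_relaxed);
  }
}
```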
void NotReachedDumpOnceWithoutCrashing(LogMessage* log_message) {
#if !BUILDFLAG(IS_NACL)
SCOPED_CRASH_KEY_STRING1024("Logging", "NOTREACHED_MESSAGE",
log_message->BuildCrashString());
#endif // !BUILDFLAG(IS_NACL)
DumpOnceWithoutCrashing(log_message);
}
class NotReachedLogMessage : public LogMessage {
public:
using LogMessage::LogMessage;
~NotReachedLogMessage() override {
if (severity() != logging::LOGGING_FATAL)
NotReachedDumpOnceWithoutCrashing(this);
DCheckDumpOnceWithoutCrashing(this);
}
};
#else
using NotReachedLogMessage = LogMessage;
#endif // BUILDFLAG(DCHECK_IS_CONFIGURABLE) ||
#endif // defined(DCHECK_IS_CONFIGURABLE) ||
// BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED)
#if BUILDFLAG(DCHECK_IS_CONFIGURABLE)
void DCheckDumpOnceWithoutCrashing(LogMessage* log_message) {
#if !BUILDFLAG(IS_NACL)
SCOPED_CRASH_KEY_STRING1024("Logging", "DCHECK_MESSAGE",
log_message->BuildCrashString());
#endif // !BUILDFLAG(IS_NACL)
DumpOnceWithoutCrashing(log_message);
}
#if defined(DCHECK_IS_CONFIGURABLE)
class DCheckLogMessage : public LogMessage {
public:
@ -113,7 +109,7 @@ using DCheckWin32ErrorLogMessage = Win32ErrorLogMessage;
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
using DCheckErrnoLogMessage = ErrnoLogMessage;
#endif // BUILDFLAG(IS_WIN)
#endif // BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#endif // defined(DCHECK_IS_CONFIGURABLE)
} // namespace

View File

@ -10,7 +10,6 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/debug/debugging_buildflags.h"
#include "base/immediate_crash.h"
// This header defines the CHECK, DCHECK, and DPCHECK macros.
@ -99,8 +98,7 @@ class BASE_EXPORT CheckError {
LogMessage* const log_message_;
};
#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && \
!BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
// Discard log strings to reduce code bloat.
//

View File

@ -34,7 +34,7 @@
//
// `CHECK_IS_TEST` is thread safe.
#define CHECK_IS_TEST() base::internal::check_is_test_impl()
#define CHECK_IS_TEST() base::internal::check_is_test_impl();
namespace base::internal {
BASE_EXPORT void check_is_test_impl();
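The macro body deliberately omits the trailing semicolon: the caller supplies it. A minimal illustration (not Chromium code) of why a baked-in semicolon is harmful:

```
void check_is_test_impl() {}

#define CHECK_IS_TEST_GOOD() check_is_test_impl()
#define CHECK_IS_TEST_BAD() check_is_test_impl();

void Example(bool condition) {
  if (condition)
    CHECK_IS_TEST_GOOD();  // expands to one statement; 'else' still binds
  else
    check_is_test_impl();
  // With CHECK_IS_TEST_BAD() the extra ';' would end the 'if' statement,
  // leaving the 'else' dangling and failing to compile.
}
```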

View File

@ -4,6 +4,13 @@
#include "base/check_op.h"
// check_op.h is a widely included header and its size has significant impact on
// build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 390000
#endif
#include <string.h>
#include <cstdio>

View File

@ -12,7 +12,6 @@
#include "base/base_export.h"
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/debug/debugging_buildflags.h"
#include "base/template_util.h"
// This header defines the (DP)CHECK_EQ etc. macros.
@ -140,8 +139,7 @@ class CheckOpResult {
char* message_ = nullptr;
};
#if defined(OFFICIAL_BUILD) && defined(NDEBUG) && \
!BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
// Discard log strings to reduce code bloat.
#define CHECK_OP(name, op, val1, val2) CHECK((val1)op(val2))

View File

@ -170,8 +170,6 @@
#endif
// DISABLE_CFI_ICALL -- Disable Control Flow Integrity indirect call checks.
// Security Note: if you just need to allow calling of dlsym functions use
// DISABLE_CFI_DLSYM.
#if !defined(DISABLE_CFI_ICALL)
#if BUILDFLAG(IS_WIN)
// Windows also needs __declspec(guard(nocf)).
@ -184,21 +182,6 @@
#define DISABLE_CFI_ICALL
#endif
// DISABLE_CFI_DLSYM -- applies DISABLE_CFI_ICALL on platforms where dlsym
// functions must be called. Retains CFI checks on platforms where loaded
// modules participate in CFI (e.g. Windows).
#if !defined(DISABLE_CFI_DLSYM)
#if BUILDFLAG(IS_WIN)
// Windows modules register functions when loaded so they can be checked by CFG.
#define DISABLE_CFI_DLSYM
#else
#define DISABLE_CFI_DLSYM DISABLE_CFI_ICALL
#endif
#endif
#if !defined(DISABLE_CFI_DLSYM)
#define DISABLE_CFI_DLSYM
#endif
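Editorial note: a hedged sketch of how these macros are meant to be used — calling through a `dlsym()`-resolved pointer, which CFI cannot verify on platforms where loaded modules don't register their entry points (the handle and symbol name are illustrative):
```
#include <dlfcn.h>

DISABLE_CFI_DLSYM
void CallPluginEntry(void* handle) {
  using EntryFn = int (*)();
  // An indirect call through this pointer would trip CFI's icall check
  // without the annotation, since the target isn't in the CFI tables.
  auto entry = reinterpret_cast<EntryFn>(dlsym(handle, "plugin_entry"));
  if (entry)
    entry();
}
```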
// Macro useful for writing cross-platform function pointers.
#if !defined(CDECL)
#if BUILDFLAG(IS_WIN)
@ -406,13 +389,4 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
#define GSL_POINTER
#endif
// Adds the "logically_const" tag to a symbol's mangled name, which can be
// recognized by the "Mutable Constants" check
// (https://chromium.googlesource.com/chromium/src/+/main/docs/speed/binary_size/android_binary_size_trybot.md#Mutable-Constants).
#if defined(COMPILER_GCC) || defined(__clang__)
#define LOGICALLY_CONST [[gnu::abi_tag("logically_const")]]
#else
#define LOGICALLY_CONST
#endif
#endif // BASE_COMPILER_SPECIFIC_H_

View File

@ -4,6 +4,13 @@
#include "base/containers/flat_tree.h"
// flat_tree.h is a widely included header and its size has significant impact
// on build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 370000
#endif
namespace base {
sorted_unique_t sorted_unique;

View File

@ -286,10 +286,7 @@ class GSL_POINTER span : public internal::ExtentStorage<Extent> {
typename End,
typename = internal::EnableIfCompatibleContiguousIterator<It, T>,
typename = std::enable_if_t<!std::is_convertible<End, size_t>::value>>
constexpr span(It begin, End end) noexcept
// Subtracting two iterators gives a ptrdiff_t, but the result should be
// non-negative: see CHECK below.
: span(begin, static_cast<size_t>(end - begin)) {
constexpr span(It begin, End end) noexcept : span(begin, end - begin) {
// Note: CHECK_LE is not constexpr, hence regular CHECK must be used.
CHECK(begin <= end);
}
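Editorial note: a minimal sketch of what the added cast addresses — subtracting iterators yields a signed `ptrdiff_t`, while a span's size is unsigned; the `CHECK` guarantees the difference is non-negative before the cast discards the sign:
```
#include <cstddef>
#include <type_traits>

void SizeFromIterators() {
  int buf[4] = {1, 2, 3, 4};
  int* begin = buf;
  int* end = buf + 4;
  static_assert(std::is_same_v<decltype(end - begin), std::ptrdiff_t>);
  // Well-defined because end >= begin; otherwise the CHECK fires first.
  size_t size = static_cast<size_t>(end - begin);  // == 4
  (void)size;
}
```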

View File

@ -4,6 +4,13 @@
#include "base/debug/alias.h"
// This is a widely included header and its size has significant impact on
// build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 250
#endif
#include "base/compiler_specific.h"
namespace base {

View File

@ -4,10 +4,8 @@
#include "base/debug/crash_logging.h"
#include "base/strings/string_piece.h"
#include "build/build_config.h"
namespace base::debug {
namespace base {
namespace debug {
namespace {
@ -20,24 +18,6 @@ CrashKeyString* AllocateCrashKeyString(const char name[],
if (!g_crash_key_impl)
return nullptr;
// TODO(https://crbug.com/1341077): It would be great if the DCHECKs below
// could also be enabled on Android, but debugging tryjob failures was a bit
// difficult... :-/
#if DCHECK_IS_ON() && !BUILDFLAG(IS_ANDROID)
base::StringPiece name_piece = name;
// Some `CrashKeyImplementation`s reserve certain characters and disallow
// using them in crash key names. See also https://crbug.com/1341077.
DCHECK_EQ(base::StringPiece::npos, name_piece.find(':'))
<< "; name_piece = " << name_piece;
// Some `CrashKeyImplementation`s support only short crash key names (e.g. see
// the DCHECK in crash_reporter::internal::CrashKeyStringImpl::Set).
// Enforcing this restriction here ensures that crash keys will work for all
// `CrashKeyStringImpl`s.
DCHECK_LT(name_piece.size(), 40u);
#endif
return g_crash_key_impl->Allocate(name, value_length);
}
@ -80,4 +60,5 @@ void SetCrashKeyImplementation(std::unique_ptr<CrashKeyImplementation> impl) {
g_crash_key_impl = impl.release();
}
} // namespace base::debug
} // namespace debug
} // namespace base

View File

@ -4,6 +4,13 @@
#include "base/feature_list.h"
// feature_list.h is a widely included header and its size impacts build
// time. Try not to raise this limit unless necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 600000
#endif
#include <string>
#include <tuple>
@ -44,16 +51,6 @@ FeatureList* g_feature_list_instance = nullptr;
// which Feature that accessor was for, if so.
const Feature* g_initialized_from_accessor = nullptr;
// Controls whether a feature's override state will be cached in
// `base::Feature::cached_value`. This field and the associated `base::Feature`
// only exist to measure the impact of the caching on different performance
// metrics.
// TODO(crbug.com/1341292): Remove this global and this feature once the gains
// are measured.
bool g_cache_override_state = false;
const base::Feature kCacheFeatureOverrideState{
"CacheFeatureOverrideState", base::FEATURE_ENABLED_BY_DEFAULT};
#if DCHECK_IS_ON()
// Tracks whether the use of base::Feature is allowed for this module.
// See ForbidUseForCurrentModule().
@ -118,18 +115,17 @@ bool IsValidFeatureOrFieldTrialName(StringPiece name) {
return IsStringASCII(name) && name.find_first_of(",<*") == std::string::npos;
}
// Splits |text| into two parts by the |separator| where the first part will be
// Splits |first| into two parts by the |separator| where the first part will be
// returned updated in |first| and the second part will be returned as |second|.
// This function returns false if there is more than one |separator| in |first|.
// If there is no |separator| present in |first|, this function will not
// modify |first| and |second|. It's used for splitting the |enable_features|
// flag into feature name, field trial name and feature parameters.
bool SplitIntoTwo(StringPiece text,
StringPiece separator,
bool SplitIntoTwo(const std::string& separator,
StringPiece* first,
std::string* second) {
std::vector<StringPiece> parts =
SplitStringPiece(text, separator, TRIM_WHITESPACE, SPLIT_WANT_ALL);
SplitStringPiece(*first, separator, TRIM_WHITESPACE, SPLIT_WANT_ALL);
if (parts.size() == 2) {
*second = std::string(parts[1]);
} else if (parts.size() > 2) {
@ -154,21 +150,31 @@ bool ParseEnableFeatures(const std::string& enable_features,
std::vector<std::string> enable_features_list;
std::vector<std::string> force_fieldtrials_list;
std::vector<std::string> force_fieldtrial_params_list;
for (const auto& enable_feature :
for (auto& enable_feature :
FeatureList::SplitFeatureListString(enable_features)) {
std::string feature_name;
std::string study;
std::string group;
// First, check whether ":" is present. If true, feature parameters were
// set for this feature.
std::string feature_params;
if (!FeatureList::ParseEnableFeatureString(
enable_feature, &feature_name, &study, &group, &feature_params)) {
if (!SplitIntoTwo(":", &enable_feature, &feature_params))
return false;
// Then, check whether "." is present. If true, a group was specified for
// this feature.
std::string group;
if (!SplitIntoTwo(".", &enable_feature, &group))
return false;
// Finally, check whether "<" is present. If true, a study was specified for
// this feature.
std::string study;
if (!SplitIntoTwo("<", &enable_feature, &study))
return false;
}
const std::string feature_name(enable_feature);
// If feature params were set but group and study weren't, associate the
// feature and its feature params to a synthetic field trial as the
// feature params only make sense when it's combined with a field trial.
if (!feature_params.empty()) {
study = study.empty() ? "Study" + feature_name : study;
group = group.empty() ? "Group" + feature_name : group;
force_fieldtrials_list.push_back(study + "/" + group);
force_fieldtrial_params_list.push_back(study + "." + group + ":" +
feature_params);
@ -185,25 +191,12 @@ bool ParseEnableFeatures(const std::string& enable_features,
return true;
}
std::pair<FeatureList::OverrideState, uint16_t> UnpackFeatureCache(
uint32_t packed_cache_value) {
return std::make_pair(
static_cast<FeatureList::OverrideState>(packed_cache_value >> 24),
packed_cache_value & 0xFFFF);
}
uint32_t PackFeatureCache(FeatureList::OverrideState override_state,
uint32_t caching_context) {
return (static_cast<uint32_t>(override_state) << 24) |
(caching_context & 0xFFFF);
}
} // namespace
#if BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#if defined(DCHECK_IS_CONFIGURABLE)
const Feature kDCheckIsFatalFeature{"DcheckIsFatal",
FEATURE_DISABLED_BY_DEFAULT};
#endif // BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#endif // defined(DCHECK_IS_CONFIGURABLE)
FeatureList::FeatureList() = default;
@ -376,10 +369,8 @@ void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) {
}
void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
std::string* disable_overrides,
bool include_group_name) const {
GetFeatureOverridesImpl(enable_overrides, disable_overrides, false,
include_group_name);
std::string* disable_overrides) const {
GetFeatureOverridesImpl(enable_overrides, disable_overrides, false);
}
void FeatureList::GetCommandLineFeatureOverrides(
@ -432,45 +423,6 @@ std::vector<StringPiece> FeatureList::SplitFeatureListString(
return SplitStringPiece(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
}
// static
bool FeatureList::ParseEnableFeatureString(StringPiece enable_feature,
std::string* feature_name,
std::string* study_name,
std::string* group_name,
std::string* params) {
StringPiece first;
// First, check whether ":" is present. If true, feature parameters were
// set for this feature.
std::string feature_params;
if (!SplitIntoTwo(enable_feature, ":", &first, &feature_params))
return false;
// Then, check whether "." is present. If true, a group was specified for
// this feature.
std::string group;
if (!SplitIntoTwo(first, ".", &first, &group))
return false;
// Finally, check whether "<" is present. If true, a study was specified for
// this feature.
std::string study;
if (!SplitIntoTwo(first, "<", &first, &study))
return false;
std::string enable_feature_name(first);
// If feature params were set but group and study weren't, associate the
// feature and its feature params to a synthetic field trial as the
// feature params only make sense when it's combined with a field trial.
if (!feature_params.empty()) {
study = study.empty() ? "Study" + enable_feature_name : study;
group = group.empty() ? "Group" + enable_feature_name : group;
}
feature_name->swap(enable_feature_name);
study_name->swap(study);
group_name->swap(group);
params->swap(feature_params);
return true;
}
// static
bool FeatureList::InitializeInstance(const std::string& enable_features,
const std::string& disable_features) {
@ -532,12 +484,9 @@ void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
ConfigureRandBytesFieldTrial();
#endif
g_cache_override_state =
base::FeatureList::IsEnabled(kCacheFeatureOverrideState);
base::sequence_manager::internal::WorkQueue::ConfigureCapacityFieldTrial();
#if BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#if defined(DCHECK_IS_CONFIGURABLE)
// Update the behaviour of LOGGING_DCHECK to match the Feature configuration.
// DCHECK is also forced to be FATAL if we are running a death-test.
// TODO(crbug.com/1057995#c11): --gtest_internal_run_death_test doesn't
@ -552,7 +501,7 @@ void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
} else {
logging::LOGGING_DCHECK = logging::LOG_INFO;
}
#endif // BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#endif // defined(DCHECK_IS_CONFIGURABLE)
}
// static
@ -580,10 +529,6 @@ void FeatureList::ForbidUseForCurrentModule() {
#endif // DCHECK_IS_ON()
}
void FeatureList::SetCachingContextForTesting(uint16_t caching_context) {
caching_context_ = caching_context;
}
void FeatureList::FinalizeInitialization() {
DCHECK(!initialized_);
// Store the field trial list pointer for DCHECKing.
@ -618,32 +563,7 @@ FeatureList::OverrideState FeatureList::GetOverrideState(
DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
DCHECK(CheckFeatureIdentity(feature)) << feature.name;
// If caching is disabled, always perform the full lookup.
if (!g_cache_override_state)
return GetOverrideStateByFeatureName(feature.name);
uint32_t current_cache_value =
feature.cached_value.load(std::memory_order_relaxed);
auto unpacked = UnpackFeatureCache(current_cache_value);
if (unpacked.second == caching_context_)
return unpacked.first;
OverrideState state = GetOverrideStateByFeatureName(feature.name);
uint32_t new_cache_value = PackFeatureCache(state, caching_context_);
// Update the cache with the new value.
// In non-test code, this value can be in one of 2 states: either it's unset,
// or another thread has updated it to the same value we're about to write.
// Because of this, a plain `store` yields the correct result in all cases.
// In test code, it's possible for a different thread to have installed a new
// `ScopedFeatureList` and written a value that's different than the one we're
// about to write, although that would be a thread safety violation already
// and such tests should be fixed.
feature.cached_value.store(new_cache_value, std::memory_order_relaxed);
return state;
}
FeatureList::OverrideState FeatureList::GetOverrideStateByFeatureName(
@ -770,8 +690,7 @@ void FeatureList::RegisterOverride(StringPiece feature_name,
void FeatureList::GetFeatureOverridesImpl(std::string* enable_overrides,
std::string* disable_overrides,
bool command_line_only,
bool include_group_name) const {
bool command_line_only) const {
DCHECK(initialized_);
// Check that the FieldTrialList this is associated with, if any, is the
@ -811,13 +730,8 @@ void FeatureList::GetFeatureOverridesImpl(std::string* enable_overrides,
target_list->push_back('*');
target_list->append(entry.first);
if (entry.second.field_trial) {
auto* const field_trial = entry.second.field_trial;
target_list->push_back('<');
target_list->append(field_trial->trial_name());
if (include_group_name) {
target_list->push_back('.');
target_list->append(field_trial->GetGroupNameWithoutActivation());
}
target_list->append(entry.second.field_trial->trial_name());
}
}
}

View File

@ -5,7 +5,6 @@
#ifndef BASE_FEATURE_LIST_H_
#define BASE_FEATURE_LIST_H_
#include <atomic>
#include <functional>
#include <map>
#include <memory>
@ -14,7 +13,6 @@
#include <vector>
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/containers/flat_map.h"
#include "base/dcheck_is_on.h"
#include "base/feature_list_buildflags.h"
@ -45,21 +43,7 @@ enum FeatureState {
// file static. It should never be used as a constexpr as it breaks
// pointer-based identity lookup.
// Note: New code should use CONSTINIT on the base::Feature declaration.
//
// Making Feature constants mutable allows them to contain a mutable member to
// cache their override state, while still remaining declared as const. This
// cache member allows for significantly faster IsEnabled() checks.
// The "Mutable Constants" check
// (https://chromium.googlesource.com/chromium/src/+/main/docs/speed/binary_size/android_binary_size_trybot.md#Mutable-Constants)
// detects this, because this generally means that a readonly symbol is put in
// writable memory when readonly memory would be more efficient in terms of
// space. Declaring as LOGICALLY_CONST adds a recognizable pattern to all
// Feature constant mangled names, which the "Mutable Constants" check can use to
// ignore the symbols declared as such. The performance gains of the cache are
// large enough that it is worth the tradeoff to have the symbols in
// non-readonly memory, therefore requiring a bypass of the "Mutable Constants"
// check.
struct BASE_EXPORT LOGICALLY_CONST Feature {
struct BASE_EXPORT Feature {
constexpr Feature(const char* name, FeatureState default_state)
: name(name), default_state(default_state) {
#if BUILDFLAG(ENABLE_BANNED_BASE_FEATURE_PREFIX)
@ -69,14 +53,6 @@ struct BASE_EXPORT LOGICALLY_CONST Feature {
}
#endif // BUILDFLAG(ENABLE_BANNED_BASE_FEATURE_PREFIX)
}
// This object needs to be copyable because of some signatures in
// ScopedFeatureList, but generally isn't copied anywhere except unit tests.
// The `cached_value` doesn't get copied and copies will trigger a lookup if
// their state is queried.
Feature(const Feature& other)
: name(other.name), default_state(other.default_state), cached_value(0) {}
// The name of the feature. This should be unique to each feature and is used
// for enabling/disabling features via command line flags and experiments.
// It is strongly recommended to use CamelCase style for feature names, e.g.
@ -87,35 +63,14 @@ struct BASE_EXPORT LOGICALLY_CONST Feature {
// NOTE: The actual runtime state may be different, due to a field trial or a
// command line switch.
const FeatureState default_state;
private:
friend class FeatureList;
// A packed value where the first 8 bits represent the `OverrideState` of this
// feature, and the last 16 bits are a caching context ID used to allow
// ScopedFeatureLists to invalidate these cached values in testing. A value of
// 0 in the caching context ID field indicates that this value has never been
// looked up and cached, a value of 1 indicates this value contains the cached
// `OverrideState` that was looked up via `base::FeatureList`, and any other
// value indicates that this cached value is only valid for a particular
// ScopedFeatureList instance.
//
// Packing these values into a uint32_t makes it so that atomic operations
// performed on this field can be lock-free.
//
// The override state stored in this field is only used if the current
// `FeatureList::caching_context_` field is equal to the lower 16 bits of the
// packed cached value. Otherwise, the override state is looked up in the
// feature list and the cache is updated.
mutable std::atomic<uint32_t> cached_value = 0;
};
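Editorial note: a worked sketch of the packing scheme the comment above describes, with standalone helpers mirroring `PackFeatureCache`/`UnpackFeatureCache` from the earlier hunk (values chosen for illustration):
```
#include <cstdint>
#include <utility>

// Override state in the top 8 bits, caching context in the low 16 bits.
constexpr uint32_t Pack(uint8_t state, uint16_t context) {
  return (static_cast<uint32_t>(state) << 24) | context;
}

constexpr std::pair<uint8_t, uint16_t> Unpack(uint32_t packed) {
  return {static_cast<uint8_t>(packed >> 24),
          static_cast<uint16_t>(packed & 0xFFFF)};
}

static_assert(Pack(2, 1) == 0x02000001u);
static_assert(Unpack(0x02000001u).first == 2);
```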
#if BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#if defined(DCHECK_IS_CONFIGURABLE)
// DCHECKs have been built-in, and are configurable at run-time to be fatal, or
// not, via a DcheckIsFatal feature. We define the Feature here since it is
// checked in FeatureList::SetInstance(). See https://crbug.com/596231.
extern BASE_EXPORT const Feature kDCheckIsFatalFeature;
#endif // BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#endif // defined(DCHECK_IS_CONFIGURABLE)
// The FeatureList class is used to determine whether a given feature is on or
// off. It provides an authoritative answer, taking into account command-line
@ -303,16 +258,12 @@ class BASE_EXPORT FeatureList {
// accepted by InitializeFromCommandLine()) corresponding to features that
// have been overridden - either through command-line or via FieldTrials. For
// those features that have an associated FieldTrial, the output entry will be
// of the format "FeatureName<TrialName" (|include_group_name|=false) or
// "FeatureName<TrialName.GroupName" (if |include_group_name|=true), where
// "TrialName" is the name of the FieldTrial and "GroupName" is the group
// name of the FieldTrial. Features that have overrides with
// OVERRIDE_USE_DEFAULT will be added to |enable_overrides| with a '*'
// character prefix. Must be called only after the instance has been
// initialized and registered.
// of the format "FeatureName<TrialName", where "TrialName" is the name of the
// FieldTrial. Features that have overrides with OVERRIDE_USE_DEFAULT will be
// added to |enable_overrides| with a '*' character prefix. Must be called
// only after the instance has been initialized and registered.
void GetFeatureOverrides(std::string* enable_overrides,
std::string* disable_overrides,
bool include_group_names = false) const;
std::string* disable_overrides) const;
// Like GetFeatureOverrides(), but only returns overrides that were specified
// explicitly on the command-line, omitting the ones from field trials.
@ -357,19 +308,6 @@ class BASE_EXPORT FeatureList {
static std::vector<base::StringPiece> SplitFeatureListString(
base::StringPiece input);
// Checks and parses the |enable_feature| (e.g.
// FeatureName<Study.Group:Param1/value1/) obtained by applying
// SplitFeatureListString() to the |enable_features| flag, and sets
// |feature_name| to be the feature's name, |study_name| and |group_name| to
// be the field trial name and its group name if the field trial is specified
// or field trial parameters are given, |params| to be the field trial
// parameters if they exist.
static bool ParseEnableFeatureString(StringPiece enable_feature,
std::string* feature_name,
std::string* study_name,
std::string* group_name,
std::string* params);
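Editorial note: a short sketch of the decomposition this declaration documents, using the example string from the comment (expected outputs inferred from the description above):
```
std::string name, study, group, params;
bool ok = base::FeatureList::ParseEnableFeatureString(
    "FeatureName<Study.Group:Param1/value1/",
    &name, &study, &group, &params);
// ok == true; name == "FeatureName", study == "Study",
// group == "Group", params == "Param1/value1/".
```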
// Initializes and sets an instance of FeatureList with feature overrides via
// command-line flags |enable_features| and |disable_features| if one has not
// already been set from command-line flags. Returns true if an instance did
@ -413,8 +351,6 @@ class BASE_EXPORT FeatureList {
// Has no effect if DCHECKs are not enabled.
static void ForbidUseForCurrentModule();
void SetCachingContextForTesting(uint16_t caching_context);
private:
FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
@ -505,8 +441,7 @@ class BASE_EXPORT FeatureList {
// function's comments for more details.
void GetFeatureOverridesImpl(std::string* enable_overrides,
std::string* disable_overrides,
bool command_line_only,
bool include_group_name = false) const;
bool command_line_only) const;
// Verifies that there's only a single definition of a Feature struct for a
// given feature name. Keeps track of the first seen Feature struct for each
@ -540,11 +475,6 @@ class BASE_EXPORT FeatureList {
// Whether this object has been initialized from command line.
bool initialized_from_command_line_ = false;
// Used when querying `base::Feature` state to determine if the cached value
// in the `Feature` object is populated and valid. See the comment on
// `base::Feature::cached_value` for more details.
uint16_t caching_context_ = 1;
};
} // namespace base

View File

@ -4,6 +4,15 @@
#include "base/files/file_path.h"
#include "build/build_config.h"
// file_path.h is a widely included header and its size has significant impact
// on build time. Try not to raise this limit unless necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 400000
#endif
#include <iostream>
#include <string.h>
#include <algorithm>

View File

@ -0,0 +1,49 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/files/file_path_watcher.h"
#include "base/files/file_path.h"
#include "base/notreached.h"
#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
namespace {
class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
public:
FilePathWatcherImpl() = default;
FilePathWatcherImpl(const FilePathWatcherImpl&) = delete;
FilePathWatcherImpl& operator=(const FilePathWatcherImpl&) = delete;
~FilePathWatcherImpl() override = default;
// FilePathWatcher::PlatformDelegate:
bool Watch(const FilePath& path,
Type type,
const FilePathWatcher::Callback& callback) override;
void Cancel() override;
};
bool FilePathWatcherImpl::Watch(const FilePath& path,
Type type,
const FilePathWatcher::Callback& callback) {
DCHECK(!callback.is_null());
NOTIMPLEMENTED_LOG_ONCE();
return false;
}
void FilePathWatcherImpl::Cancel() {
set_cancelled();
}
} // namespace
FilePathWatcher::FilePathWatcher() {
sequence_checker_.DetachFromSequence();
impl_ = std::make_unique<FilePathWatcherImpl>();
}
} // namespace base

View File

@ -673,7 +673,7 @@ bool FilePathWatcherImpl::UpdateRecursiveWatches(
? recursive_paths_by_watch_[fired_watch]
: target_;
auto start_it = recursive_watches_by_path_.upper_bound(changed_dir);
auto start_it = recursive_watches_by_path_.lower_bound(changed_dir);
auto end_it = start_it;
for (; end_it != recursive_watches_by_path_.end(); ++end_it) {
const FilePath& cur_path = end_it->first;
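Editorial note: for context on the one-line change above, a small illustration of how the two bounds differ on an ordered container (paths illustrative) — `lower_bound` includes the changed directory's own entry, while `upper_bound` starts strictly past it:
```
#include <map>
#include <string>

void BoundsDemo() {
  std::map<std::string, int> watches{{"/a", 0}, {"/a/b", 1}, {"/a/b/c", 2}};
  auto lo = watches.lower_bound("/a/b");  // points at "/a/b" itself
  auto hi = watches.upper_bound("/a/b");  // points at "/a/b/c"
  (void)lo;
  (void)hi;
}
```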

View File

@ -15,11 +15,11 @@ namespace base {
// these constants.
constexpr TimeDelta kAudioSchedulingPeriod = Milliseconds(10);
// Reserve 30% of one CPU core for audio threads.
// Reserve 10% of one CPU core for audio threads.
// TODO(crbug.com/1174811): A different value may need to be used for WebAudio
// threads (see media::FuchsiaAudioOutputDevice). A higher capacity may need to
// be allocated in that case.
constexpr float kAudioSchedulingCapacity = 0.3;
constexpr float kAudioSchedulingCapacity = 0.1;
// Scheduling interval to use for display threads.
// TODO(crbug.com/1224707): Add scheduling period to Thread::Options and remove

View File

@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// TODO(crbug.com/1227712): Migrate syntax and remove this.
library base.testfidl;
@discoverable

View File

@ -1,102 +0,0 @@
// Copyright 2022 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_FUNCTIONAL_FUNCTION_REF_H_
#define BASE_FUNCTIONAL_FUNCTION_REF_H_
#include <type_traits>
#include <utility>
#include "base/bind_internal.h"
#include "third_party/abseil-cpp/absl/base/attributes.h"
#include "third_party/abseil-cpp/absl/functional/function_ref.h"
namespace base {
template <typename Signature>
class FunctionRef;
// A non-owning reference to any invocable object (e.g. function pointer, method
// pointer, functor, lambda, et cetera) suitable for use as a type-erased
// argument to ForEach-style functions or other visitor patterns that:
//
// - do not need to copy or take ownership of the argument
// - synchronously call the invocable that was passed as an argument
//
// `base::FunctionRef` makes no heap allocations: it is trivially copyable and
// should be passed by value.
//
// `base::FunctionRef` has no null/empty state: a `base::FunctionRef` is always
// valid to invoke.
//
// The usual lifetime precautions for other non-owning references types (e.g.
// `base::StringPiece`, `base::span`) also apply to `base::FunctionRef`.
// `base::FunctionRef` should typically be used as an argument; returning a
// `base::FunctionRef` or storing a `base::FunctionRef` as a field is dangerous
// and likely to result in lifetime bugs.
//
// `base::RepeatingCallback` and `base::BindRepeating()` is another common way
// to represent type-erased invocable objects. In contrast, it requires a heap
// allocation and is not trivially copyable. It should be used when there are
// ownership requirements (e.g. partial application of arguments to a function
// stored for asynchronous execution).
//
// Note: this class is very similar to `absl::FunctionRef<R(Args...)>`, but
// disallows implicit conversions between function types, e.g. functors that
// return non-void values may bind to `absl::FunctionRef<void(...)>`:
//
// ```
// void F(absl::FunctionRef<void()>);
// ...
// F([] { return 42; });
// ```
//
// This compiles and silently discards the return value, but with the base
// version, the equivalent snippet:
//
// ```
// void F(base::FunctionRef<void()>);
// ...
// F([] { return 42;});
// ```
//
// will not compile at all.
template <typename R, typename... Args>
class FunctionRef<R(Args...)> {
public:
// `ABSL_ATTRIBUTE_LIFETIME_BOUND` is important: since `FunctionRef` retains
// only a reference to `functor`, `functor` must outlive `this`.
template <typename Functor,
typename = std::enable_if_t<std::is_same_v<
R(Args...),
typename internal::MakeFunctorTraits<Functor>::RunType>>>
// NOLINTNEXTLINE(google-explicit-constructor)
FunctionRef(const Functor& functor ABSL_ATTRIBUTE_LIFETIME_BOUND)
: wrapped_func_ref_(functor) {}
// Null FunctionRefs are not allowed.
FunctionRef() = delete;
FunctionRef(const FunctionRef&) = default;
// Reduce the likelihood of lifetime bugs by disallowing assignment.
FunctionRef& operator=(const FunctionRef&) = delete;
R operator()(Args... args) const {
return wrapped_func_ref_(std::forward<Args>(args)...);
}
absl::FunctionRef<R(Args...)> ToAbsl() const { return wrapped_func_ref_; }
// In Chrome, converting to `absl::FunctionRef` should be explicitly done
// through `ToAbsl()`.
template <typename Signature>
operator absl::FunctionRef<Signature>() = delete;
private:
absl::FunctionRef<R(Args...)> wrapped_func_ref_;
};
} // namespace base
#endif // BASE_FUNCTIONAL_FUNCTION_REF_H_
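Editorial note: a minimal usage sketch of the removed class; `ForEachInt` is a hypothetical visitor-style helper, not part of the header:
```
#include <cstdio>

// Accepts any callable matching void(int) without copying or storing it.
void ForEachInt(base::FunctionRef<void(int)> visitor) {
  for (int v : {1, 2, 3})
    visitor(v);
}

void Demo() {
  // The lambda outlives this synchronous call, satisfying the lifetime rules.
  ForEachInt([](int v) { std::printf("%d\n", v); });
}
```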

View File

@ -82,7 +82,7 @@ struct MD5CE {
DCHECK_EQ(m % 64, 0u);
if (i < n) {
// Emit the message itself...
return static_cast<uint8_t>(data[i]);
return data[i];
} else if (i == n) {
// ...followed by the end of message marker.
return 0x80;
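Editorial note: a hedged standalone sketch of the byte selector this hunk touches; MD5 padding is the message bytes, a 0x80 marker, zeros, then the 64-bit bit length (the length tail is omitted here):
```
#include <cstddef>
#include <cstdint>

constexpr uint8_t PaddedMessageByte(const char* data, size_t n, size_t i) {
  if (i < n)
    return static_cast<uint8_t>(data[i]);  // the message itself
  if (i == n)
    return 0x80;                           // end-of-message marker
  return 0;  // zero padding (64-bit length tail omitted in this sketch)
}
```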

View File

@ -17,109 +17,29 @@ namespace device_util {
// The returned string is the string returned by sysctlbyname() with name
// "hw.machine". Possible (known) values include:
//
// iPhone7,1 -> iPhone 6 Plus
// iPhone7,2 -> iPhone 6
// iPhone8,1 -> iPhone 6s
// iPhone8,2 -> iPhone 6s Plus
// iPhone8,4 -> iPhone SE (GSM)
// iPhone9,1 -> iPhone 7
// iPhone9,2 -> iPhone 7 Plus
// iPhone9,3 -> iPhone 7
// iPhone9,4 -> iPhone 7 Plus
// iPhone10,1 -> iPhone 8
// iPhone10,2 -> iPhone 8 Plus
// iPhone10,3 -> iPhone X Global
// iPhone10,4 -> iPhone 8
// iPhone10,5 -> iPhone 8 Plus
// iPhone10,6 -> iPhone X GSM
// iPhone11,2 -> iPhone XS
// iPhone11,4 -> iPhone XS Max
// iPhone11,6 -> iPhone XS Max Global
// iPhone11,8 -> iPhone XR
// iPhone12,1 -> iPhone 11
// iPhone12,3 -> iPhone 11 Pro
// iPhone12,5 -> iPhone 11 Pro Max
// iPhone12,8 -> iPhone SE 2nd Gen
// iPhone13,1 -> iPhone 12 Mini
// iPhone13,2 -> iPhone 12
// iPhone13,3 -> iPhone 12 Pro
// iPhone13,4 -> iPhone 12 Pro Max
// iPhone14,2 -> iPhone 13 Pro
// iPhone14,3 -> iPhone 13 Pro Max
// iPhone14,4 -> iPhone 13 Mini
// iPhone14,5 -> iPhone 13
// iPhone14,6 -> iPhone SE 3rd Gen
// iPhone14,7 -> iPhone 14
// iPhone14,8 -> iPhone 14 Plus
// iPhone15,2 -> iPhone 14 Pro
// iPhone15,3 -> iPhone 14 Pro Max
// iPhone1,1 -> iPhone 1G
// iPhone1,2 -> iPhone 3G
// iPhone2,1 -> iPhone 3GS
// iPhone3,1 -> iPhone 4/AT&T
// iPhone3,2 -> iPhone 4/Other Carrier?
// iPhone3,3 -> iPhone 4/Other Carrier?
// iPhone4,1 -> iPhone 4S
//
// iPad3,4 -> 4th Gen iPad
// iPad3,5 -> 4th Gen iPad GSM+LTE
// iPad3,6 -> 4th Gen iPad CDMA+LTE
// iPad4,1 -> iPad Air (WiFi)
// iPad4,2 -> iPad Air (GSM+CDMA)
// iPad4,3 -> 1st Gen iPad Air (China)
// iPad4,4 -> iPad mini Retina (WiFi)
// iPad4,5 -> iPad mini Retina (GSM+CDMA)
// iPad4,6 -> iPad mini Retina (China)
// iPad4,7 -> iPad mini 3 (WiFi)
// iPad4,8 -> iPad mini 3 (GSM+CDMA)
// iPad4,9 -> iPad Mini 3 (China)
// iPad5,1 -> iPad mini 4 (WiFi)
// iPad5,2 -> 4th Gen iPad mini (WiFi+Cellular)
// iPad5,3 -> iPad Air 2 (WiFi)
// iPad5,4 -> iPad Air 2 (Cellular)
// iPad6,3 -> iPad Pro (9.7 inch, WiFi)
// iPad6,4 -> iPad Pro (9.7 inch, WiFi+LTE)
// iPad6,7 -> iPad Pro (12.9 inch, WiFi)
// iPad6,8 -> iPad Pro (12.9 inch, WiFi+LTE)
// iPad6,11 -> iPad (2017)
// iPad6,12 -> iPad (2017)
// iPad7,1 -> iPad Pro 2nd Gen (WiFi)
// iPad7,2 -> iPad Pro 2nd Gen (WiFi+Cellular)
// iPad7,3 -> iPad Pro 10.5-inch 2nd Gen
// iPad7,4 -> iPad Pro 10.5-inch 2nd Gen
// iPad7,5 -> iPad 6th Gen (WiFi)
// iPad7,6 -> iPad 6th Gen (WiFi+Cellular)
// iPad7,11 -> iPad 7th Gen 10.2-inch (WiFi)
// iPad7,12 -> iPad 7th Gen 10.2-inch (WiFi+Cellular)
// iPad8,1 -> iPad Pro 11 inch 3rd Gen (WiFi)
// iPad8,2 -> iPad Pro 11 inch 3rd Gen (1TB, WiFi)
// iPad8,3 -> iPad Pro 11 inch 3rd Gen (WiFi+Cellular)
// iPad8,4 -> iPad Pro 11 inch 3rd Gen (1TB, WiFi+Cellular)
// iPad8,5 -> iPad Pro 12.9 inch 3rd Gen (WiFi)
// iPad8,6 -> iPad Pro 12.9 inch 3rd Gen (1TB, WiFi)
// iPad8,7 -> iPad Pro 12.9 inch 3rd Gen (WiFi+Cellular)
// iPad8,8 -> iPad Pro 12.9 inch 3rd Gen (1TB, WiFi+Cellular)
// iPad8,9 -> iPad Pro 11 inch 4th Gen (WiFi)
// iPad8,10 -> iPad Pro 11 inch 4th Gen (WiFi+Cellular)
// iPad8,11 -> iPad Pro 12.9 inch 4th Gen (WiFi)
// iPad8,12 -> iPad Pro 12.9 inch 4th Gen (WiFi+Cellular)
// iPad11,1 -> iPad mini 5th Gen (WiFi)
// iPad11,2 -> iPad mini 5th Gen
// iPad11,3 -> iPad Air 3rd Gen (WiFi)
// iPad11,4 -> iPad Air 3rd Gen
// iPad11,6 -> iPad 8th Gen (WiFi)
// iPad11,7 -> iPad 8th Gen (WiFi+Cellular)
// iPad12,1 -> iPad 9th Gen (WiFi)
// iPad12,2 -> iPad 9th Gen (WiFi+Cellular)
// iPad14,1 -> iPad mini 6th Gen (WiFi)
// iPad14,2 -> iPad mini 6th Gen (WiFi+Cellular)
// iPad13,1 -> iPad Air 4th Gen (WiFi)
// iPad13,2 -> iPad Air 4th Gen (WiFi+Cellular)
// iPad13,4 -> iPad Pro 11 inch 5th Gen
// iPad13,5 -> iPad Pro 11 inch 5th Gen
// iPad13,6 -> iPad Pro 11 inch 5th Gen
// iPad13,7 -> iPad Pro 11 inch 5th Gen
// iPad13,8 -> iPad Pro 12.9 inch 5th Gen
// iPad13,9 -> iPad Pro 12.9 inch 5th Gen
// iPad13,10 -> iPad Pro 12.9 inch 5th Gen
// iPad13,11 -> iPad Pro 12.9 inch 5th Gen
// iPad13,16 -> iPad Air 5th Gen (WiFi)
// iPad13,17 -> iPad Air 5th Gen (WiFi+Cellular)
// iPod1,1 -> iPod touch 1G
// iPod2,1 -> iPod touch 2G
// iPod2,2 -> ?
// iPod3,1 -> iPod touch 3G
// iPod4,1 -> iPod touch 4G
// iPod5,1 -> ?
//
// iPad1,1 -> iPad 1G, WiFi
// iPad1,? -> iPad 1G, 3G <- needs 3G owner to test
// iPad2,1 -> iPad 2G, WiFi
//
// AppleTV2,1 -> AppleTV 2
//
// i386 -> Simulator
// x86_64 -> Simulator
std::string GetPlatform();
// Returns true if the application is running on a device with 512MB or more

View File

@ -58,15 +58,11 @@ namespace ios {
namespace device_util {
std::string GetPlatform() {
#if TARGET_OS_SIMULATOR
return getenv("SIMULATOR_MODEL_IDENTIFIER");
#elif TARGET_OS_IPHONE
std::string platform;
size_t size = 0;
sysctlbyname("hw.machine", NULL, &size, NULL, 0);
sysctlbyname("hw.machine", base::WriteInto(&platform, size), &size, NULL, 0);
return platform;
#endif
}
bool RamIsAtLeast512Mb() {

View File

@ -58,10 +58,6 @@ BASE_EXPORT bool IsMultipleScenesSupported();
// speed up actual launch time.
BASE_EXPORT bool IsApplicationPreWarmed();
// The iPhone 14 Pro and Pro Max introduced a dynamic island. This should only
// be called when working around UIKit bugs.
BASE_EXPORT bool HasDynamicIsland();
} // namespace ios
} // namespace base

View File

@ -8,7 +8,6 @@
#import <UIKit/UIKit.h>
#include <stddef.h>
#import "base/ios/device_util.h"
#include "base/mac/foundation_util.h"
#include "base/system/sys_info.h"
@ -88,12 +87,5 @@ bool IsApplicationPreWarmed() {
return [NSProcessInfo.processInfo.environment objectForKey:@"ActivePrewarm"];
}
bool HasDynamicIsland() {
std::string hardware_model = ::ios::device_util::GetPlatform();
static bool is_dynamic_island_model =
(hardware_model == "iPhone15,2" || hardware_model == "iPhone15,3");
return is_dynamic_island_model;
}
} // namespace ios
} // namespace base

View File

@ -480,7 +480,7 @@ absl::optional<Value> JSONParser::ConsumeList() {
return absl::nullopt;
}
Value::List list;
Value::ListStorage list_storage;
Token token = GetNextToken();
while (token != T_ARRAY_END) {
@ -490,7 +490,7 @@ absl::optional<Value> JSONParser::ConsumeList() {
return absl::nullopt;
}
list.Append(std::move(*item));
list_storage.push_back(std::move(*item));
token = GetNextToken();
if (token == T_LIST_SEPARATOR) {
@ -508,7 +508,7 @@ absl::optional<Value> JSONParser::ConsumeList() {
ConsumeChar(); // Closing ']'.
return Value(std::move(list));
return Value(std::move(list_storage));
}
absl::optional<Value> JSONParser::ConsumeString() {

View File

@ -4,16 +4,25 @@
#include "base/location.h"
#include "build/build_config.h"
// location.h is a widely included header and its size can significantly impact
// build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 390000
#endif
#if defined(COMPILER_MSVC)
#include <intrin.h>
#endif
#include "base/compiler_specific.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#if defined(COMPILER_MSVC)
#include <intrin.h>
#endif
namespace base {
namespace {

View File

@ -3,6 +3,15 @@
// found in the LICENSE file.
#include "base/logging.h"
#include <atomic>
#include <memory>
// logging.h is a widely included header and its size has significant impact on
// build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 470000
#endif // NACL_TC_REV
#ifdef BASE_CHECK_H_
#error "logging.h should not include check.h"
@ -11,13 +20,14 @@
#include <limits.h>
#include <stdint.h>
#include <atomic>
#include <memory>
#include <tuple>
#include <vector>
#include "base/base_export.h"
#include "base/debug/crash_logging.h"
#if defined(LEAK_SANITIZER) && !BUILDFLAG(IS_NACL)
#include "base/debug/leak_annotations.h"
#endif // defined(LEAK_SANITIZER) && !BUILDFLAG(IS_NACL)
#include "base/immediate_crash.h"
#include "base/pending_task.h"
#include "base/strings/string_piece.h"
@ -25,15 +35,6 @@
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#if !BUILDFLAG(IS_NACL)
#include "base/auto_reset.h"
#include "base/debug/crash_logging.h"
#endif // !BUILDFLAG(IS_NACL)
#if defined(LEAK_SANITIZER) && !BUILDFLAG(IS_NACL)
#include "base/debug/leak_annotations.h"
#endif // defined(LEAK_SANITIZER) && !BUILDFLAG(IS_NACL)
#if BUILDFLAG(IS_WIN)
#include <io.h>
#include <windows.h>
@ -461,53 +462,14 @@ void WriteToFd(int fd, const char* data, size_t length) {
}
}
void SetLogFatalCrashKey(LogMessage* log_message) {
#if !BUILDFLAG(IS_NACL)
// In case of an out-of-memory condition, this code could be reentered when
// constructing and storing the key. Using a static is not thread-safe, but if
// multiple threads are in the process of a fatal crash at the same time, this
// should work.
static bool guarded = false;
if (guarded)
return;
base::AutoReset<bool> guard(&guarded, true);
static auto* const crash_key = base::debug::AllocateCrashKeyString(
"LOG_FATAL", base::debug::CrashKeySize::Size1024);
base::debug::SetCrashKeyString(crash_key, log_message->BuildCrashString());
#endif // !BUILDFLAG(IS_NACL)
}
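Editorial note: a minimal sketch of the reentrancy-guard pattern used above, assuming the worst case the comment describes (a fatal log fired while one is already being reported); `DoReport()` is hypothetical:
```
#include "base/auto_reset.h"

void DoReport();  // hypothetical reporting hook, for illustration

void ReportFatalOnce() {
  static bool reporting = false;  // deliberately not thread-safe; see above
  if (reporting)
    return;                       // re-entered during the first report: bail
  base::AutoReset<bool> guard(&reporting, true);
  DoReport();                     // may itself log fatally; re-entry is a no-op
}
```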
std::string BuildCrashString(const char* file,
int line,
const char* message_without_prefix) {
// Only log last path component.
if (file) {
const char* slash = strrchr(file,
#if BUILDFLAG(IS_WIN)
'\\'
#else
'/'
#endif // BUILDFLAG(IS_WIN)
);
if (slash) {
file = slash + 1;
}
}
return base::StringPrintf("%s:%d: %s", file, line, message_without_prefix);
}
} // namespace
#if BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#if defined(DCHECK_IS_CONFIGURABLE)
// In DCHECK-enabled Chrome builds, allow the meaning of LOGGING_DCHECK to be
// determined at run-time. We default it to INFO, to avoid it triggering
// crashes before the run-time has explicitly chosen the behaviour.
BASE_EXPORT logging::LogSeverity LOGGING_DCHECK = LOGGING_INFO;
#endif // BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#endif // defined(DCHECK_IS_CONFIGURABLE)
// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to have
// an object of the correct type on the LHS of the unused part of the ternary
@ -729,9 +691,6 @@ LogMessage::~LogMessage() {
TRACE_LOG_MESSAGE(
file_, base::StringPiece(str_newline).substr(message_start_), line_);
if (severity_ == LOGGING_FATAL)
SetLogFatalCrashKey(this);
// Give any log message handler first dibs on the message.
if (g_log_message_handler &&
g_log_message_handler(severity_, file_, line_, message_start_,
@ -954,8 +913,21 @@ LogMessage::~LogMessage() {
}
std::string LogMessage::BuildCrashString() const {
return logging::BuildCrashString(file(), line(),
str().c_str() + message_start_);
return BuildCrashString(file(), line(), str().c_str() + message_start_);
}
std::string LogMessage::BuildCrashString(const char* file,
int line,
const char* message_without_prefix) {
// Only log last path component.
if (file) {
const char* slash = strrchr(file, '/');
if (slash) {
file = slash + 1;
}
}
return base::StringPrintf("%s:%d: %s", file, line, message_without_prefix);
}
// writes the common header info to the stream

View File

@ -615,11 +615,11 @@ BASE_EXPORT extern std::ostream* g_swallow_stream;
// Definitions for DCHECK et al.
#if BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#if defined(DCHECK_IS_CONFIGURABLE)
BASE_EXPORT extern LogSeverity LOGGING_DCHECK;
#else
constexpr LogSeverity LOGGING_DCHECK = LOGGING_FATAL;
#endif // BUILDFLAG(DCHECK_IS_CONFIGURABLE)
#endif // defined(DCHECK_IS_CONFIGURABLE)
// Redefine the standard assert to use our nice log files
#undef assert
@ -653,6 +653,9 @@ class BASE_EXPORT LogMessage {
// Gets file:line: message in a format suitable for crash reporting.
std::string BuildCrashString() const;
static std::string BuildCrashString(const char* file,
int line,
const char* message_without_prefix);
private:
void Init(const char* file, int line);

View File

@ -28,6 +28,8 @@
extern "C" {
CFTypeID SecKeyGetTypeID();
#if !BUILDFLAG(IS_IOS)
CFTypeID SecACLGetTypeID();
CFTypeID SecTrustedApplicationGetTypeID();
// The NSFont/CTFont toll-free bridging is broken before 10.15.
// http://www.openradar.me/15341349 rdar://15341349
//
@ -406,10 +408,12 @@ CFCastStrict<CTFontRef>(const CFTypeRef& cf_val) {
#endif
#if !BUILDFLAG(IS_IOS)
CF_CAST_DEFN(SecACL)
CF_CAST_DEFN(SecAccessControl)
CF_CAST_DEFN(SecCertificate)
CF_CAST_DEFN(SecKey)
CF_CAST_DEFN(SecPolicy)
CF_CAST_DEFN(SecTrustedApplication)
#endif
#undef CF_CAST_DEFN

View File

@ -20,14 +20,6 @@ This is the brief version. Googlers can search internally for further
reading.
***
*** aside
MTECheckedPtr is one particular incarnation of `raw_ptr`, and so the
primary documentation is kept here in `//base/memory/`. However, the
implementation is woven deeply into PartitionAlloc, and inevitably
some dirty PA-internal details may bubble up here when discussing
how MTECheckedPtr works.
***
MTECheckedPtr is a Chromium-specific implementation of ARM's
[MTE concept][arm-mte]. When MTECheckedPtr is enabled,
@ -92,22 +84,4 @@ When MTECheckedPtr *is* enabled (not the default for anybody),
both of the above degrade the `raw_ptr<T, D>` into the no-op version
of `raw_ptr`.
## Appendix: PA-Internal Tag Locations
[The top-level PartitionAlloc documentation][pa-readme]
mentions the space in which
MTECheckedPtr's tags reside - in the space labeled "Bitmaps(?)" in the
super page diagram, before the first usable slot span. This diagram
only applies to *normal* buckets and not to *direct map* buckets.
While direct map super pages also cordon off the first partition page
and offer access to the core metadata within, reservations are always
permissible immediately after, and there are no bitmaps (whether
from *Scan or MTECheckedPtr) following that first partition page.
In implementing MTECheckedPtr support for direct maps, we decided
not to add this extra headroom for bitmaps; instead, the tag is
placed directly in `SubsequentPageMetadata`, colocated with the core
metadata in the first partition page.
[arm-mte]: https://community.arm.com/arm-community-blogs/b/architectures-and-processors-blog/posts/enhancing-memory-safety
[pa-readme]: ../allocator/partition_allocator/PartitionAlloc.md#layout-in-memory

View File

@ -36,7 +36,6 @@
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/check_op.h"
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
@ -161,7 +160,12 @@ struct MTECheckedPtrImplPartitionAllocSupport {
// Disambiguation: UntagPtr removes the hardware MTE tag, whereas this class
// is responsible for handling the software MTE tag.
auto addr = partition_alloc::UntagPtr(ptr);
return partition_alloc::IsManagedByPartitionAlloc(addr);
// MTECheckedPtr algorithms work only when memory is
// allocated by PartitionAlloc, from the normal buckets pool.
//
// TODO(crbug.com/1307514): Allow direct-map buckets.
return partition_alloc::IsManagedByPartitionAlloc(addr) &&
partition_alloc::internal::IsManagedByNormalBuckets(addr);
}
// Returns a pointer to the tag that protects the area pointed to by |addr|.
@ -198,7 +202,6 @@ struct MTECheckedPtrImpl {
static_assert(sizeof(partition_alloc::PartitionTag) * 8 <= kTagBits, "");
uintptr_t tag = *(static_cast<volatile partition_alloc::PartitionTag*>(
PartitionAllocSupport::TagPointer(addr)));
DCHECK(tag);
tag <<= kValidAddressBits;
addr |= tag;
@ -455,43 +458,14 @@ struct BackupRefPtrImpl {
typename Z,
typename = std::enable_if_t<offset_type<Z>, void>>
static ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// First check if the new address lands within the same allocation
// (end-of-allocation address is ok too). It has a non-trivial cost, but
// it's cheaper and more secure than the previous implementation that
// rewrapped the pointer (wrapped the new pointer and unwrapped the old
// one).
#if DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr);
if (IsSupportedAndNotNull(address))
CHECK(IsValidDelta(address, delta_elems * static_cast<Z>(sizeof(T))));
return wrapped_ptr + delta_elems;
#else
// In the "before allocation" mode, on 32-bit, we can run into a problem
// that the end-of-allocation address could fall out of "GigaCage", if this
// is the last slot of the super page, thus pointing to the guard page. This
// means the ref-count won't be decreased when the pointer is released
// (leak).
//
// We could possibly solve it in a few different ways:
// - Add the trailing guard page to "GigaCage", but we'd have to think very
// hard if this doesn't create another hole.
// - Add an address adjustment to "GigaCage" check, similar as the one in
// PartitionAllocGetSlotStartInBRPPool(), but that seems fragile, not to
// mention adding an extra instruction to an inlined hot path.
// - Let the leak happen, since it should be a very rare condition.
// - Go back to the previous solution of rewrapping the pointer, but that
// had an issue of losing protection in case the pointer ever gets shifted
// before the end of allocation.
//
// We decided to cross that bridge once we get there... if we ever get
// there. Currently there are no plans to switch back to the "before
// allocation" mode.
//
// This problem doesn't exist in the "previous slot" mode, or any mode that
// involves putting extras after the allocation, because the
// end-of-allocation address belongs to the same slot.
static_assert(false);
#endif
T* new_wrapped_ptr = WrapRawPtr(wrapped_ptr + delta_elems);
ReleaseWrappedPtr(wrapped_ptr);
return new_wrapped_ptr;
}
// Returns a copy of a wrapped pointer, without making an assertion on whether
@ -711,16 +685,6 @@ struct IsSupportedType<content::responsiveness::Calculator> {
static constexpr bool value = false;
};
// IsRawPtrCountingImpl<T>::value answers whether T is a specialization of
// RawPtrCountingImplWrapperForTest, to know whether Impl is for testing
// purposes.
template <typename T>
struct IsRawPtrCountingImpl : std::false_type {};
template <typename T>
struct IsRawPtrCountingImpl<internal::RawPtrCountingImplWrapperForTest<T>>
: std::true_type {};
#if __OBJC__
// raw_ptr<T> is not compatible with pointers to Objective-C classes for a
// multitude of reasons. They may fail to compile in many cases, and wouldn't
@ -808,11 +772,6 @@ using DefaultRawPtrImpl = RawPtrBanDanglingIfSupported;
template <typename T, typename Impl = DefaultRawPtrImpl>
class TRIVIAL_ABI GSL_POINTER raw_ptr {
using DanglingRawPtr = std::conditional_t<
raw_ptr_traits::IsRawPtrCountingImpl<Impl>::value,
raw_ptr<T, internal::RawPtrCountingImplWrapperForTest<RawPtrMayDangle>>,
raw_ptr<T, RawPtrMayDangle>>;
public:
static_assert(raw_ptr_traits::IsSupportedType<T>::value,
"raw_ptr<T> doesn't work with this kind of pointee type T");
@ -829,23 +788,15 @@ class TRIVIAL_ABI GSL_POINTER raw_ptr {
p.wrapped_ptr_ = nullptr;
}
ALWAYS_INLINE raw_ptr& operator=(const raw_ptr& p) noexcept {
ALWAYS_INLINE raw_ptr& operator=(const raw_ptr& p) {
// Duplicate before releasing, in case the pointer is assigned to itself.
//
// Unlike the move version of this operator, don't add |this != &p| branch,
// for performance reasons. Even though Duplicate() is not cheap, we
// practically never assign a raw_ptr<T> to itself. We suspect that a
// cumulative cost of a conditional branch, even if always correctly
// predicted, would exceed that.
T* new_ptr = Impl::Duplicate(p.wrapped_ptr_);
Impl::ReleaseWrappedPtr(wrapped_ptr_);
wrapped_ptr_ = new_ptr;
return *this;
}
ALWAYS_INLINE raw_ptr& operator=(raw_ptr&& p) noexcept {
// Unlike the copy version of this operator, this branch is necessary
// for correctness.
ALWAYS_INLINE raw_ptr& operator=(raw_ptr&& p) {
if (LIKELY(this != &p)) {
Impl::ReleaseWrappedPtr(wrapped_ptr_);
wrapped_ptr_ = p.wrapped_ptr_;
@ -879,7 +830,7 @@ class TRIVIAL_ABI GSL_POINTER raw_ptr {
ALWAYS_INLINE raw_ptr& operator=(const raw_ptr&) noexcept = default;
ALWAYS_INLINE raw_ptr& operator=(raw_ptr&&) noexcept = default;
ALWAYS_INLINE ~raw_ptr() noexcept = default;
ALWAYS_INLINE ~raw_ptr() = default;
#endif // BUILDFLAG(USE_BACKUP_REF_PTR)
@ -1026,43 +977,26 @@ class TRIVIAL_ABI GSL_POINTER raw_ptr {
// during the free operation, which will lead to taking the slower path that
// involves quarantine.
ALWAYS_INLINE void ClearAndDelete() noexcept {
delete GetForExtractionAndReset();
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
// We cannot directly `delete` a wrapped pointer, since the tag bits
// atop will lead PA totally astray.
T* ptr = Impl::SafelyUnwrapPtrForExtraction(wrapped_ptr_);
#else
T* ptr = wrapped_ptr_;
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
operator=(nullptr);
delete ptr;
}
ALWAYS_INLINE void ClearAndDeleteArray() noexcept {
delete[] GetForExtractionAndReset();
}
// Clear the underlying pointer and return another raw_ptr instance
// that is allowed to dangle.
// This can be useful in cases such as:
// ```
// ptr.ExtractAsDangling()->SelfDestroy();
// ```
// ```
// c_style_api_do_something_and_destroy(ptr.ExtractAsDangling());
// ```
// NOTE, avoid using this method as it indicates an error-prone memory
// ownership pattern. If possible, use smart pointers like std::unique_ptr<>
// instead of raw_ptr<>.
// If you have to use it, avoid saving the return value in a long-lived
// variable (or worse, a field)! It's meant to be used as a temporary, to be
// passed into a cleanup & freeing function, and destructed at the end of the
// statement.
ALWAYS_INLINE DanglingRawPtr ExtractAsDangling() noexcept {
if constexpr (std::is_same_v<
typename std::remove_reference<decltype(*this)>::type,
DanglingRawPtr>) {
DanglingRawPtr res(std::move(*this));
// Not all implementation clear the source pointer on move, so do it
// here just in case. Should be cheap.
#if defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
// We cannot directly `delete` a wrapped pointer, since the tag bits
// atop will lead PA totally astray.
T* ptr = Impl::SafelyUnwrapPtrForExtraction(wrapped_ptr_);
#else
T* ptr = wrapped_ptr_;
#endif // defined(PA_USE_MTE_CHECKED_PTR_WITH_64_BITS_POINTERS)
operator=(nullptr);
return res;
} else {
T* ptr = GetForExtraction();
DanglingRawPtr res(ptr);
operator=(nullptr);
return res;
}
delete[] ptr;
}
// Comparison operators between raw_ptr and raw_ptr<U>/U*/std::nullptr_t.
@ -1197,12 +1131,6 @@ class TRIVIAL_ABI GSL_POINTER raw_ptr {
return Impl::UnsafelyUnwrapPtrForComparison(wrapped_ptr_);
}
ALWAYS_INLINE T* GetForExtractionAndReset() {
T* ptr = GetForExtraction();
operator=(nullptr);
return ptr;
}
T* wrapped_ptr_;
template <typename U, typename V>
@ -1287,7 +1215,7 @@ using DisableDanglingPtrDetection = base::RawPtrMayDangle;
// See `docs/dangling_ptr.md`
// Annotates known dangling raw_ptr. Those haven't been triaged yet. All the
// occurrences are meant to be removed. See https://crbug.com/1291138.
// occurrences are meant to be removed. See https://crbug.com/1291138.
using DanglingUntriaged = DisableDanglingPtrDetection;
// The following template parameters are only meaningful when `raw_ptr`

View File

@ -1,7 +1,7 @@
# raw_ptr&lt;T&gt; (aka MiraclePtr, aka BackupRefPtr)
`raw_ptr<T>` is a non-owning smart pointer that has improved memory-safety over
raw pointers. It behaves just like a raw pointer on platforms where
over raw pointers. It behaves just like a raw pointer on platforms where
USE_BACKUP_REF_PTR is off, and almost like one when it's on. The main
difference is that when USE_BACKUP_REF_PTR is enabled, it's zero-initialized and
cleared on destruction and move. (You should continue to explicitly initialize
@ -50,7 +50,7 @@ exclusions via:
- Code that cannot depend on `//base`
- Code in `//ppapi`
- `RAW_PTR_EXCLUSION` C++ attribute to exclude individual fields. Examples:
- Cases where `raw_ptr<T>` won't compile (e.g. cases covered in
- Cases where `raw_ptr<T>` won't compile (e.g. cases covered in
[the "Unsupported cases leading to compile errors" section](#Unsupported-cases-leading-to-compile-errors)).
Make sure to also look at
[the "Recoverable compile-time problems" section](#Recoverable-compile_time-problems).

View File

@ -5,7 +5,6 @@
#ifndef BASE_MEMORY_RAW_REF_H_
#define BASE_MEMORY_RAW_REF_H_
#include <memory>
#include <type_traits>
#include <utility>
@ -68,7 +67,7 @@ class TRIVIAL_ABI GSL_POINTER raw_ref {
std::is_same_v<Impl, internal::AsanBackupRefPtrImpl>;
public:
ALWAYS_INLINE explicit raw_ref(T& p) noexcept : inner_(std::addressof(p)) {}
ALWAYS_INLINE explicit raw_ref(T& p) noexcept : inner_(&p) {}
ALWAYS_INLINE raw_ref& operator=(T& p) noexcept {
inner_.operator=(&p);
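Editorial note: the constructor change in this hunk swaps `&p` for `std::addressof(p)`. A small sketch of the case that separates them (`Evil` is illustrative):
```
#include <memory>

struct Evil {
  Evil* operator&() { return nullptr; }  // user-overloaded address-of
};

void Demo() {
  Evil e;
  Evil* a = &e;                 // calls Evil::operator& -> nullptr
  Evil* b = std::addressof(e);  // the real address, ignoring the overload
  (void)a;
  (void)b;
}
```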
@ -275,29 +274,6 @@ class TRIVIAL_ABI GSL_POINTER raw_ref {
template <class T>
raw_ref(T) -> raw_ref<T>;
// Template helpers for working with raw_ref<T>.
template <typename T>
struct IsRawRef : std::false_type {};
template <typename T, typename I>
struct IsRawRef<raw_ref<T, I>> : std::true_type {};
template <typename T>
inline constexpr bool IsRawRefV = IsRawRef<T>::value;
template <typename T>
struct RemoveRawRef {
using type = T;
};
template <typename T, typename I>
struct RemoveRawRef<raw_ref<T, I>> {
using type = T;
};
template <typename T>
using RemoveRawRefT = typename RemoveRawRef<T>::type;
} // namespace base
using base::raw_ref;
@ -316,12 +292,40 @@ struct less<raw_ref<T, Impl>> {
return lhs < rhs;
}
bool operator()(T& lhs, const raw_ref<T, Impl>& rhs) const {
bool operator()(const raw_ref<const T, Impl>& lhs,
const raw_ref<const T, Impl>& rhs) const {
Impl::IncrementLessCountForTest();
return lhs < rhs;
}
bool operator()(const raw_ref<T, Impl>& lhs, T& rhs) const {
bool operator()(const raw_ref<T, Impl>& lhs,
const raw_ref<const T, Impl>& rhs) const {
Impl::IncrementLessCountForTest();
return lhs < rhs;
}
bool operator()(const raw_ref<const T, Impl>& lhs,
const raw_ref<T, Impl>& rhs) const {
Impl::IncrementLessCountForTest();
return lhs < rhs;
}
bool operator()(const T& lhs, const raw_ref<const T, Impl>& rhs) const {
Impl::IncrementLessCountForTest();
return lhs < rhs;
}
bool operator()(const T& lhs, const raw_ref<T, Impl>& rhs) const {
Impl::IncrementLessCountForTest();
return lhs < rhs;
}
bool operator()(const raw_ref<const T, Impl>& lhs, const T& rhs) const {
Impl::IncrementLessCountForTest();
return lhs < rhs;
}
bool operator()(const raw_ref<T, Impl>& lhs, const T& rhs) const {
Impl::IncrementLessCountForTest();
return lhs < rhs;
}

View File

@ -8,7 +8,6 @@
#include <type_traits>
#include "base/memory/raw_ptr.h"
#include "base/memory/raw_ref.h"
#include "base/template_util.h"
// It is dangerous to post a task with a T* argument where T is a subtype of
@ -37,13 +36,8 @@ struct IsRefCountedType<T,
// pointer type and are convertible to a RefCounted(Base|ThreadSafeBase) type.
template <typename T>
struct NeedsScopedRefptrButGetsRawPtr
: std::disjunction<
// TODO(danakj): Should ban native references and
// std::reference_wrapper here too.
std::conjunction<base::IsRawRef<T>,
IsRefCountedType<base::RemoveRawRefT<T>>>,
std::conjunction<base::IsPointer<T>,
IsRefCountedType<base::RemovePointerT<T>>>> {
: std::conjunction<base::IsPointer<T>,
IsRefCountedType<base::RemovePointerT<T>>> {
static_assert(!std::is_reference<T>::value,
"NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
};

View File

@ -237,14 +237,6 @@ void MessagePumpEpoll::OnEpollEvent(const epoll_event& e) {
// `entry` during the loop below. This copy is inexpensive in practice
// because the size of this vector is expected to be very small (<= 2).
auto interests = entry.interests;
// Any of these interests' event handlers may destroy any of the others'
// controllers. Start all of them watching for destruction before we actually
// dispatch any events.
for (const auto& interest : interests.container()) {
interest->WatchForControllerDestruction();
}
for (const auto& interest : interests.container()) {
if (!interest->active()) {
continue;
@ -268,16 +260,10 @@ void MessagePumpEpoll::OnEpollEvent(const epoll_event& e) {
UpdateEpollEvent(entry);
}
if (!interest->was_controller_destroyed()) {
HandleEvent(entry.fd, can_read, can_write, interest->controller());
}
}
for (const auto& interest : interests.container()) {
interest->StopWatchingForControllerDestruction();
}
}
void MessagePumpEpoll::HandleEvent(int fd,
bool can_read,
bool can_write,
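
The block removed from `OnEpollEvent()` armed a destruction watch on every interest before dispatching any events, because one interest's handler may destroy another interest's controller mid-loop. An illustrative two-phase sketch of that pattern (the types and hooks are stand-ins, not the pump's real API):

```cpp
#include <vector>

struct Interest {
  bool controller_destroyed = false;
  bool active = true;
  void WatchForControllerDestruction() { /* hook the controller's destructor */ }
  void StopWatchingForControllerDestruction() { /* unhook */ }
};

void DispatchAll(std::vector<Interest*>& interests) {
  // Phase 1: arm every watch before any handler runs.
  for (Interest* i : interests)
    i->WatchForControllerDestruction();
  // Phase 2: dispatch, skipping anything destroyed by an earlier handler.
  for (Interest* i : interests) {
    if (!i->active || i->controller_destroyed)
      continue;
    // ... deliver the event; this may destroy other controllers ...
  }
  for (Interest* i : interests) {
    if (!i->controller_destroyed)
      i->StopWatchingForControllerDestruction();
  }
}
```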
@ -300,17 +286,13 @@ void MessagePumpEpoll::HandleEvent(int fd,
controller->created_from_location().file_name());
if (can_read && can_write) {
bool controller_was_destroyed = false;
bool* previous_was_destroyed_flag =
std::exchange(controller->was_destroyed_, &controller_was_destroyed);
controller->was_destroyed_ = &controller_was_destroyed;
controller->OnFdWritable();
if (!controller_was_destroyed) {
controller->OnFdReadable();
}
if (!controller_was_destroyed) {
controller->was_destroyed_ = previous_was_destroyed_flag;
} else if (previous_was_destroyed_flag) {
*previous_was_destroyed_flag = true;
controller->was_destroyed_ = nullptr;
}
} else if (can_write) {
controller->OnFdWritable();
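
The `std::exchange` line removed above is what makes the `was_destroyed_` guard nest correctly: each dispatch frame parks its own stack flag while remembering the outer frame's, and propagates destruction outward when the object dies inside a callback. A minimal self-contained sketch of the idiom (illustrative names):

```cpp
#include <utility>

class Controller {
 public:
  ~Controller() {
    if (was_destroyed_) *was_destroyed_ = true;  // signal the innermost frame
  }

  template <typename Callback>
  void Dispatch(Callback cb) {
    bool destroyed = false;
    // Park our flag; remember any flag an outer Dispatch() frame installed.
    bool* previous = std::exchange(was_destroyed_, &destroyed);
    cb(*this);
    if (destroyed) {
      if (previous)
        *previous = true;  // `this` is gone: tell the outer frame too
      return;              // and touch no members
    }
    was_destroyed_ = previous;  // restore the outer frame's flag
  }

 private:
  bool* was_destroyed_ = nullptr;
};
```

With a plain assignment instead of `std::exchange`, a nested `Dispatch()` would lose the outer frame's flag, and the outer frame could then use `this` after destruction.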

View File

@ -182,10 +182,9 @@ MessagePumpGlib::MessagePumpGlib()
if (RunningOnMainThread()) {
context_ = g_main_context_default();
} else {
owned_context_ = std::unique_ptr<GMainContext, GMainContextDeleter>(
g_main_context_new());
context_ = owned_context_.get();
context_ = g_main_context_new();
g_main_context_push_thread_default(context_);
context_owned_ = true;
}
// Create our wakeup pipe, which is used to flag when work was scheduled.
@ -198,22 +197,25 @@ MessagePumpGlib::MessagePumpGlib()
wakeup_gpollfd_->fd = wakeup_pipe_read_;
wakeup_gpollfd_->events = G_IO_IN;
work_source_ = std::unique_ptr<GSource, GSourceDeleter>(
g_source_new(&WorkSourceFuncs, sizeof(WorkSource)));
static_cast<WorkSource*>(work_source_.get())->pump = this;
g_source_add_poll(work_source_.get(), wakeup_gpollfd_.get());
g_source_set_priority(work_source_.get(), kPriorityWork);
work_source_ = g_source_new(&WorkSourceFuncs, sizeof(WorkSource));
static_cast<WorkSource*>(work_source_)->pump = this;
g_source_add_poll(work_source_, wakeup_gpollfd_.get());
g_source_set_priority(work_source_, kPriorityWork);
// This is needed to allow Run calls inside Dispatch.
g_source_set_can_recurse(work_source_.get(), TRUE);
g_source_attach(work_source_.get(), context_);
g_source_set_can_recurse(work_source_, TRUE);
g_source_attach(work_source_, context_);
}
MessagePumpGlib::~MessagePumpGlib() {
work_source_.reset();
g_source_destroy(work_source_);
g_source_unref(work_source_);
close(wakeup_pipe_read_);
close(wakeup_pipe_write_);
context_ = nullptr;
owned_context_.reset();
if (context_owned_) {
g_main_context_pop_thread_default(context_);
g_main_context_unref(context_);
}
}
MessagePumpGlib::FdWatchController::FdWatchController(const Location& location)
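
One side of this diff keeps the GLib handles in `std::unique_ptr`s with custom deleters, so ownership and teardown order live in the type and the destructor needs no manual unref calls or `context_owned_` flag. A sketch of that idiom against the real GLib API (the wrapper name is illustrative):

```cpp
#include <glib.h>

#include <memory>

// Pops and unrefs the thread-default context when the owner goes away.
struct GMainContextDeleter {
  void operator()(GMainContext* context) const {
    if (context) {
      g_main_context_pop_thread_default(context);
      g_main_context_unref(context);
    }
  }
};
using ScopedGMainContext = std::unique_ptr<GMainContext, GMainContextDeleter>;

int main() {
  ScopedGMainContext context(g_main_context_new());
  g_main_context_push_thread_default(context.get());
  // ... attach sources and iterate the context ...
  return 0;
}  // the deleter pops and unrefs automatically, in that order
```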

View File

@ -5,7 +5,6 @@
#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_GLIB_H_
#include <glib.h>
#include <memory>
#include "base/base_export.h"
@ -15,6 +14,10 @@
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
typedef struct _GMainContext GMainContext;
typedef struct _GPollFD GPollFD;
typedef struct _GSource GSource;
namespace base {
// This class implements a base MessagePump needed for TYPE_UI MessageLoops on
@ -107,22 +110,6 @@ class BASE_EXPORT MessagePumpGlib : public MessagePump,
void HandleFdWatchDispatch(FdWatchController* controller);
private:
struct GMainContextDeleter {
inline void operator()(GMainContext* context) const {
if (context) {
g_main_context_pop_thread_default(context);
g_main_context_unref(context);
}
}
};
struct GSourceDeleter {
inline void operator()(GSource* source) const {
if (source) {
g_source_destroy(source);
g_source_unref(source);
}
}
};
bool ShouldQuit() const;
// We may make recursive calls to Run, so we save state that needs to be
@ -131,15 +118,15 @@ class BASE_EXPORT MessagePumpGlib : public MessagePump,
raw_ptr<RunState> state_;
std::unique_ptr<GMainContext, GMainContextDeleter> owned_context_;
// This is a GLib structure that we can add event sources to. On the main
// thread, we use the default GLib context, which is the one to which all GTK
// events are dispatched.
raw_ptr<GMainContext> context_ = nullptr;
raw_ptr<GMainContext, DanglingUntriaged> context_ = nullptr;
bool context_owned_ = false;
// The work source. It is shared by all calls to Run and destroyed when
// the message pump is destroyed.
std::unique_ptr<GSource, GSourceDeleter> work_source_;
raw_ptr<GSource, DanglingUntriaged> work_source_;
// We use a wakeup pipe to make sure we'll get out of the glib polling phase
// when another thread has scheduled us to do some work. There is a glib

View File

@ -137,7 +137,7 @@ void MessagePumpLibevent::FdWatchController::OnFdWritable() {
watcher_->OnFileCanWriteWithoutBlocking(epoll_interest_->params().fd);
}
MessagePumpLibevent::MessagePumpLibevent() {
MessagePumpLibevent::MessagePumpLibevent() : event_base_(event_base_new()) {
#if BUILDFLAG(ENABLE_MESSAGE_PUMP_EPOLL)
if (g_use_epoll) {
epoll_pump_ = std::make_unique<MessagePumpEpoll>();
@ -154,7 +154,8 @@ MessagePumpLibevent::MessagePumpLibevent() {
#if BUILDFLAG(ENABLE_MESSAGE_PUMP_EPOLL)
MessagePumpLibevent::MessagePumpLibevent(decltype(kUseEpoll))
: epoll_pump_(std::make_unique<MessagePumpEpoll>()) {
: epoll_pump_(std::make_unique<MessagePumpEpoll>()),
event_base_(event_base_new()) {
epoll_pump_ = std::make_unique<MessagePumpEpoll>();
}
#endif
@ -169,8 +170,8 @@ MessagePumpLibevent::~MessagePumpLibevent() {
DCHECK(event_base_);
if (using_libevent) {
DCHECK(wakeup_event_);
event_del(wakeup_event_.get());
wakeup_event_.reset();
event_del(wakeup_event_);
delete wakeup_event_;
if (wakeup_pipe_in_ >= 0) {
if (IGNORE_EINTR(close(wakeup_pipe_in_)) < 0)
DPLOG(ERROR) << "close";
@ -180,7 +181,7 @@ MessagePumpLibevent::~MessagePumpLibevent() {
DPLOG(ERROR) << "close";
}
}
event_base_.reset();
event_base_free(event_base_);
}
// Must be called early in process startup, but after FeatureList
@ -250,7 +251,7 @@ bool MessagePumpLibevent::WatchFileDescriptor(int fd,
event_set(evt.get(), fd, event_mask, OnLibeventNotification, controller);
// Tell libevent which message pump this socket will belong to when we add it.
if (event_base_set(event_base_.get(), evt.get())) {
if (event_base_set(event_base_, evt.get())) {
DPLOG(ERROR) << "event_base_set(fd=" << EVENT_FD(evt.get()) << ")";
return false;
}
@ -312,7 +313,7 @@ void MessagePumpLibevent::Run(Delegate* delegate) {
// OnLibeventNotification() did enter a nested loop from here, it
// wouldn't be labeled as such in tracing by "ThreadController active".
// Contact gab@/scheduler-dev@ if a problematic trace emerges.
event_base_loop(event_base_.get(), EVLOOP_NONBLOCK);
event_base_loop(event_base_, EVLOOP_NONBLOCK);
bool attempt_more_work = immediate_work_available || processed_io_events_;
processed_io_events_ = false;
@ -342,8 +343,8 @@ void MessagePumpLibevent::Run(Delegate* delegate) {
struct timeval poll_tv;
poll_tv.tv_sec = static_cast<time_t>(delay.InSeconds());
poll_tv.tv_usec = delay.InMicroseconds() % Time::kMicrosecondsPerSecond;
event_set(timer_event.get(), -1, 0, timer_callback, event_base_.get());
event_base_set(event_base_.get(), timer_event.get());
event_set(timer_event.get(), -1, 0, timer_callback, event_base_);
event_base_set(event_base_, timer_event.get());
event_add(timer_event.get(), &poll_tv);
did_set_timer = true;
@ -353,7 +354,7 @@ void MessagePumpLibevent::Run(Delegate* delegate) {
// is conditionally interrupted to look for more work if we are aware of a
// delayed task that will need servicing.
delegate->BeforeWait();
event_base_loop(event_base_.get(), EVLOOP_ONCE);
event_base_loop(event_base_, EVLOOP_ONCE);
// We previously setup a timer to break out the event loop to look for more
// work. Now that we're here delete the event.
@ -412,12 +413,12 @@ bool MessagePumpLibevent::Init() {
wakeup_pipe_out_ = fds[0];
wakeup_pipe_in_ = fds[1];
wakeup_event_ = std::make_unique<event>();
event_set(wakeup_event_.get(), wakeup_pipe_out_, EV_READ | EV_PERSIST,
wakeup_event_ = new event;
event_set(wakeup_event_, wakeup_pipe_out_, EV_READ | EV_PERSIST,
OnWakeup, this);
event_base_set(event_base_.get(), wakeup_event_.get());
event_base_set(event_base_, wakeup_event_);
if (event_add(wakeup_event_.get(), nullptr))
if (event_add(wakeup_event_, nullptr))
return false;
return true;
}
@ -478,7 +479,7 @@ void MessagePumpLibevent::OnWakeup(int socket, short flags, void* context) {
DCHECK_EQ(nread, 1);
that->processed_io_events_ = true;
// Tell libevent to break out of inner loop.
event_base_loopbreak(that->event_base_.get());
event_base_loopbreak(that->event_base_);
}
MessagePumpLibevent::EpollInterest::EpollInterest(
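
The timer arming in `Run()` follows the classic libevent sequence: `event_set()` initializes the event (fd `-1` and no I/O flags make it a pure timer), `event_base_set()` binds it to a specific loop, and `event_add()` arms it with a `timeval`. A minimal standalone program exercising the same calls (assumes a libevent that still ships this 1.4-era compatibility API, as Chromium's vendored copy does):

```cpp
#include <event.h>

#include <cstdio>

// Breaks the loop when the timer fires, like the pump's timer_callback.
static void OnTimer(int /*fd*/, short /*flags*/, void* context) {
  std::puts("timer fired");
  event_base_loopbreak(static_cast<event_base*>(context));
}

int main() {
  event_base* base = event_base_new();

  event timer_event;
  event_set(&timer_event, -1, 0, OnTimer, base);  // fd -1, no flags: pure timer
  event_base_set(base, &timer_event);             // bind to our loop
  timeval poll_tv;
  poll_tv.tv_sec = 1;
  poll_tv.tv_usec = 0;
  event_add(&timer_event, &poll_tv);              // arm for one second

  event_base_loop(base, EVLOOP_ONCE);             // returns once the timer fires
  event_base_free(base);
  return 0;
}
```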

View File

@ -16,11 +16,11 @@
#include "base/message_loop/message_pump_buildflags.h"
#include "base/message_loop/watchable_io_message_pump_posix.h"
#include "base/threading/thread_checker.h"
#include "third_party/libevent/event.h"
// Declare structs we need from libevent.h rather than including it
struct event_base;
struct event;
namespace base {
class MessagePumpEpoll;
@ -72,22 +72,6 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump,
bool active() const { return active_; }
void set_active(bool active) { active_ = active; }
// Only meaningful between WatchForControllerDestruction() and
// StopWatchingForControllerDestruction().
bool was_controller_destroyed() const { return was_controller_destroyed_; }
void WatchForControllerDestruction() {
DCHECK(!controller_->was_destroyed_);
controller_->was_destroyed_ = &was_controller_destroyed_;
}
void StopWatchingForControllerDestruction() {
if (!was_controller_destroyed_) {
DCHECK_EQ(controller_->was_destroyed_, &was_controller_destroyed_);
controller_->was_destroyed_ = nullptr;
}
}
private:
friend class RefCounted<EpollInterest>;
~EpollInterest();
@ -95,7 +79,6 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump,
FdWatchController* const controller_;
const EpollInterestParams params_;
bool active_ = true;
bool was_controller_destroyed_ = false;
};
// Note that this class is used as the FdWatchController for both
@ -237,22 +220,16 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump,
// This flag is set if libevent has processed I/O events.
bool processed_io_events_ = false;
struct EventBaseFree {
inline void operator()(event_base* e) const {
if (e)
event_base_free(e);
}
};
// Libevent dispatcher. Watches all sockets registered with it, and sends
// readiness callbacks when a socket is ready for I/O.
std::unique_ptr<event_base, EventBaseFree> event_base_{event_base_new()};
const raw_ptr<event_base, DanglingUntriaged> event_base_;
// ... write end; ScheduleWork() writes a single byte to it
int wakeup_pipe_in_ = -1;
// ... read end; OnWakeup reads it and then breaks Run() out of its sleep
int wakeup_pipe_out_ = -1;
// ... libevent wrapper for read end
std::unique_ptr<event> wakeup_event_;
raw_ptr<event, DanglingUntriaged> wakeup_event_ = nullptr;
ThreadChecker watch_file_descriptor_caller_checker_;
};
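
The include-versus-forward-declare split in these hunks follows from ownership style: holding `wakeup_event_` as a `std::unique_ptr<event>` means the default deleter eventually does `delete` on an `event`, which requires the complete type where the pump's destructor is instantiated, while a raw pointer gets by with just `struct event_base;` and `struct event;`. A custom deleter that calls libevent's free function sidesteps the complete-type requirement. A sketch (the two function declarations are repeated only to keep it self-contained; real code would include libevent's header in the .cc file):

```cpp
#include <memory>

struct event_base;  // forward declaration: no libevent header needed here
extern "C" {
event_base* event_base_new();
void event_base_free(event_base*);
}

// Frees through libevent's own function, so event_base may stay incomplete.
struct EventBaseFree {
  void operator()(event_base* e) const {
    if (e)
      event_base_free(e);
  }
};

class PumpSketch {
  std::unique_ptr<event_base, EventBaseFree> event_base_{event_base_new()};
};

int main() {
  PumpSketch pump;  // event_base created here, freed by EventBaseFree
  return 0;
}
```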

View File

@ -514,13 +514,9 @@ void MessagePumpCFRunLoopBase::RunIdleWork() {
// objects if the app is not currently handling a UI event to ensure they're
// released promptly even in the absence of UI events.
MessagePumpScopedAutoreleasePool autorelease_pool(this);
// Pop the current work item scope as it captures any native work happening
// *between* the last DoWork() and this DoIdleWork()
PopWorkItemScope();
// Call DoIdleWork once, and if something was done, arrange to come back here
// again as long as the loop is still running.
bool did_work = delegate_->DoIdleWork();
// As in DoWork(), push a new scope to cover any native work that could
// possibly happen between now and BeforeWait().
PushWorkItemScope();
if (did_work)
CFRunLoopSourceSignal(idle_work_source_);
}
@ -555,8 +551,6 @@ void MessagePumpCFRunLoopBase::RunNestingDeferredWork() {
void MessagePumpCFRunLoopBase::BeforeWait() {
// Current work item tracking needs to go away since execution will stop.
// Matches the PushWorkItemScope() in AfterWaitObserver() (with an arbitrary
// amount of matching Pop/Push in between when running work items).
PopWorkItemScope();
if (!delegate_) {
@ -610,8 +604,7 @@ void MessagePumpCFRunLoopBase::AfterWaitObserver(CFRunLoopObserverRef observer,
MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
base::mac::CallWithEHFrame(^{
// Emerging from sleep, any work happening after this (outside of a
// RunWork()) should be considered native work. Matching PopWorkItemScope()
// is in BeforeWait().
// RunWork()) should be considered native work.
self->PushWorkItemScope();
});
}
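
The removed Pop/Push pairs maintain a simple invariant: exactly one work-item scope is open whenever native work could run, and none is open while the pump sleeps, so tracing attributes the gaps correctly. A toy sketch of that invariant, with assertions standing in for the real tracing hooks (names are illustrative):

```cpp
#include <cassert>

class PumpSketch {
 public:
  void RunIdleWork() {
    PopWorkItemScope();   // close the scope covering "between work items"
    bool did_work = DoIdleWork();
    PushWorkItemScope();  // reopen coverage until BeforeWait()
    (void)did_work;       // the real pump re-signals its idle source here
  }
  void BeforeWait() {
    PopWorkItemScope();   // nothing is attributed while sleeping
  }
  void AfterWait() {
    PushWorkItemScope();  // native work after wake-up is covered again
  }

 private:
  bool DoIdleWork() { return false; }
  void PushWorkItemScope() { assert(!scope_open_); scope_open_ = true; }
  void PopWorkItemScope() { assert(scope_open_); scope_open_ = false; }
  bool scope_open_ = true;  // one scope is open while the pump is awake
};

int main() {
  PumpSketch pump;
  pump.RunIdleWork();
  pump.BeforeWait();
  pump.AfterWait();
  return 0;
}
```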

Some files were not shown because too many files have changed in this diff.