Compare commits

e12009df79d26c5ee5043d36187b944de47995c4..43d9635938309c9dd331cfb8302a840e2818a9a7

No commits in common. "e12009df79d26c5ee5043d36187b944de47995c4" and "43d9635938309c9dd331cfb8302a840e2818a9a7" have entirely different histories.

1376 changed files with 18473 additions and 61149 deletions

View File

@@ -1 +1 @@
-103.0.5060.53
+102.0.5005.61

View File

@@ -33,6 +33,8 @@ default_args = {
   # also needs to be defined to src/ios/BUILD.gn (respectively removed from both
   # location when it is removed).
+  v8_extra_library_files = []
+  v8_experimental_extra_library_files = []
   v8_enable_gdbjit = false
   v8_imminent_deprecation_warnings = false
@@ -58,7 +60,7 @@ default_args = {
   # Overwrite default args declared in the Fuchsia sdk
   fuchsia_sdk_readelf_exec =
       "//third_party/llvm-build/Release+Asserts/bin/llvm-readelf"
-  fuchsia_target_api_level = 8
+  fuchsia_target_api_level = 7
   devtools_visibility = [ "*" ]
 }

View File

@@ -733,7 +733,6 @@ Manish Chhajer <chhajer.m@samsung.com>
 Manish Jethani <m.jethani@eyeo.com>
 Manojkumar Bhosale <manojkumar.bhosale@imgtec.com>
 Manuel Braun <thembrown@gmail.com>
-Manuel Lagana <manuel.lagana.dev@gmail.com>
 Mao Yujie <maojie0924@gmail.com>
 Mao Yujie <yujie.mao@intel.com>
 Marc des Garets <marc.desgarets@googlemail.com>
@@ -826,7 +825,6 @@ Mohammad Azam <m.azam@samsung.com>
 Mohammed Wajahat Ali Siddiqui <wajahat.s@samsung.com>
 Mohan Reddy <mohan.reddy@samsung.com>
 Mohit Bhalla <bhallam@amazon.com>
-Moiseanu Rares-Marian <moiseanurares@gmail.com>
 Momoka Yamamoto <momoka.my6@gmail.com>
 Momoko Hattori <momohatt10@gmail.com>
 Mostafa Sedaghat joo <mostafa.sedaghat@gmail.com>
@@ -937,7 +935,6 @@ Qiang Zeng <zengqiang1@huawei.com>
 Qiankun Miao <qiankun.miao@intel.com>
 Qing Zhang <qing.zhang@intel.com>
 Qingmei Li <qingmei.li@vivo.com>
-Qiyao Yuan <qiyaoyuan@tencent.com>
 Radu Stavila <stavila@adobe.com>
 Radu Velea <radu.velea@intel.com>
 Rafael Antognolli <rafael.antognolli@intel.com>
@@ -1013,7 +1010,6 @@ Sam McDonald <sam@sammcd.com>
 Samuel Attard <samuel.r.attard@gmail.com>
 Sanggi Hong <sanggi.hong11@gmail.com>
 Sanghee Lee <sanghee.lee1992@gmail.com>
-Sangheon Kim <sangheon77.kim@samsung.com>
 Sanghyun Park <sh919.park@samsung.com>
 Sanghyup Lee <sh53.lee@samsung.com>
 Sangjoon Je <htamop@gmail.com>
@@ -1060,7 +1056,6 @@ Shane Hansen <shanemhansen@gmail.com>
 ShankarGanesh K <blr.bmlab@gmail.com>
 Shanmuga Pandi M <shanmuga.m@samsung.com>
 Shaobo Yan <shaobo.yan@intel.com>
-Shaotang Zhu <zhushaotang@uniontech.com>
 Shashi Kumar <sk.kumar@samsung.com>
 Shawn Anastasio <shawnanastasio@gmail.com>
 Shelley Vohr <shelley.vohr@gmail.com>
@@ -1094,8 +1089,6 @@ Simon La Macchia <smacchia@amazon.com>
 Siva Kumar Gunturi <siva.gunturi@samsung.com>
 Sohan Jyoti Ghosh <sohan.jyoti@huawei.com>
 Sohan Jyoti Ghosh <sohan.jyoti@samsung.com>
-Sohom Datta <sohom.datta@learner.manipal.edu>
-Sohom Datta <dattasohom1@gmail.com>
 Song Fangzhen <songfangzhen@bytedance.com>
 Song YeWen <ffmpeg@gmail.com>
 Sooho Park <sooho1000@gmail.com>
@@ -1178,7 +1171,6 @@ Tim Steiner <twsteiner@gmail.com>
 Timo Gurr <timo.gurr@gmail.com>
 Timo Reimann <ttr314@googlemail.com>
 Timo Witte <timo.witte@gmail.com>
-Timothy Pearson <tpearson@raptorcs.com>
 Ting Shao <ting.shao@intel.com>
 Tobias Lippert <tobias.lippert@fastmail.com>
 Tobias Soppa <tobias@soppa.me>
@@ -1321,7 +1313,6 @@ Zhengkun Li <zhengkli@amazon.com>
 Zhenyu Liang <zhenyu.liang@intel.com>
 Zhenyu Shan <zhenyu.shan@intel.com>
 Zhifei Fang <facetothefate@gmail.com>
-Zhiyuan Ye <zhiyuanye@tencent.com>
 Zhuoyu Qian <zhuoyu.qian@samsung.com>
 Ziran Sun <ziran.sun@samsung.com>
 Zoltan Czirkos <czirkos.zoltan@gmail.com>

src/DEPS
View File

@ -142,7 +142,7 @@ vars = {
# required to build C++-Rust interop codegen tools. This may break things that # required to build C++-Rust interop codegen tools. This may break things that
# use it when clang rolls, and is meant for prototyping. You should talk to # use it when clang rolls, and is meant for prototyping. You should talk to
# tools/clang/OWNERS before depending on it. # tools/clang/OWNERS before depending on it.
'checkout_clang_libs': 'use_rust', 'checkout_clang_libs': False,
# By default checkout the OpenXR loader library only on Windows. The OpenXR # By default checkout the OpenXR loader library only on Windows. The OpenXR
# backend for VR in Chromium is currently only supported for Windows, but # backend for VR in Chromium is currently only supported for Windows, but
@ -184,8 +184,8 @@ vars = {
# qemu on linux-arm64 machines. # qemu on linux-arm64 machines.
'checkout_fuchsia_for_arm64_host': False, 'checkout_fuchsia_for_arm64_host': False,
# By default, download the fuchsia sdk from the public sdk directory. # By default, download the fuchsia sdk from the fuchsia GCS bucket.
'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/', 'fuchsia_sdk_bucket': 'fuchsia',
# By default, download the fuchsia images from the fuchsia GCS bucket. # By default, download the fuchsia images from the fuchsia GCS bucket.
'fuchsia_images_bucket': 'fuchsia', 'fuchsia_images_bucket': 'fuchsia',
@ -201,14 +201,7 @@ vars = {
# By default, do not check out versions of toolschains and sdks that are # By default, do not check out versions of toolschains and sdks that are
# specifically only needed by Lacros. # specifically only needed by Lacros.
'checkout_lacros_sdk': False, 'checkout_lacros_sdk': False,
# To update the sdk version: 'lacros_sdk_version': '14556.0.0',
# 1 Choose a version that's not newer than the Ash side so it's thoroughly
# tested:
# https://chromium-review.googlesource.com/q/%2522Automated+Commit:+LKGM%2522+status:merged
# 2 Run additional a few optional tryjobs:
# lacros-amd64-generic-chrome-skylab
# lacros-arm-generic-chrome-skylab
'lacros_sdk_version': '14748.0.0',
# Generate location tag metadata to include in tests result data uploaded # Generate location tag metadata to include in tests result data uploaded
# to ResultDB. This isn't needed on some configs and the tool that generates # to ResultDB. This isn't needed on some configs and the tool that generates
@ -220,18 +213,12 @@ vars = {
# luci-go CIPD package version. # luci-go CIPD package version.
# Make sure the revision is uploaded by infra-packagers builder. # Make sure the revision is uploaded by infra-packagers builder.
# https://ci.chromium.org/p/infra-internal/g/infra-packagers/console # https://ci.chromium.org/p/infra-internal/g/infra-packagers/console
'luci_go': 'git_revision:9193dc92fa28cd5faf6b2d7c6e0a7f2ec174898e', 'luci_go': 'git_revision:6da0608e4fa8a3c6d1fa4f855485c0038b05bf72',
# This can be overridden, e.g. with custom_vars, to build clang from HEAD # This can be overridden, e.g. with custom_vars, to build clang from HEAD
# instead of downloading the prebuilt pinned revision. # instead of downloading the prebuilt pinned revision.
'llvm_force_head_revision': False, 'llvm_force_head_revision': False,
# Fetch Rust toolchain built against our LLVM revision instead of the Android
# Rust toolchain. Experimental. The corresponding GN arg
# use_chromium_rust_toolchain directs the build to use this toolchain instead
# of the Android toolchain.
'fetch_prebuilt_chromium_rust_toolchain': 'use_rust and host_os == "linux"',
# Build in-tree Rust toolchain. checkout_clang_libs must also be True. The # Build in-tree Rust toolchain. checkout_clang_libs must also be True. The
# corresponding GN arg use_chromium_rust_toolchain directs the build to use # corresponding GN arg use_chromium_rust_toolchain directs the build to use
# the in-tree toolchain instead of the Android toolchain. # the in-tree toolchain instead of the Android toolchain.
@ -251,10 +238,7 @@ vars = {
'dawn_standalone': False, 'dawn_standalone': False,
# reclient CIPD package version # reclient CIPD package version
'reclient_version': 're_client_version:0.62.0.0a58116-gomaip', 'reclient_version': 're_client_version:0.59.0.7914303-gomaip',
# Enable fetching Rust-related packages.
'use_rust': False,
'android_git': 'https://android.googlesource.com', 'android_git': 'https://android.googlesource.com',
'aomedia_git': 'https://aomedia.googlesource.com', 'aomedia_git': 'https://aomedia.googlesource.com',
@ -269,38 +253,34 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Skia # the commit queue can handle CLs rolling Skia
# and whatever else without interference from each other. # and whatever else without interference from each other.
'skia_revision': 'b301ff025004c9cd82816c86c547588e6c24b466', 'skia_revision': '3338e90707323d2cd3a150276acb9f39933deee2',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling V8 # the commit queue can handle CLs rolling V8
# and whatever else without interference from each other. # and whatever else without interference from each other.
'v8_revision': '3713708934e4c3bd0fd2b53ed87afd2204d95075', 'v8_revision': '87c27db79e6a35a6bdedcbfe732f978812bf6ced',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ANGLE # the commit queue can handle CLs rolling ANGLE
# and whatever else without interference from each other. # and whatever else without interference from each other.
'angle_revision': '53e1711046b815dfff3321c3027f190c9b0b899b', 'angle_revision': '6661eb4900dae62cbe9af5023f9c1e7105798b50',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling SwiftShader # the commit queue can handle CLs rolling SwiftShader
# and whatever else without interference from each other. # and whatever else without interference from each other.
'swiftshader_revision': 'f1c2c0b0728152d7ca24472e696e07e957e6d09a', 'swiftshader_revision': '103a69bd6c82980c967c2f4002c9a302ea67c716',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling PDFium # the commit queue can handle CLs rolling PDFium
# and whatever else without interference from each other. # and whatever else without interference from each other.
'pdfium_revision': '558516c32375fc92a61d78d2617d98de01e4513d', 'pdfium_revision': '62ad9af8a9f9494645b659674b64bb51775cde05',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling BoringSSL # the commit queue can handle CLs rolling BoringSSL
# and whatever else without interference from each other. # and whatever else without interference from each other.
# #
# Note this revision should be updated with # Note this revision should be updated with
# third_party/boringssl/roll_boringssl.py, not roll-dep. # third_party/boringssl/roll_boringssl.py, not roll-dep.
'boringssl_revision': '227ff6e6425283b83594a91a1aa81cc78f1a88df', 'boringssl_revision': '27ffcc6e19bbafddf1b59ec0bc6df2904de7eb2c',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:8.20220512.1.1',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling google-toolbox-for-mac # the commit queue can handle CLs rolling google-toolbox-for-mac
# and whatever else without interference from each other. # and whatever else without interference from each other.
'google_toolbox_for_mac_revision': '42b12f10cd8342f5cb41a1e3e3a2f13fd9943b0d', 'google_toolbox_for_mac_revision': 'aa1a3d2d447905999f119efbb70b3786c5eafa13',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling googletest # the commit queue can handle CLs rolling googletest
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -320,7 +300,7 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype # the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other. # and whatever else without interference from each other.
'freetype_revision': '5d49473f8579d7f5f687d3fe52af977468f8e090', 'freetype_revision': '3100c8120e0ff423db8d8134a8073e639371993e',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling freetype # the commit queue can handle CLs rolling freetype
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -328,7 +308,7 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling HarfBuzz # the commit queue can handle CLs rolling HarfBuzz
# and whatever else without interference from each other. # and whatever else without interference from each other.
'harfbuzz_revision': 'acdab17ed3507bc9524cb57bef703a983e1031cf', 'harfbuzz_revision': '6454cec085ba51cefcd12b1f8027bc4a647347d5',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Emoji Segmenter # the commit queue can handle CLs rolling Emoji Segmenter
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -340,7 +320,7 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling catapult # the commit queue can handle CLs rolling catapult
# and whatever else without interference from each other. # and whatever else without interference from each other.
'catapult_revision': '3a2e446a98743856c32b4426cef3237f86ad8787', 'catapult_revision': '3cf2f4f0e03be4dc0f4a26cb3943b4a719643e1b',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libFuzzer # the commit queue can handle CLs rolling libFuzzer
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -348,7 +328,7 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling devtools-frontend # the commit queue can handle CLs rolling devtools-frontend
# and whatever else without interference from each other. # and whatever else without interference from each other.
'devtools_frontend_revision': '89128af9eab223d347fdcf39b704a2e484946efa', 'devtools_frontend_revision': '46a28a3c5dadb2a79680c35a61000e908aee74dc',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libprotobuf-mutator # the commit queue can handle CLs rolling libprotobuf-mutator
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -384,11 +364,11 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'dawn_revision': '3b4d8a92c762beb2858755ca1f48cb496a04c23a', 'dawn_revision': 'fa8cc68ff7c055512e83a538e5517400f5f053bc',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'quiche_revision': '6139e0819ba88e6112d22832f73f77759201bf3f', 'quiche_revision': '7e841d3541a113b5ed577824c9aa71b8a1c7617f',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling ios_webkit # the commit queue can handle CLs rolling ios_webkit
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -408,11 +388,11 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling libavif # the commit queue can handle CLs rolling libavif
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libavif_revision': '372b338c89f6fcb88b189d065e241b0b6d5fd04d', 'libavif_revision': 'ccf5a781238b43fee428519ba6e9508204835b9c',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling nearby # the commit queue can handle CLs rolling nearby
# and whatever else without interference from each other. # and whatever else without interference from each other.
'nearby_revision': '6347f80a1196ea15ee18ff6d0fd1917f1cb2b404', 'nearby_revision': '0c8838ad9b9ba5e03ea9dadd0cba5f4ea9c949fd',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling securemessage # the commit queue can handle CLs rolling securemessage
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -428,11 +408,11 @@ vars = {
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libcxxabi_revision': 'f8b9fcc8e27d2ebb27e74378613bd7109737f15a', 'libcxxabi_revision': 'e025ba5dc85202540099d7cd8e72eae2d4ee9e33',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
'libunwind_revision': 'cd5b90c8ef0bd5ec1b9574596db253f83a3cf49e', 'libunwind_revision': '1acfbbb4747081789ba48bc8c042fa3c5c8ccaa3',
# Three lines of non-changing comments so that # Three lines of non-changing comments so that
# the commit queue can handle CLs rolling feed # the commit queue can handle CLs rolling feed
# and whatever else without interference from each other. # and whatever else without interference from each other.
@ -451,7 +431,7 @@ vars = {
'libcxx_revision': '79a2e924d96e2fc1e4b937c42efd08898fa472d7', 'libcxx_revision': '79a2e924d96e2fc1e4b937c42efd08898fa472d7',
# GN CIPD package version. # GN CIPD package version.
'gn_version': 'git_revision:578a7fe4c3c6b0bc2ae1fd2e37f14857d09895bf', 'gn_version': 'git_revision:fd9f2036f26d83f9fcfe93042fb952e5a7fe2167',
} }
# Only these hosts are allowed for dependencies in this DEPS file. # Only these hosts are allowed for dependencies in this DEPS file.
@ -536,8 +516,7 @@ deps = {
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
# TODO(https://crbug.com/1292038): gate this on use_rust as well as host_os. 'condition': '(host_os == "linux")',
'condition': 'host_os == "linux"',
}, },
# We don't know target_cpu at deps time. At least until there's a universal # We don't know target_cpu at deps time. At least until there's a universal
@ -567,7 +546,7 @@ deps = {
'src/third_party/apache-linux': { 'src/third_party/apache-linux': {
'packages': [ 'packages': [
{ {
'package': 'infra/3pp/tools/httpd-php/linux-amd64', 'package': 'infra/3pp/tools/httpd-php/${{platform}}',
'version': 'version:2@httpd2.4.38.php7.3.31.chromium.3', 'version': 'version:2@httpd2.4.38.php7.3.31.chromium.3',
}, },
], ],
@ -579,7 +558,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/android_webview/tools/cts_archive', 'package': 'chromium/android_webview/tools/cts_archive',
'version': '7HRNj8Yv_CSQiyCoK8Y4Ld-h1virCHVES9Ed18Z75LkC', 'version': 'rzLrTykLB2J7ON1a9_5F7qmkjH3U246nHDPHiTruibUC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -741,16 +720,16 @@ deps = {
Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248', Var('chromium_git') + '/external/github.com/toji/webvr.info.git' + '@' + 'c58ae99b9ff9e2aa4c524633519570bf33536248',
'src/docs/website': { 'src/docs/website': {
'url': Var('chromium_git') + '/website.git' + '@' + '9edef17474dc2fef6ac0b0540515d9eaab7f2e45', 'url': Var('chromium_git') + '/website.git' + '@' + '17a7f6a95704dc84abc24ba06252d048de1f54df',
}, },
'src/ios/third_party/earl_grey2/src': { 'src/ios/third_party/earl_grey2/src': {
'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + 'd28ba1132c96e3267db78ceef59c07486fad3d30', 'url': Var('chromium_git') + '/external/github.com/google/EarlGrey.git' + '@' + '98801869816f5272ad89f7f66bec7941960a28ea',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
'src/ios/third_party/edo/src': { 'src/ios/third_party/edo/src': {
'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '08988a24e17251c85c4283dae7badddc3402dee9', 'url': Var('chromium_git') + '/external/github.com/google/eDistantObject.git' + '@' + '727e556705278598fce683522beedbb9946bfda0',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
@ -765,7 +744,7 @@ deps = {
}, },
'src/ios/third_party/material_components_ios/src': { 'src/ios/third_party/material_components_ios/src': {
'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '505c27cd9f6b33acbe54b5c40f291866e53a47f1', 'url': Var('chromium_git') + '/external/github.com/material-components/material-components-ios.git' + '@' + '04424d224ee9e98c7e5a31e140e31105f07e73f1',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
@ -809,6 +788,17 @@ deps = {
'condition': 'checkout_ios', 'condition': 'checkout_ios',
}, },
'src/ios/third_party/native_closure_compiler': {
'packages': [
{
'package': 'chromium/third_party/native_closure_compiler_macos',
'version': 'version:2@20210505.0.0',
},
],
'condition': 'checkout_ios',
'dep_type': 'cipd',
},
'src/ios/third_party/ochamcrest/src': { 'src/ios/third_party/ochamcrest/src': {
'url': Var('chromium_git') + '/external/github.com/hamcrest/OCHamcrest.git' + '@' + '92d9c14d13bb864255e65c09383564653896916b', 'url': Var('chromium_git') + '/external/github.com/hamcrest/OCHamcrest.git' + '@' + '92d9c14d13bb864255e65c09383564653896916b',
'condition': 'checkout_ios', 'condition': 'checkout_ios',
@ -821,7 +811,7 @@ deps = {
}, },
'src/media/cdm/api': 'src/media/cdm/api':
Var('chromium_git') + '/chromium/cdm.git' + '@' + 'fef0b5aa1bd31efb88dfab804bdbe614f3d54f28', Var('chromium_git') + '/chromium/cdm.git' + '@' + 'fc5afac6847dc61addc1177103aa602e71a9ecac',
'src/native_client': { 'src/native_client': {
'url': Var('chromium_git') + '/native_client/src/native_client.git' + '@' + Var('nacl_revision'), 'url': Var('chromium_git') + '/native_client/src/native_client.git' + '@' + Var('nacl_revision'),
@ -835,7 +825,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/linux-amd64', 'package': 'chromium/rts/model/linux-amd64',
'version': 'KZbOWyGl41EX1PlJ5EkkQ7uXgfbDU5_jgwuBfzWAU-EC', 'version': 'YoP4kTClaepmmjRqVgIPL-uE44odWGlVM8pBRVdTx2AC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -846,7 +836,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/mac-amd64', 'package': 'chromium/rts/model/mac-amd64',
'version': 'VeBGWNvPjjvuUBd78PYxm9BBVGYOLdVeAAdaBscZAdAC', 'version': 'mL4NyynmT1Ubjyy2JUXN4SX80VIVKV66MfgBDu-HLRAC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -857,7 +847,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/rts/model/windows-amd64', 'package': 'chromium/rts/model/windows-amd64',
'version': 'K6xgV4tqvjNfCU66sz6WMgX80oGWTUZwRkp9OH5j_UIC', 'version': 'le7Fn-9wOJ6Ob24B0IvVQY_Sss-rzfQ9xaeovuM0WSUC',
}, },
], ],
'dep_type': 'cipd', 'dep_type': 'cipd',
@ -914,7 +904,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/androidx', 'package': 'chromium/third_party/androidx',
'version': 'PNBrshbagcx1GOHkHBhYzkuMPFqjoNIacmyt8tKhmfwC', 'version': 'k4t_4yTm03LpWgvtVabkki_hjYZ0-R6vK2R68XEEKFwC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -947,7 +937,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/aapt2', 'package': 'chromium/third_party/android_build_tools/aapt2',
'version': 'kZqQH92bSO1p0a7_hcrana_9YjtSBU1te7TEtNVBoCUC', 'version': 'u2Cw4baoLfvlEDMwcJjq9iOJRF0_2BjsgMFl7UhJxGAC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -958,7 +948,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_build_tools/bundletool', 'package': 'chromium/third_party/android_build_tools/bundletool',
'version': 'AqsPZpWJh-ZyGraHKlbH8XgjRnmyDmolX4HhwPEo9XUC', 'version': 'zQILIUnCaQ93HTtR07m4ahlE9mrkkwks52L5vFaUaUUC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -980,6 +970,10 @@ deps = {
'package': 'chromium/third_party/android_sdk/public/emulator', 'package': 'chromium/third_party/android_sdk/public/emulator',
'version': Var('android_sdk_emulator_version'), 'version': Var('android_sdk_emulator_version'),
}, },
{
'package': 'chromium/third_party/android_sdk/public/extras',
'version': Var('android_sdk_extras_version'),
},
{ {
'package': 'chromium/third_party/android_sdk/public/patcher', 'package': 'chromium/third_party/android_sdk/public/patcher',
'version': Var('android_sdk_patcher_version'), 'version': Var('android_sdk_patcher_version'),
@ -1008,9 +1002,6 @@ deps = {
'src/third_party/angle': 'src/third_party/angle':
Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'), Var('chromium_git') + '/angle/angle.git' + '@' + Var('angle_revision'),
'src/third_party/content_analysis_sdk/src':
Var('chromium_git') + '/external/github.com/chromium/content_analysis_sdk.git' + '@' + 'd2a0b6188bcbae674f8ef2c42c7cffc908ac632e',
'src/third_party/dav1d/libdav1d': 'src/third_party/dav1d/libdav1d':
Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + '87f9a81cd770e49394a45deca7a3df41243de00b', Var('chromium_git') + '/external/github.com/videolan/dav1d.git' + '@' + '87f9a81cd770e49394a45deca7a3df41243de00b',
@ -1032,7 +1023,7 @@ deps = {
}, },
'src/third_party/barhopper': { 'src/third_party/barhopper': {
'url': 'https://chrome-internal.googlesource.com/chrome/deps/barhopper.git' + '@' + '5830f9acc68275805d60d4b02bf8e1e3c600740d', 'url': 'https://chrome-internal.googlesource.com/chrome/deps/barhopper.git' + '@' + 'b619dfad3ef48aa15d3a647442c3c40f3a967146',
'condition': 'checkout_src_internal and checkout_chromeos', 'condition': 'checkout_src_internal and checkout_chromeos',
}, },
@ -1081,7 +1072,7 @@ deps = {
}, },
'src/third_party/cast_core/public/src': 'src/third_party/cast_core/public/src':
Var('chromium_git') + '/cast_core/public' + '@' + '1112b7c91c791fc951c162527586652de2c4cda9', Var('chromium_git') + '/cast_core/public' + '@' + 'e7dac9fd5f5bf0158015b33a2594e30c1e4ae610',
'src/third_party/catapult': 'src/third_party/catapult':
Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'), Var('chromium_git') + '/catapult.git' + '@' + Var('catapult_revision'),
@ -1110,7 +1101,7 @@ deps = {
# Tools used when building Chrome for Chrome OS. This affects both the Simple # Tools used when building Chrome for Chrome OS. This affects both the Simple
# Chrome workflow, as well as the chromeos-chrome ebuild. # Chrome workflow, as well as the chromeos-chrome ebuild.
'src/third_party/chromite': { 'src/third_party/chromite': {
'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + '1a40fd23ac85e202145a649b7205fc3f6103d16f', 'url': Var('chromium_git') + '/chromiumos/chromite.git' + '@' + 'eb3547e17504b0d603b4faf2b0caebcc5d9dca93',
'condition': 'checkout_chromeos', 'condition': 'checkout_chromeos',
}, },
@ -1120,20 +1111,17 @@ deps = {
'src/third_party/colorama/src': 'src/third_party/colorama/src':
Var('chromium_git') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8', Var('chromium_git') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
'src/third_party/cpuinfo/src':
Var('chromium_git') + '/external/github.com/pytorch/cpuinfo.git' + '@' + 'b40bae27785787b6dd70788986fd96434cf90ae2',
'src/third_party/crc32c/src': 'src/third_party/crc32c/src':
Var('chromium_git') + '/external/github.com/google/crc32c.git' + '@' + 'fa5ade41ee480003d9c5af6f43567ba22e4e17e6', Var('chromium_git') + '/external/github.com/google/crc32c.git' + '@' + 'fa5ade41ee480003d9c5af6f43567ba22e4e17e6',
# For Linux and Chromium OS. # For Linux and Chromium OS.
'src/third_party/cros_system_api': { 'src/third_party/cros_system_api': {
'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + 'd79486f913c4d301047e2f6e8051538c94066ec9', 'url': Var('chromium_git') + '/chromiumos/platform2/system_api.git' + '@' + '90c3a2f21e7d8c6668c9f7daaaf39c5fd8ffe58a',
'condition': 'checkout_linux', 'condition': 'checkout_linux',
}, },
'src/third_party/depot_tools': 'src/third_party/depot_tools':
Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '8b707654318d0b2b24c0b0bbeeef0ee8b0865007', Var('chromium_git') + '/chromium/tools/depot_tools.git' + '@' + '932a621ece2316026926d615bb04d3006077ab79',
'src/third_party/devtools-frontend/src': 'src/third_party/devtools-frontend/src':
Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'), Var('chromium_git') + '/devtools/devtools-frontend' + '@' + Var('devtools_frontend_revision'),
@ -1142,7 +1130,7 @@ deps = {
Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d', Var('chromium_git') + '/chromium/dom-distiller/dist.git' + '@' + '199de96b345ada7c6e7e6ba3d2fa7a6911b8767d',
'src/third_party/eigen3/src': 'src/third_party/eigen3/src':
Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + 'b02c384ef4e8eba7b8bdef16f9dc6f8f4d6a6b2b', Var('chromium_git') + '/external/gitlab.com/libeigen/eigen.git' + '@' + '008ff3483a8c5604639e1c4d204eae30ad737af6',
'src/third_party/emoji-metadata/src': { 'src/third_party/emoji-metadata/src': {
'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '8de89a7a36cd024dcd30ac9f67f3f02c37a7c8fb', 'url': Var('chromium_git') + '/external/github.com/googlefonts/emoji-metadata' + '@' + '8de89a7a36cd024dcd30ac9f67f3f02c37a7c8fb',
@ -1194,9 +1182,6 @@ deps = {
'src/third_party/freetype-testing/src': 'src/third_party/freetype-testing/src':
Var('chromium_git') + '/external/github.com/freetype/freetype2-testing.git' + '@' + Var('freetype_testing_revision'), Var('chromium_git') + '/external/github.com/freetype/freetype2-testing.git' + '@' + Var('freetype_testing_revision'),
'src/third_party/fxdiv/src':
Var('chromium_git') + '/external/github.com/Maratyszcza/FXdiv.git' + '@' + '63058eff77e11aa15bf531df5dd34395ec3017c8',
'src/third_party/harfbuzz-ng/src': 'src/third_party/harfbuzz-ng/src':
Var('chromium_git') + '/external/github.com/harfbuzz/harfbuzz.git' + '@' + Var('harfbuzz_revision'), Var('chromium_git') + '/external/github.com/harfbuzz/harfbuzz.git' + '@' + Var('harfbuzz_revision'),
@ -1261,17 +1246,6 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/fuchsia-sdk/sdk': {
'packages': [
{
'package': Var('fuchsia_sdk_cipd_prefix') + '${{platform}}',
'version': Var('fuchsia_version'),
},
],
'condition': 'checkout_fuchsia',
'dep_type': 'cipd',
},
'src/third_party/hamcrest': { 'src/third_party/hamcrest': {
'packages': [ 'packages': [
{ {
@ -1287,7 +1261,7 @@ deps = {
Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e', Var('chromium_git') + '/chromium/deps/hunspell_dictionaries.git' + '@' + '41cdffd71c9948f63c7ad36e1fb0ff519aa7a37e',
'src/third_party/icu': 'src/third_party/icu':
Var('chromium_git') + '/chromium/deps/icu.git' + '@' + '585942f33d939a11f4600bd5042649b7ca189008', Var('chromium_git') + '/chromium/deps/icu.git' + '@' + 'e1f2f4f42368555835a7a0894188716556c32871',
'src/third_party/icu4j': { 'src/third_party/icu4j': {
'packages': [ 'packages': [
@ -1341,7 +1315,7 @@ deps = {
'src/third_party/jsoncpp/source': 'src/third_party/jsoncpp/source':
Var('chromium_git') + '/external/github.com/open-source-parsers/jsoncpp.git' Var('chromium_git') + '/external/github.com/open-source-parsers/jsoncpp.git'
+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448', # release 1.9.4 + '@' + '9059f5cad030ba11d37818847443a53918c327b1', # release 1.9.4
'src/third_party/junit/src': { 'src/third_party/junit/src': {
'url': Var('chromium_git') + '/external/junit.git' + '@' + '64155f8a9babcfcf4263cf4d08253a1556e75481', 'url': Var('chromium_git') + '/external/junit.git' + '@' + '64155f8a9babcfcf4263cf4d08253a1556e75481',
@ -1349,7 +1323,7 @@ deps = {
}, },
'src/third_party/leveldatabase/src': 'src/third_party/leveldatabase/src':
Var('chromium_git') + '/external/leveldb.git' + '@' + 'd019e3605f222ebc5a3a2484a2cb29db537551dd', Var('chromium_git') + '/external/leveldb.git' + '@' + '1b51a3a96821e5fd5175288724c95c1bde57b2f0',
'src/third_party/libFuzzer/src': 'src/third_party/libFuzzer/src':
Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'), Var('chromium_git') + '/chromium/llvm-project/compiler-rt/lib/fuzzer.git' + '@' + Var('libfuzzer_revision'),
@ -1358,7 +1332,7 @@ deps = {
Var('chromium_git') + '/external/libaddressinput.git' + '@' + '3b8ee157a8f3536bbf5ad2448e9e3370463c1e40', Var('chromium_git') + '/external/libaddressinput.git' + '@' + '3b8ee157a8f3536bbf5ad2448e9e3370463c1e40',
'src/third_party/libaom/source/libaom': 'src/third_party/libaom/source/libaom':
Var('aomedia_git') + '/aom.git' + '@' + 'ef14518388c0a41c1d3b992f75d5886c9da33832', Var('aomedia_git') + '/aom.git' + '@' + 'e24a83a72b507b93a94f299f0eead1213dbac214',
'src/third_party/libavif/src': 'src/third_party/libavif/src':
Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'), Var('chromium_git') + '/external/github.com/AOMediaCodec/libavif.git' + '@' + Var('libavif_revision'),
@ -1411,18 +1385,18 @@ deps = {
}, },
'src/third_party/libunwindstack': { 'src/third_party/libunwindstack': {
'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '3c86843ae0f8d560ae0d15b92e34ce88cf83057a', 'url': Var('chromium_git') + '/chromium/src/third_party/libunwindstack.git' + '@' + '6868358481bb1e5e20d155c1084dc436c88b5e6b',
'condition': 'checkout_android', 'condition': 'checkout_android',
}, },
'src/third_party/libvpx/source/libvpx': 'src/third_party/libvpx/source/libvpx':
Var('chromium_git') + '/webm/libvpx.git' + '@' + 'cb1abee1455ac7e552da271ac64c71d117caaa77', Var('chromium_git') + '/webm/libvpx.git' + '@' + 'bf672f23a5336cb54dbcb2e4417142139f44cc3e',
'src/third_party/libwebm/source': 'src/third_party/libwebm/source':
Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da', Var('chromium_git') + '/webm/libwebm.git' + '@' + 'e4fbea0c9751ae8aa86629b197a28d8276a2b0da',
'src/third_party/libyuv': 'src/third_party/libyuv':
Var('chromium_git') + '/libyuv/libyuv.git' + '@' + 'd62ee21e6627888e84466b5a5ed15775582ac67b', Var('chromium_git') + '/libyuv/libyuv.git' + '@' + '18f91105162a6ebe7a46ee1c81e9ab67ca97a02b',
'src/third_party/lighttpd': { 'src/third_party/lighttpd': {
'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'), 'url': Var('chromium_git') + '/chromium/deps/lighttpd.git' + '@' + Var('lighttpd_revision'),
@ -1459,7 +1433,7 @@ deps = {
# Graphics buffer allocator for Chrome OS. # Graphics buffer allocator for Chrome OS.
'src/third_party/minigbm/src': { 'src/third_party/minigbm/src': {
'url': Var('chromium_git') + '/chromiumos/platform/minigbm.git' + '@' + 'd73fa7ff377919d94d4ed675cc91a070f0631548', 'url': Var('chromium_git') + '/chromiumos/platform/minigbm.git' + '@' + '2e63aaf616cdda26019d265989bd0d96ee11aab9',
'condition': 'checkout_linux', 'condition': 'checkout_linux',
}, },
@ -1510,10 +1484,10 @@ deps = {
}, },
'src/third_party/openh264/src': 'src/third_party/openh264/src':
Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'fac04ceb3e966f613ed17e98178e9d690280bba6', Var('chromium_git') + '/external/github.com/cisco/openh264' + '@' + 'b52786888ddce9d6bc06b7825ba9bffc65924e0c',
'src/third_party/openscreen/src': 'src/third_party/openscreen/src':
Var('chromium_git') + '/openscreen' + '@' + 'a10f9dc128676d53d29981ad9c0d0fbc5e0bd322', Var('chromium_git') + '/openscreen' + '@' + 'ee7d4e8c5eb35509288a8f238bbf8ef9c3cb9d35',
'src/third_party/openxr/src': { 'src/third_party/openxr/src': {
'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245', 'url': Var('chromium_git') + '/external/github.com/KhronosGroup/OpenXR-SDK' + '@' + 'bf21ccb1007bb531b45d9978919a56ea5059c245',
@ -1530,16 +1504,13 @@ deps = {
}, },
'src/third_party/perfetto': 'src/third_party/perfetto':
Var('android_git') + '/platform/external/perfetto.git' + '@' + 'f7b904a467cb55e4ff3a2ded74354e8e8d8ffabf', Var('android_git') + '/platform/external/perfetto.git' + '@' + '4c15672c0a9e16ac762aa5148f1264350fd49b98',
'src/third_party/perl': { 'src/third_party/perl': {
'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3', 'url': Var('chromium_git') + '/chromium/deps/perl.git' + '@' + '6f3e5028eb65d0b4c5fdd792106ac4c84eee1eb3',
'condition': 'checkout_win', 'condition': 'checkout_win',
}, },
'src/third_party/pthreadpool/src':
Var('chromium_git') + '/external/github.com/Maratyszcza/pthreadpool.git' + '@' + '1787867f6183f056420e532eec640cba25efafea',
'src/third_party/proguard': { 'src/third_party/proguard': {
'packages': [ 'packages': [
{ {
@ -1574,8 +1545,63 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/qemu-linux-x64': {
'packages': [
{
'package': 'fuchsia/third_party/qemu/linux-amd64',
'version': 'FFZaD9tecL-z0lq2XP_7UqiAaMgRGwXTyvcmkv7XCQcC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
'dep_type': 'cipd',
},
'src/third_party/qemu-mac-x64': {
'packages': [
{
'package': 'fuchsia/third_party/qemu/mac-amd64',
'version': '79L6B9YhuL7uIg_CxwlQcZqLOixVtS2Cctn7dmVg0q4C'
},
],
'condition': 'host_os == "mac" and checkout_fuchsia',
'dep_type': 'cipd',
},
'src/third_party/aemu-linux-arm64': {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-arm64',
'version': 'r2LsKQPbfi0NYEO8tfocwaJ1MMACXPDLkgCI0IjJq-YC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia_for_arm64_host',
'dep_type': 'cipd',
},
'src/third_party/aemu-linux-x64': {
'packages': [
{
'package': 'fuchsia/third_party/android/aemu/release/linux-amd64',
'version': 'lbYV0rO8V4GxeqmRrKZeRgQmbFxh2BwafFgd9cjYmWYC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
'dep_type': 'cipd',
},
'src/third_party/aemu-mac-x64': {
'packages': [
{
'package': 'fuchsia/third_party/android/aemu/release/mac-amd64',
'version': 'en5IYbZukTkSmHUnmAKiFkHZrGz1BCQhCecHEggxAqUC'
},
],
'condition': 'host_os == "mac" and checkout_fuchsia',
'dep_type': 'cipd',
},
'src/third_party/re2/src': 'src/third_party/re2/src':
Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + '5723bb8950318135ed9cf4fc76bed988a087f536', Var('chromium_git') + '/external/github.com/google/re2.git' + '@' + 'f5c782e5d02b3e7f244274c9b6d9d3d7a9b6e737',
'src/third_party/r8': { 'src/third_party/r8': {
'packages': [ 'packages': [
@ -1617,7 +1643,7 @@ deps = {
Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0', Var('chromium_git') + '/external/github.com/google/snappy.git' + '@' + '65dc7b383985eb4f63cd3e752136db8d9b4be8c0',
'src/third_party/sqlite/src': 'src/third_party/sqlite/src':
Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'cb47d7089f714e4514f126dfa8ac630cab78ea32', Var('chromium_git') + '/chromium/deps/sqlite.git' + '@' + 'a54d5d154f4b349705a67107ed190d1943f94646',
'src/third_party/sqlite4java': { 'src/third_party/sqlite4java': {
'packages': [ 'packages': [
@ -1645,16 +1671,16 @@ deps = {
Var('swiftshader_git') + '/SwiftShader.git' + '@' + Var('swiftshader_revision'), Var('swiftshader_git') + '/SwiftShader.git' + '@' + Var('swiftshader_revision'),
'src/third_party/text-fragments-polyfill/src': 'src/third_party/text-fragments-polyfill/src':
Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + 'c036420683f672d685e27415de0a5f5e85bdc23f', Var('chromium_git') + '/external/github.com/GoogleChromeLabs/text-fragments-polyfill.git' + '@' + '428dd13167f3ce02e3ca7c086d291d7c079da0dc',
'src/third_party/tflite/src': 'src/third_party/tflite/src':
Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '783ece207fa191df18e305c796aeb29c3057dc7b', Var('chromium_git') + '/external/github.com/tensorflow/tensorflow.git' + '@' + '551a50e768fc48db8e68356a575763278fa1b3b6',
'src/third_party/turbine': { 'src/third_party/turbine': {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/turbine', 'package': 'chromium/third_party/turbine',
'version': 'FJ-IOPRGQsHUZwVeYmVw_idRk5mUUP6_Uj2i6mKQlEMC', 'version': 'y4x80kUnDOxC5QyG48MlVoiRIEn09eaHcIJQFavlqgMC',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -1666,7 +1692,7 @@ deps = {
'condition': 'checkout_android', 'condition': 'checkout_android',
}, },
'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@c6a60f3cc711e0758ebb58d8d1db9f97b5973ccc', 'src/third_party/vulkan-deps': '{chromium_git}/vulkan-deps@65b7b8de281ca44627456ade69c7cba884bd1c87',
'src/third_party/vulkan_memory_allocator': 'src/third_party/vulkan_memory_allocator':
Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907', Var('chromium_git') + '/external/github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git' + '@' + 'ebe84bec02c041d28f902da0214bf442743fc907',
@ -1702,10 +1728,10 @@ deps = {
Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'cf04aebdf9b53bb2853f22a81465688daf879ec6', Var('chromium_git') + '/external/khronosgroup/webgl.git' + '@' + 'cf04aebdf9b53bb2853f22a81465688daf879ec6',
'src/third_party/webgpu-cts/src': 'src/third_party/webgpu-cts/src':
Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + 'b32ac31a0eab7386807f01c7cb8d77a6e1104c23', Var('chromium_git') + '/external/github.com/gpuweb/cts.git' + '@' + '958d732db02c2a70bcf4a2b0986f09318db4adfb',
'src/third_party/webrtc': 'src/third_party/webrtc':
Var('webrtc_git') + '/src.git' + '@' + 'cd3ae79bce5516336481fd0a689499601b57d1bc', Var('webrtc_git') + '/src.git' + '@' + '6ff73180ad01aca444c9856f91148eb2b948ce63',
'src/third_party/libgifcodec': 'src/third_party/libgifcodec':
Var('skia_git') + '/libgifcodec' + '@'+ Var('libgifcodec_revision'), Var('skia_git') + '/libgifcodec' + '@'+ Var('libgifcodec_revision'),
@ -1725,9 +1751,6 @@ deps = {
'condition': 'checkout_linux', 'condition': 'checkout_linux',
}, },
'src/third_party/xnnpack/src':
Var('chromium_git') + '/external/github.com/google/XNNPACK.git' + '@' + 'd5dc9e245f0d474235f2b0a48e3e8525de02a5db',
'src/tools/page_cycler/acid3': 'src/tools/page_cycler/acid3':
Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521', Var('chromium_git') + '/chromium/deps/acid3.git' + '@' + '6be0a66a1ebd7ebc5abc1b2f405a945f6d871521',
@ -1778,7 +1801,7 @@ deps = {
Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'), Var('chromium_git') + '/v8/v8.git' + '@' + Var('v8_revision'),
'src-internal': { 'src-internal': {
'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@76aaa026524883823bde358f70bbf17e6acbcb7f', 'url': 'https://chrome-internal.googlesource.com/chrome/src-internal.git@ae99ebde2a4de4d30a66c278c396dba703d2845f',
'condition': 'checkout_src_internal', 'condition': 'checkout_src_internal',
}, },
@ -1786,7 +1809,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/assistant/ambient', 'package': 'chromeos_internal/assistant/ambient',
'version': 'version:float_on_by_background_color_fix', 'version': 'version:float_on_by_slower',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1797,7 +1820,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/eche_app/app', 'package': 'chromeos_internal/apps/eche_app/app',
'version': 'WFd9ZH0_iotkBxugQh6X3U2S45uwq2RycsFybZZzMZwC', 'version': 'mO7HIkligmD70YaR0NC-cEilQ0xhQYkaBq-8xFFsHAMC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1808,7 +1831,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/help_app/app', 'package': 'chromeos_internal/apps/help_app/app',
'version': 'bFdYp_B3xJ8c-mnuvfL1q7fRkYqoORhHWxZEjVuKX0QC', 'version': 'XqfD0KEkd76pT0UI2DhgmgL-CfLXmHeGmHqZ8eSBu-gC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1819,7 +1842,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/media_app/app', 'package': 'chromeos_internal/apps/media_app/app',
'version': 'RGIcLrebzU-C8bZd8lqHsASvZE5SOZ0ziavz4c0LBosC', 'version': 'u3LIEepgYh4Lf4_iS5eKnL1K17o6cTHozTZQ6gkS3oUC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -1830,7 +1853,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromeos_internal/apps/projector_app/app', 'package': 'chromeos_internal/apps/projector_app/app',
'version': 'wCS0Eg6U0v0Is-GGaz_xn2dU-UXdsXbiRerVC2g2RcQC', 'version': 'zvssMRGkrSKVTUnN3dSa_d3_xPuVJ2aPSmegU-rHbMQC',
}, },
], ],
'condition': 'checkout_chromeos and checkout_src_internal', 'condition': 'checkout_chromeos and checkout_src_internal',
@ -2365,17 +2388,6 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/com_google_android_apps_common_testing_accessibility_framework_accessibility_test_framework': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_android_apps_common_testing_accessibility_framework_accessibility_test_framework',
'version': 'version:2@3.1.2.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_google_android_datatransport_transport_api': { 'src/third_party/android_deps/libs/com_google_android_datatransport_transport_api': {
'packages': [ 'packages': [
{ {
@ -2391,7 +2403,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth',
'version': 'version:2@20.1.0.cr1', 'version': 'version:2@17.0.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2402,7 +2414,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth_api_phone', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth_api_phone',
'version': 'version:2@18.0.1.cr1', 'version': 'version:2@17.5.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2413,7 +2425,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth_base', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_auth_base',
'version': 'version:2@18.0.2.cr1', 'version': 'version:2@17.0.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2424,7 +2436,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_base', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_base',
'version': 'version:2@18.0.1.cr1', 'version': 'version:2@17.5.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2435,7 +2447,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_basement',
'version': 'version:2@18.0.1.cr1', 'version': 'version:2@17.5.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2534,7 +2546,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_instantapps', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_instantapps',
'version': 'version:2@18.0.1.cr1', 'version': 'version:2@17.0.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2545,7 +2557,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_location', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_location',
'version': 'version:2@19.0.1.cr1', 'version': 'version:2@17.0.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2589,7 +2601,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_tasks',
'version': 'version:2@18.0.1.cr1', 'version': 'version:2@17.2.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2600,7 +2612,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_vision', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_vision',
'version': 'version:2@20.1.3.cr1', 'version': 'version:2@18.0.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -2611,7 +2623,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_vision_common', 'package': 'chromium/third_party/android_deps/libs/com_google_android_gms_play_services_vision_common',
'version': 'version:2@19.1.3.cr1', 'version': 'version:2@18.0.0.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3047,17 +3059,6 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/com_google_protobuf_protobuf_lite': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/com_google_protobuf_protobuf_lite',
'version': 'version:2@3.0.1.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/com_googlecode_java_diff_utils_diffutils': { 'src/third_party/android_deps/libs/com_googlecode_java_diff_utils_diffutils': {
'packages': [ 'packages': [
{ {
@ -3223,17 +3224,6 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/org_hamcrest_hamcrest': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_hamcrest_hamcrest',
'version': 'version:2@2.2.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_jetbrains_annotations': { 'src/third_party/android_deps/libs/org_jetbrains_annotations': {
'packages': [ 'packages': [
{ {
@ -3249,7 +3239,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib', 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib',
'version': 'version:2@1.6.21.cr1', 'version': 'version:2@1.6.20.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3260,7 +3250,7 @@ deps = {
'packages': [ 'packages': [
{ {
'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common', 'package': 'chromium/third_party/android_deps/libs/org_jetbrains_kotlin_kotlin_stdlib_common',
'version': 'version:2@1.6.21.cr1', 'version': 'version:2@1.6.20.cr1',
}, },
], ],
'condition': 'checkout_android', 'condition': 'checkout_android',
@ -3322,17 +3312,6 @@ deps = {
'dep_type': 'cipd', 'dep_type': 'cipd',
}, },
'src/third_party/android_deps/libs/org_jsoup_jsoup': {
'packages': [
{
'package': 'chromium/third_party/android_deps/libs/org_jsoup_jsoup',
'version': 'version:2@1.14.3.cr1',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
'src/third_party/android_deps/libs/org_ow2_asm_asm': { 'src/third_party/android_deps/libs/org_ow2_asm_asm': {
'packages': [ 'packages': [
{ {
@ -3596,11 +3575,9 @@ include_rules = [
# explicitly here. # explicitly here.
'-absl', '-absl',
'-third_party/abseil-cpp', '-third_party/abseil-cpp',
'+third_party/abseil-cpp/absl/base/attributes.h',
"+third_party/abseil-cpp/absl/numeric/int128.h", "+third_party/abseil-cpp/absl/numeric/int128.h",
'+third_party/abseil-cpp/absl/types/optional.h', '+third_party/abseil-cpp/absl/types/optional.h',
'+third_party/abseil-cpp/absl/types/variant.h', '+third_party/abseil-cpp/absl/types/variant.h',
'+third_party/abseil-cpp/absl/utility/utility.h',
] ]
@ -3787,6 +3764,17 @@ hooks = [
'condition': 'checkout_mac or checkout_ios', 'condition': 'checkout_mac or checkout_ios',
'action': ['python3', 'src/build/mac_toolchain.py'], 'action': ['python3', 'src/build/mac_toolchain.py'],
}, },
{
# Update the Fuchsia SDK if necessary.
'name': 'Download Fuchsia SDK',
'pattern': '.',
'condition': 'checkout_fuchsia',
'action': [
'python3',
'src/build/fuchsia/update_sdk.py',
'--default-bucket={fuchsia_sdk_bucket}',
],
},
{ {
# Update the prebuilt clang toolchain. # Update the prebuilt clang toolchain.
# Note: On Win, this should run after win_toolchain, as it may use it. # Note: On Win, this should run after win_toolchain, as it may use it.
@ -3795,13 +3783,6 @@ hooks = [
'condition': 'not llvm_force_head_revision', 'condition': 'not llvm_force_head_revision',
'action': ['python3', 'src/tools/clang/scripts/update.py'], 'action': ['python3', 'src/tools/clang/scripts/update.py'],
}, },
{
# Update prebuilt Rust toolchain.
'name': 'rust-toolchain',
'pattern': '.',
'condition': 'fetch_prebuilt_chromium_rust_toolchain',
'action': ['python3', 'src/tools/rust/update_rust.py'],
},
{ {
# Build the clang toolchain from tip-of-tree. # Build the clang toolchain from tip-of-tree.
# Note: On Win, this should run after win_toolchain, as it may use it. # Note: On Win, this should run after win_toolchain, as it may use it.
@ -4218,18 +4199,6 @@ hooks = [
], ],
}, },
# Download test data for Maps telemetry_gpu_integration_test.
{
'name': 'maps_perf_test_load_dataset',
'pattern': '\\.sha1',
'action': [ 'python3',
'src/third_party/depot_tools/download_from_google_storage.py',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-telemetry',
'-s', 'src/tools/perf/page_sets/maps_perf_test/load_dataset.sha1',
],
},
# This is used to ensure that all network operations are properly # This is used to ensure that all network operations are properly
# annotated so we can document what they're for. # annotated so we can document what they're for.
@ -4462,7 +4431,7 @@ hooks = [
'action': [ 'action': [
'src/third_party/chromite/bin/cros', 'src/third_party/chromite/bin/cros',
'chrome-sdk', 'chrome-sdk',
'--fallback-versions=20', '--fallback-versions=10',
'--nogoma', '--nogoma',
'--nogn-gen', '--nogn-gen',
'--no-shell', '--no-shell',
@ -4480,7 +4449,7 @@ hooks = [
'action': [ 'action': [
'src/third_party/chromite/bin/cros', 'src/third_party/chromite/bin/cros',
'chrome-sdk', 'chrome-sdk',
'--fallback-versions=20', '--fallback-versions=10',
'--nogoma', '--nogoma',
'--nogn-gen', '--nogn-gen',
'--no-shell', '--no-shell',
@ -4497,7 +4466,7 @@ hooks = [
'action': [ 'action': [
'src/third_party/chromite/bin/cros', 'src/third_party/chromite/bin/cros',
'chrome-sdk', 'chrome-sdk',
'--fallback-versions=20', '--fallback-versions=10',
'--nogoma', '--nogoma',
'--nogn-gen', '--nogn-gen',
'--no-shell', '--no-shell',
@ -4513,7 +4482,7 @@ hooks = [
'action': [ 'action': [
'src/third_party/chromite/bin/cros', 'src/third_party/chromite/bin/cros',
'chrome-sdk', 'chrome-sdk',
'--fallback-versions=20', '--fallback-versions=10',
'--nogoma', '--nogoma',
'--nogn-gen', '--nogn-gen',
'--no-shell', '--no-shell',
@ -4532,7 +4501,7 @@ hooks = [
'action': [ 'action': [
'src/third_party/chromite/bin/cros', 'src/third_party/chromite/bin/cros',
'chrome-sdk', 'chrome-sdk',
'--fallback-versions=20', '--fallback-versions=10',
'--nogoma', '--nogoma',
'--nogn-gen', '--nogn-gen',
'--no-shell', '--no-shell',
@ -4551,7 +4520,7 @@ hooks = [
'action': [ 'action': [
'src/third_party/chromite/bin/cros', 'src/third_party/chromite/bin/cros',
'chrome-sdk', 'chrome-sdk',
'--fallback-versions=20', '--fallback-versions=10',
'--nogoma', '--nogoma',
'--nogn-gen', '--nogn-gen',
'--no-shell', '--no-shell',
@ -4570,7 +4539,7 @@ hooks = [
'action': [ 'action': [
'src/third_party/chromite/bin/cros', 'src/third_party/chromite/bin/cros',
'chrome-sdk', 'chrome-sdk',
'--fallback-versions=20', '--fallback-versions=10',
'--nogoma', '--nogoma',
'--nogn-gen', '--nogn-gen',
'--no-shell', '--no-shell',
@ -4588,7 +4557,7 @@ hooks = [
'action': [ 'action': [
'src/third_party/chromite/bin/cros', 'src/third_party/chromite/bin/cros',
'chrome-sdk', 'chrome-sdk',
'--fallback-versions=20', '--fallback-versions=10',
'--nogoma', '--nogoma',
'--nogn-gen', '--nogn-gen',
'--no-shell', '--no-shell',

View File

@ -300,13 +300,12 @@ mixed_component("base") {
"debug/stack_trace.h", "debug/stack_trace.h",
"debug/task_trace.cc", "debug/task_trace.cc",
"debug/task_trace.h", "debug/task_trace.h",
"enterprise_util.h",
"environment.cc", "environment.cc",
"environment.h", "environment.h",
"export_template.h", "export_template.h",
"feature_list.cc", "feature_list.cc",
"feature_list.h", "feature_list.h",
"features.cc",
"features.h",
"file_descriptor_store.cc", "file_descriptor_store.cc",
"file_descriptor_store.h", "file_descriptor_store.h",
"file_version_info.h", "file_version_info.h",
@ -396,13 +395,13 @@ mixed_component("base") {
"memory/page_size.h", "memory/page_size.h",
"memory/platform_shared_memory_handle.cc", "memory/platform_shared_memory_handle.cc",
"memory/platform_shared_memory_handle.h", "memory/platform_shared_memory_handle.h",
"memory/platform_shared_memory_mapper.cc",
"memory/platform_shared_memory_mapper.h", "memory/platform_shared_memory_mapper.h",
"memory/platform_shared_memory_region.cc", "memory/platform_shared_memory_region.cc",
"memory/platform_shared_memory_region.h", "memory/platform_shared_memory_region.h",
"memory/ptr_util.h", "memory/ptr_util.h",
"memory/raw_ptr.cc", "memory/raw_ptr.cc",
"memory/raw_ptr.h", "memory/raw_ptr.h",
"memory/raw_ptr_exclusion.h",
"memory/raw_scoped_refptr_mismatch_checker.h", "memory/raw_scoped_refptr_mismatch_checker.h",
"memory/read_only_shared_memory_region.cc", "memory/read_only_shared_memory_region.cc",
"memory/read_only_shared_memory_region.h", "memory/read_only_shared_memory_region.h",
@ -415,8 +414,6 @@ mixed_component("base") {
"memory/scoped_policy.h", "memory/scoped_policy.h",
"memory/scoped_refptr.h", "memory/scoped_refptr.h",
"memory/shared_memory_hooks.h", "memory/shared_memory_hooks.h",
"memory/shared_memory_mapper.cc",
"memory/shared_memory_mapper.h",
"memory/shared_memory_mapping.cc", "memory/shared_memory_mapping.cc",
"memory/shared_memory_mapping.h", "memory/shared_memory_mapping.h",
"memory/shared_memory_security_policy.cc", "memory/shared_memory_security_policy.cc",
@ -429,7 +426,6 @@ mixed_component("base") {
"memory/unsafe_shared_memory_region.cc", "memory/unsafe_shared_memory_region.cc",
"memory/unsafe_shared_memory_region.h", "memory/unsafe_shared_memory_region.h",
"memory/values_equivalent.h", "memory/values_equivalent.h",
"memory/weak_auto_reset.h",
"memory/weak_ptr.cc", "memory/weak_ptr.cc",
"memory/weak_ptr.h", "memory/weak_ptr.h",
"memory/writable_shared_memory_region.cc", "memory/writable_shared_memory_region.cc",
@ -443,6 +439,7 @@ mixed_component("base") {
"message_loop/message_pump_glib.cc", "message_loop/message_pump_glib.cc",
"message_loop/message_pump_glib.h", "message_loop/message_pump_glib.h",
"message_loop/message_pump_type.h", "message_loop/message_pump_type.h",
"message_loop/timer_slack.cc",
"message_loop/timer_slack.h", "message_loop/timer_slack.h",
"message_loop/work_id_provider.cc", "message_loop/work_id_provider.cc",
"message_loop/work_id_provider.h", "message_loop/work_id_provider.h",
@ -611,6 +608,7 @@ mixed_component("base") {
"strings/abseil_string_conversions.h", "strings/abseil_string_conversions.h",
"strings/abseil_string_number_conversions.cc", "strings/abseil_string_number_conversions.cc",
"strings/abseil_string_number_conversions.h", "strings/abseil_string_number_conversions.h",
"strings/char_traits.h",
"strings/escape.cc", "strings/escape.cc",
"strings/escape.h", "strings/escape.h",
"strings/latin1_string_conversions.cc", "strings/latin1_string_conversions.cc",
@ -646,10 +644,6 @@ mixed_component("base") {
"strings/utf_string_conversion_utils.h", "strings/utf_string_conversion_utils.h",
"strings/utf_string_conversions.cc", "strings/utf_string_conversions.cc",
"strings/utf_string_conversions.h", "strings/utf_string_conversions.h",
"substring_set_matcher/string_pattern.cc",
"substring_set_matcher/string_pattern.h",
"substring_set_matcher/substring_set_matcher.cc",
"substring_set_matcher/substring_set_matcher.h",
"supports_user_data.cc", "supports_user_data.cc",
"supports_user_data.h", "supports_user_data.h",
"sync_socket.cc", "sync_socket.cc",
@ -696,6 +690,8 @@ mixed_component("base") {
"task/lazy_thread_pool_task_runner.h", "task/lazy_thread_pool_task_runner.h",
"task/post_job.cc", "task/post_job.cc",
"task/post_job.h", "task/post_job.h",
"task/post_task.cc",
"task/post_task.h",
"task/post_task_and_reply_with_result_internal.h", "task/post_task_and_reply_with_result_internal.h",
"task/scoped_set_task_priority_for_current_thread.cc", "task/scoped_set_task_priority_for_current_thread.cc",
"task/scoped_set_task_priority_for_current_thread.h", "task/scoped_set_task_priority_for_current_thread.h",
@ -915,8 +911,6 @@ mixed_component("base") {
"trace_event/trace_id_helper.h", "trace_event/trace_id_helper.h",
"traits_bag.h", "traits_bag.h",
"tuple.h", "tuple.h",
"types/expected.h",
"types/expected_internal.h",
"types/id_type.h", "types/id_type.h",
"types/pass_key.h", "types/pass_key.h",
"types/strong_alias.h", "types/strong_alias.h",
@ -1024,8 +1018,6 @@ mixed_component("base") {
"debug/invalid_access_win.cc", "debug/invalid_access_win.cc",
"debug/invalid_access_win.h", "debug/invalid_access_win.h",
"debug/stack_trace_win.cc", "debug/stack_trace_win.cc",
"enterprise_util.cc",
"enterprise_util.h",
"enterprise_util_win.cc", "enterprise_util_win.cc",
"file_version_info_win.cc", "file_version_info_win.cc",
"file_version_info_win.h", "file_version_info_win.h",
@ -1195,8 +1187,6 @@ mixed_component("base") {
"allocator/allocator_interception_mac.mm", "allocator/allocator_interception_mac.mm",
"allocator/malloc_zone_functions_mac.cc", "allocator/malloc_zone_functions_mac.cc",
"allocator/malloc_zone_functions_mac.h", "allocator/malloc_zone_functions_mac.h",
"enterprise_util.cc",
"enterprise_util.h",
"enterprise_util_mac.mm", "enterprise_util_mac.mm",
"file_version_info_mac.h", "file_version_info_mac.h",
"file_version_info_mac.mm", "file_version_info_mac.mm",
@ -1848,7 +1838,6 @@ mixed_component("base") {
"process/process_stubs.cc", "process/process_stubs.cc",
"profiler/stack_sampler_posix.cc", "profiler/stack_sampler_posix.cc",
"sync_socket_nacl.cc", "sync_socket_nacl.cc",
"system/sys_info_nacl.cc",
"threading/platform_thread_linux.cc", "threading/platform_thread_linux.cc",
] ]
@ -2439,10 +2428,7 @@ buildflag_header("feature_list_buildflags") {
buildflag_header("logging_buildflags") { buildflag_header("logging_buildflags") {
header = "logging_buildflags.h" header = "logging_buildflags.h"
flags = [ flags = [ "ENABLE_LOG_ERROR_NOT_REACHED=$enable_log_error_not_reached" ]
"ENABLE_LOG_ERROR_NOT_REACHED=$enable_log_error_not_reached",
"USE_RUNTIME_VLOG=$use_runtime_vlog",
]
} }
buildflag_header("orderfile_buildflags") { buildflag_header("orderfile_buildflags") {

View File

@ -95,17 +95,7 @@ class LeakySingleton {
T* GetSlowPath(); T* GetSlowPath();
std::atomic<T*> instance_; std::atomic<T*> instance_;
// Before C++20, having an initializer here causes a "variable does not have a
// constant initializer" error. In C++20, omitting it causes a similar error.
// Presumably this is due to the C++20 changes to make atomic initialization
// (of the other members of this class) sane, so guarding under that
// feature-test.
#if !defined(__cpp_lib_atomic_value_initialization) || \
__cpp_lib_atomic_value_initialization < 201911L
alignas(T) uint8_t instance_buffer_[sizeof(T)]; alignas(T) uint8_t instance_buffer_[sizeof(T)];
#else
alignas(T) uint8_t instance_buffer_[sizeof(T)] = {0};
#endif
std::atomic<bool> initialization_lock_; std::atomic<bool> initialization_lock_;
}; };
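For context only: the members shown above (an atomic instance pointer, raw in-place storage, and an initialization lock) are the usual ingredients of a double-checked, placement-new leaky singleton. The sketch below is a generic illustration of that idiom under those assumptions; it is not the actual `Get()`/`GetSlowPath()` bodies from this file.

```cpp
// Generic double-checked leaky-singleton sketch (illustration only).
#include <atomic>
#include <cstdint>
#include <new>

template <typename T>
class LeakySingletonSketch {
 public:
  T* Get() {
    T* instance = instance_.load(std::memory_order_acquire);
    if (instance)
      return instance;  // Fast path: already constructed.
    return GetSlowPath();
  }

 private:
  T* GetSlowPath() {
    // Take the spin lock; whichever thread wins constructs exactly once.
    bool expected = false;
    while (!initialization_lock_.compare_exchange_weak(
        expected, true, std::memory_order_acquire)) {
      expected = false;
    }
    T* instance = instance_.load(std::memory_order_acquire);
    if (!instance) {
      instance = new (instance_buffer_) T();  // Placement-new; never freed.
      instance_.store(instance, std::memory_order_release);
    }
    initialization_lock_.store(false, std::memory_order_release);
    return instance;
  }

  std::atomic<T*> instance_{nullptr};
  alignas(T) uint8_t instance_buffer_[sizeof(T)] = {};
  std::atomic<bool> initialization_lock_{false};
};
```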
@ -143,7 +133,7 @@ T* LeakySingleton<T, Constructor>::GetSlowPath() {
class MainPartitionConstructor { class MainPartitionConstructor {
public: public:
static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) { static base::ThreadSafePartitionRoot* New(void* buffer) {
constexpr base::PartitionOptions::ThreadCache thread_cache = constexpr base::PartitionOptions::ThreadCache thread_cache =
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// Additional partitions may be created in ConfigurePartitions(). Since // Additional partitions may be created in ConfigurePartitions(). Since
@ -157,7 +147,7 @@ class MainPartitionConstructor {
// and only one is supported at a time. // and only one is supported at a time.
base::PartitionOptions::ThreadCache::kDisabled; base::PartitionOptions::ThreadCache::kDisabled;
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* new_root = new (buffer) partition_alloc::ThreadSafePartitionRoot({ auto* new_root = new (buffer) base::ThreadSafePartitionRoot({
base::PartitionOptions::AlignedAlloc::kAllowed, base::PartitionOptions::AlignedAlloc::kAllowed,
thread_cache, thread_cache,
base::PartitionOptions::Quarantine::kAllowed, base::PartitionOptions::Quarantine::kAllowed,
@ -170,32 +160,30 @@ class MainPartitionConstructor {
} }
}; };
LeakySingleton<partition_alloc::ThreadSafePartitionRoot, LeakySingleton<base::ThreadSafePartitionRoot, MainPartitionConstructor> g_root
MainPartitionConstructor> CONSTINIT = {};
g_root CONSTINIT = {}; base::ThreadSafePartitionRoot* Allocator() {
partition_alloc::ThreadSafePartitionRoot* Allocator() {
return g_root.Get(); return g_root.Get();
} }
// Original g_root_ if it was replaced by ConfigurePartitions(). // Original g_root_ if it was replaced by ConfigurePartitions().
std::atomic<partition_alloc::ThreadSafePartitionRoot*> g_original_root(nullptr); std::atomic<base::ThreadSafePartitionRoot*> g_original_root(nullptr);
class AlignedPartitionConstructor { class AlignedPartitionConstructor {
public: public:
static partition_alloc::ThreadSafePartitionRoot* New(void* buffer) { static base::ThreadSafePartitionRoot* New(void* buffer) {
return g_root.Get(); return g_root.Get();
} }
}; };
LeakySingleton<partition_alloc::ThreadSafePartitionRoot, LeakySingleton<base::ThreadSafePartitionRoot, AlignedPartitionConstructor>
AlignedPartitionConstructor>
g_aligned_root CONSTINIT = {}; g_aligned_root CONSTINIT = {};
partition_alloc::ThreadSafePartitionRoot* OriginalAllocator() { base::ThreadSafePartitionRoot* OriginalAllocator() {
return g_original_root.load(std::memory_order_relaxed); return g_original_root.load(std::memory_order_relaxed);
} }
partition_alloc::ThreadSafePartitionRoot* AlignedAllocator() { base::ThreadSafePartitionRoot* AlignedAllocator() {
return g_aligned_root.Get(); return g_aligned_root.Get();
} }
@ -253,13 +241,13 @@ void* AllocateAlignedMemory(size_t alignment, size_t size) {
// Note that all "AlignedFree()" variants (_aligned_free() on Windows for // Note that all "AlignedFree()" variants (_aligned_free() on Windows for
// instance) directly call PartitionFree(), so there is no risk of // instance) directly call PartitionFree(), so there is no risk of
// mismatch. (see below the default_dispatch definition). // mismatch. (see below the default_dispatch definition).
if (alignment <= partition_alloc::internal::kAlignment) { if (alignment <= base::kAlignment) {
// This is mandated by |posix_memalign()| and friends, so should never fire. // This is mandated by |posix_memalign()| and friends, so should never fire.
PA_CHECK(base::bits::IsPowerOfTwo(alignment)); PA_CHECK(base::bits::IsPowerOfTwo(alignment));
// TODO(bartekn): See if the compiler optimizes branches down the stack on // TODO(bartekn): See if the compiler optimizes branches down the stack on
// Mac, where PartitionPageSize() isn't constexpr. // Mac, where PartitionPageSize() isn't constexpr.
return Allocator()->AllocWithFlagsNoHooks( return Allocator()->AllocWithFlagsNoHooks(0, size,
0, size, partition_alloc::PartitionPageSize()); base::PartitionPageSize());
} }
return AlignedAllocator()->AlignedAllocWithFlags( return AlignedAllocator()->AlignedAllocWithFlags(
@ -298,8 +286,7 @@ void PartitionAllocSetCallNewHandlerOnMallocFailure(bool value) {
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) { void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
ScopedDisallowAllocations guard{}; ScopedDisallowAllocations guard{};
return Allocator()->AllocWithFlagsNoHooks( return Allocator()->AllocWithFlagsNoHooks(
0 | g_alloc_flags, MaybeAdjustSize(size), 0 | g_alloc_flags, MaybeAdjustSize(size), PartitionPageSize());
partition_alloc::PartitionPageSize());
} }
void* PartitionMallocUnchecked(const AllocatorDispatch*, void* PartitionMallocUnchecked(const AllocatorDispatch*,
@ -308,7 +295,7 @@ void* PartitionMallocUnchecked(const AllocatorDispatch*,
ScopedDisallowAllocations guard{}; ScopedDisallowAllocations guard{};
return Allocator()->AllocWithFlagsNoHooks( return Allocator()->AllocWithFlagsNoHooks(
partition_alloc::AllocFlags::kReturnNull | g_alloc_flags, partition_alloc::AllocFlags::kReturnNull | g_alloc_flags,
MaybeAdjustSize(size), partition_alloc::PartitionPageSize()); MaybeAdjustSize(size), PartitionPageSize());
} }
void* PartitionCalloc(const AllocatorDispatch*, void* PartitionCalloc(const AllocatorDispatch*,
@ -319,7 +306,7 @@ void* PartitionCalloc(const AllocatorDispatch*,
const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie(); const size_t total = base::CheckMul(n, MaybeAdjustSize(size)).ValueOrDie();
return Allocator()->AllocWithFlagsNoHooks( return Allocator()->AllocWithFlagsNoHooks(
partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total, partition_alloc::AllocFlags::kZeroFill | g_alloc_flags, total,
partition_alloc::PartitionPageSize()); PartitionPageSize());
} }
void* PartitionMemalign(const AllocatorDispatch*, void* PartitionMemalign(const AllocatorDispatch*,
@ -358,7 +345,7 @@ void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
} else { } else {
// size == 0 and address != null means just "free(address)". // size == 0 and address != null means just "free(address)".
if (address) if (address)
partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address); base::ThreadSafePartitionRoot::FreeNoHooks(address);
} }
// The original memory block (specified by address) is unchanged if ENOMEM. // The original memory block (specified by address) is unchanged if ENOMEM.
if (!new_ptr) if (!new_ptr)
@ -366,12 +353,11 @@ void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
// TODO(tasak): Need to compare the new alignment with the address' alignment. // TODO(tasak): Need to compare the new alignment with the address' alignment.
// If the two alignments are not the same, need to return nullptr with EINVAL. // If the two alignments are not the same, need to return nullptr with EINVAL.
if (address) { if (address) {
size_t usage = size_t usage = base::ThreadSafePartitionRoot::GetUsableSize(address);
partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
size_t copy_size = usage > size ? size : usage; size_t copy_size = usage > size ? size : usage;
memcpy(new_ptr, address, copy_size); memcpy(new_ptr, address, copy_size);
partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address); base::ThreadSafePartitionRoot::FreeNoHooks(address);
} }
return new_ptr; return new_ptr;
} }
@ -432,7 +418,7 @@ void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
} }
#endif #endif
partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(object); base::ThreadSafePartitionRoot::FreeNoHooks(object);
} }
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
@ -449,7 +435,7 @@ void PartitionFreeDefiniteSize(const AllocatorDispatch*,
ScopedDisallowAllocations guard{}; ScopedDisallowAllocations guard{};
// TODO(lizeb): Optimize PartitionAlloc to use the size information. This is // TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
// still useful though, as we avoid double-checking that the address is owned. // still useful though, as we avoid double-checking that the address is owned.
partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(address); base::ThreadSafePartitionRoot::FreeNoHooks(address);
} }
#endif // BUILDFLAG(IS_APPLE) #endif // BUILDFLAG(IS_APPLE)
@ -471,8 +457,7 @@ size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
#endif // BUILDFLAG(IS_APPLE) #endif // BUILDFLAG(IS_APPLE)
// TODO(lizeb): Returns incorrect values for aligned allocations. // TODO(lizeb): Returns incorrect values for aligned allocations.
const size_t size = const size_t size = base::ThreadSafePartitionRoot::GetUsableSize(address);
partition_alloc::ThreadSafePartitionRoot::GetUsableSize(address);
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
// The object pointed to by `address` is allocated by the PartitionAlloc. // The object pointed to by `address` is allocated by the PartitionAlloc.
// So, this function must not return zero so that the malloc zone dispatcher // So, this function must not return zero so that the malloc zone dispatcher
@ -550,13 +535,13 @@ void EnablePartitionAllocMemoryReclaimer() {
} }
} }
alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t alignas(base::ThreadSafePartitionRoot) uint8_t
g_allocator_buffer_for_new_main_partition[sizeof( g_allocator_buffer_for_new_main_partition[sizeof(
partition_alloc::ThreadSafePartitionRoot)]; base::ThreadSafePartitionRoot)];
alignas(partition_alloc::ThreadSafePartitionRoot) uint8_t alignas(base::ThreadSafePartitionRoot) uint8_t
g_allocator_buffer_for_aligned_alloc_partition[sizeof( g_allocator_buffer_for_aligned_alloc_partition[sizeof(
partition_alloc::ThreadSafePartitionRoot)]; base::ThreadSafePartitionRoot)];
void ConfigurePartitions( void ConfigurePartitions(
EnableBrp enable_brp, EnableBrp enable_brp,
@ -607,7 +592,7 @@ void ConfigurePartitions(
base::PartitionOptions::UseConfigurablePool::kNo, base::PartitionOptions::UseConfigurablePool::kNo,
}); });
partition_alloc::ThreadSafePartitionRoot* new_aligned_root; base::ThreadSafePartitionRoot* new_aligned_root;
if (use_dedicated_aligned_partition) { if (use_dedicated_aligned_partition) {
// TODO(bartekn): Use the original root instead of creating a new one. It'd // TODO(bartekn): Use the original root instead of creating a new one. It'd
// result in one less partition, but come at a cost of commingling types. // result in one less partition, but come at a cost of commingling types.

View File

@ -47,7 +47,7 @@ kern_return_t MallocIntrospectionEnumerator(task_t task,
} }
size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) { size_t MallocIntrospectionGoodSize(malloc_zone_t* zone, size_t size) {
return base::bits::AlignUp(size, partition_alloc::internal::kAlignment); return base::bits::AlignUp(size, base::kAlignment);
} }
boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) { boolean_t MallocIntrospectionCheck(malloc_zone_t* zone) {

View File

@ -46,7 +46,6 @@ namespace {
#if defined(PA_ALLOW_PCSCAN) #if defined(PA_ALLOW_PCSCAN)
#if BUILDFLAG(ENABLE_BASE_TRACING)
constexpr const char* ScannerIdToTracingString( constexpr const char* ScannerIdToTracingString(
internal::StatsCollector::ScannerId id) { internal::StatsCollector::ScannerId id) {
switch (id) { switch (id) {
@ -78,47 +77,34 @@ constexpr const char* MutatorIdToTracingString(
__builtin_unreachable(); __builtin_unreachable();
} }
} }
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
// Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes. // Inject TRACE_EVENT_BEGIN/END, TRACE_COUNTER1, and UmaHistogramTimes.
class StatsReporterImpl final : public partition_alloc::StatsReporter { class StatsReporterImpl final : public partition_alloc::StatsReporter {
public: public:
void ReportTraceEvent(internal::StatsCollector::ScannerId id, void ReportTraceEvent(internal::StatsCollector::ScannerId id,
[[maybe_unused]] const PlatformThreadId tid, [[maybe_unused]] const PlatformThreadId tid,
int64_t start_time_ticks_internal_value, TimeTicks start_time,
int64_t end_time_ticks_internal_value) override { TimeTicks end_time) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// TRACE_EVENT_* macros below drop most parameters when tracing is // TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time. // disabled at compile time.
const char* tracing_id = ScannerIdToTracingString(id); const char* tracing_id = ScannerIdToTracingString(id);
const TimeTicks start_time =
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
const TimeTicks end_time =
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id), TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time); perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid), TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time); end_time);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
} }
void ReportTraceEvent(internal::StatsCollector::MutatorId id, void ReportTraceEvent(internal::StatsCollector::MutatorId id,
[[maybe_unused]] const PlatformThreadId tid, [[maybe_unused]] const PlatformThreadId tid,
int64_t start_time_ticks_internal_value, TimeTicks start_time,
int64_t end_time_ticks_internal_value) override { TimeTicks end_time) override {
#if BUILDFLAG(ENABLE_BASE_TRACING)
// TRACE_EVENT_* macros below drop most parameters when tracing is // TRACE_EVENT_* macros below drop most parameters when tracing is
// disabled at compile time. // disabled at compile time.
const char* tracing_id = MutatorIdToTracingString(id); const char* tracing_id = MutatorIdToTracingString(id);
const TimeTicks start_time =
TimeTicks::FromInternalValue(start_time_ticks_internal_value);
const TimeTicks end_time =
TimeTicks::FromInternalValue(end_time_ticks_internal_value);
TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id), TRACE_EVENT_BEGIN(kTraceCategory, perfetto::StaticString(tracing_id),
perfetto::ThreadTrack::ForThread(tid), start_time); perfetto::ThreadTrack::ForThread(tid), start_time);
TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid), TRACE_EVENT_END(kTraceCategory, perfetto::ThreadTrack::ForThread(tid),
end_time); end_time);
#endif // BUILDFLAG(ENABLE_BASE_TRACING)
} }
void ReportSurvivedQuarantineSize(size_t survived_size) override { void ReportSurvivedQuarantineSize(size_t survived_size) override {
@ -134,8 +120,7 @@ class StatsReporterImpl final : public partition_alloc::StatsReporter {
1000 * survived_rate); 1000 * survived_rate);
} }
void ReportStats(const char* stats_name, int64_t sample_in_usec) override { void ReportStats(const char* stats_name, TimeDelta sample) override {
TimeDelta sample = Microseconds(sample_in_usec);
UmaHistogramTimes(stats_name, sample); UmaHistogramTimes(stats_name, sample);
} }

View File

@ -55,6 +55,9 @@ target(partition_alloc_target_type, "partition_alloc") {
"address_space_stats.h", "address_space_stats.h",
"allocation_guard.cc", "allocation_guard.cc",
"allocation_guard.h", "allocation_guard.h",
"base/bits.h",
"base/migration_adapter.h",
"base/sys_byteorder.h",
"dangling_raw_ptr_checks.cc", "dangling_raw_ptr_checks.cc",
"dangling_raw_ptr_checks.h", "dangling_raw_ptr_checks.h",
"extended_api.cc", "extended_api.cc",
@ -74,40 +77,6 @@ target(partition_alloc_target_type, "partition_alloc") {
"partition_alloc-inl.h", "partition_alloc-inl.h",
"partition_alloc.cc", "partition_alloc.cc",
"partition_alloc.h", "partition_alloc.h",
"partition_alloc_base/atomic_ref_count.h",
"partition_alloc_base/bits.h",
"partition_alloc_base/cpu.cc",
"partition_alloc_base/cpu.h",
"partition_alloc_base/cxx17_backports.h",
"partition_alloc_base/debug/alias.cc",
"partition_alloc_base/debug/alias.h",
"partition_alloc_base/gtest_prod_util.h",
"partition_alloc_base/logging.cc",
"partition_alloc_base/logging.h",
"partition_alloc_base/memory/ref_counted.cc",
"partition_alloc_base/memory/ref_counted.h",
"partition_alloc_base/memory/scoped_refptr.h",
"partition_alloc_base/migration_adapter.h",
"partition_alloc_base/no_destructor.h",
"partition_alloc_base/numerics/checked_math.h",
"partition_alloc_base/numerics/checked_math_impl.h",
"partition_alloc_base/numerics/clamped_math.h",
"partition_alloc_base/numerics/clamped_math_impl.h",
"partition_alloc_base/numerics/math_constants.h",
"partition_alloc_base/numerics/ostream_operators.h",
"partition_alloc_base/numerics/ranges.h",
"partition_alloc_base/numerics/safe_conversions.h",
"partition_alloc_base/numerics/safe_conversions_arm_impl.h",
"partition_alloc_base/numerics/safe_conversions_impl.h",
"partition_alloc_base/numerics/safe_math.h",
"partition_alloc_base/numerics/safe_math_arm_impl.h",
"partition_alloc_base/numerics/safe_math_clang_gcc_impl.h",
"partition_alloc_base/numerics/safe_math_shared_impl.h",
"partition_alloc_base/posix/eintr_wrapper.h",
"partition_alloc_base/rand_util.cc",
"partition_alloc_base/rand_util.h",
"partition_alloc_base/scoped_clear_last_error.h",
"partition_alloc_base/sys_byteorder.h",
"partition_alloc_check.h", "partition_alloc_check.h",
"partition_alloc_config.h", "partition_alloc_config.h",
"partition_alloc_constants.h", "partition_alloc_constants.h",
@ -172,38 +141,15 @@ target(partition_alloc_target_type, "partition_alloc") {
if (is_win) { if (is_win) {
sources += [ sources += [
"page_allocator_internals_win.h", "page_allocator_internals_win.h",
"partition_alloc_base/rand_util_win.cc",
"partition_alloc_base/scoped_clear_last_error_win.cc",
"partition_tls_win.cc", "partition_tls_win.cc",
] ]
} else if (is_posix) { } else if (is_posix) {
sources += [ sources += [
"page_allocator_internals_posix.cc", "page_allocator_internals_posix.cc",
"page_allocator_internals_posix.h", "page_allocator_internals_posix.h",
"partition_alloc_base/files/file_util.h",
"partition_alloc_base/files/file_util_posix.cc",
"partition_alloc_base/posix/safe_strerror.cc",
"partition_alloc_base/posix/safe_strerror.h",
"partition_alloc_base/rand_util_posix.cc",
] ]
} else if (is_fuchsia) { } else if (is_fuchsia) {
sources += [ sources += [ "page_allocator_internals_fuchsia.h" ]
"page_allocator_internals_fuchsia.h",
"partition_alloc_base/posix/safe_strerror.cc",
"partition_alloc_base/posix/safe_strerror.h",
"partition_alloc_base/rand_util_fuchsia.cc",
]
}
if (is_android) {
# Only android build requires native_library, and native_library depends
# on file_path. So file_path is added if is_android = true.
sources += [
"partition_alloc_base/files/file_path.cc",
"partition_alloc_base/files/file_path.h",
"partition_alloc_base/native_library.cc",
"partition_alloc_base/native_library.h",
"partition_alloc_base/native_library_posix.cc",
]
} }
if (current_cpu == "x64") { if (current_cpu == "x64") {
defines += [ "PA_PCSCAN_STACK_SUPPORTED" ] defines += [ "PA_PCSCAN_STACK_SUPPORTED" ]
@ -231,10 +177,7 @@ target(partition_alloc_target_type, "partition_alloc") {
"//build:chromeos_buildflags", "//build:chromeos_buildflags",
"//build/config/compiler:compiler_buildflags", "//build/config/compiler:compiler_buildflags",
] ]
deps = []
# TODO(https://crbug.com/1151236): Remove this dependency on Abseil once PA
# no longer includes any headers directly from base/.
deps = [ "//third_party/abseil-cpp:absl" ]
configs += [ configs += [
":partition_alloc_implementation", ":partition_alloc_implementation",
":memory_tagging", ":memory_tagging",
@ -262,15 +205,6 @@ target(partition_alloc_target_type, "partition_alloc") {
# SecTaskGetCodeSignStatus needs: # SecTaskGetCodeSignStatus needs:
frameworks += [ "Security.framework" ] frameworks += [ "Security.framework" ]
} }
configs += [ "//build/config/compiler:wexit_time_destructors" ]
# Partition alloc is relatively hot (>1% of cycles for users of CrOS). Use speed-focused
# optimizations for it.
if (!is_debug) {
configs -= [ "//build/config/compiler:default_optimization" ]
configs += [ "//build/config/compiler:optimize_speed" ]
}
} }
# TODO(crbug.com/1151236): After making partition_alloc a standalone library, # TODO(crbug.com/1151236): After making partition_alloc a standalone library,
@ -285,3 +219,4 @@ target(partition_alloc_target_type, "partition_alloc") {
# "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot", # "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$_put_ref_count_in_previous_slot",
# "USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr", # "USE_MTE_CHECKED_PTR=$_use_mte_checked_ptr",
# "RECORD_ALLOC_INFO=$_record_alloc_info", # "RECORD_ALLOC_INFO=$_record_alloc_info",

View File

@ -12,17 +12,29 @@ include_rules = [
"+base/check.h", "+base/check.h",
"+base/check_op.h", "+base/check_op.h",
"+base/compiler_specific.h", "+base/compiler_specific.h",
"+base/cpu.h",
"+base/cxx17_backports.h",
"+base/dcheck_is_on.h", "+base/dcheck_is_on.h",
"+base/debug/alias.h",
"+base/debug/proc_maps_linux.h", "+base/debug/proc_maps_linux.h",
"+base/files/file_path.h",
"+base/fuchsia/fuchsia_logging.h", "+base/fuchsia/fuchsia_logging.h",
"+base/gtest_prod_util.h",
"+base/immediate_crash.h", "+base/immediate_crash.h",
"+base/lazy_instance.h", "+base/lazy_instance.h",
"+base/location.h", "+base/location.h",
"+base/logging.h",
"+base/logging_buildflags.h", "+base/logging_buildflags.h",
"+base/mac/foundation_util.h", "+base/mac/foundation_util.h",
"+base/mac/mac_util.h", "+base/mac/mac_util.h",
"+base/mac/scoped_cftyperef.h", "+base/mac/scoped_cftyperef.h",
"+base/memory/ref_counted.h",
"+base/memory/scoped_refptr.h",
"+base/native_library.h",
"+base/no_destructor.h",
"+base/posix/eintr_wrapper.h",
"+base/process/memory.h", "+base/process/memory.h",
"+base/rand_util.h",
"+base/strings/stringprintf.h", "+base/strings/stringprintf.h",
"+base/system/sys_info.h", "+base/system/sys_info.h",
"+base/test/bind.h", "+base/test/bind.h",
@ -38,9 +50,6 @@ include_rules = [
"+build/build_config.h", "+build/build_config.h",
"+build/buildflag.h", "+build/buildflag.h",
"+build/chromecast_buildflags.h", "+build/chromecast_buildflags.h",
"+testing/gmock/include/gmock/gmock.h",
"+testing/gtest/include/gtest/gtest.h", "+testing/gtest/include/gtest/gtest.h",
"+testing/gtest/include/gtest/gtest_prod.h",
"+testing/perf/perf_result_reporter.h", "+testing/perf/perf_result_reporter.h",
"+third_party/lss/linux_syscall_support.h",
] ]

View File

@ -25,7 +25,7 @@ a partition that contains similar-sized objects, e.g. one bucket holds sizes
geometrically-spaced, and go all the way up to `kMaxBucketed`, which is a tad geometrically-spaced, and go all the way up to `kMaxBucketed`, which is a tad
under 1MiB (so called *normal buckets*). There are tens of buckets, 4 between under 1MiB (so called *normal buckets*). There are tens of buckets, 4 between
each power of two (except for lower sizes where buckets that aren't a multiple each power of two (except for lower sizes where buckets that aren't a multiple
of `partition_alloc::internal::kAlignment` simply don't exist). of `base::kAlignment` simply don't exist).
Larger allocations (>`kMaxBucketed`) are realized by direct memory mapping Larger allocations (>`kMaxBucketed`) are realized by direct memory mapping
(*direct map*). (*direct map*).
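As a rough worked example of the bucket scheme described above — assuming even spacing of the 4 buckets inside each power-of-two interval, which the real bucket tables refine with alignment pruning — the candidate sizes between 256 and 512 would be 256, 320, 384 and 448:

```cpp
// Illustration only: enumerate candidate bucket sizes with 4 buckets per
// power-of-two "order". Real PartitionAlloc tables differ in details.
#include <cstddef>
#include <cstdio>

int main() {
  constexpr std::size_t kBucketsPerOrder = 4;  // "4 between each power of two"
  for (std::size_t order_base = 64; order_base < 4096; order_base *= 2) {
    const std::size_t step = order_base / kBucketsPerOrder;
    for (std::size_t i = 0; i < kBucketsPerOrder; ++i)
      std::printf("bucket: %zu\n", order_base + i * step);  // 256,320,384,448...
  }
  return 0;
}
```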
@ -85,8 +85,7 @@ PartitionAlloc also guarantees that:
### Alignment ### Alignment
PartitionAlloc guarantees that returned pointers are aligned on PartitionAlloc guarantees that returned pointers are aligned on
`partition_alloc::internal::kAlignment` boundary (typically 16B on `base::kAlignment` boundary (typically 16B on 64-bit systems, and 8B on 32-bit).
64-bit systems, and 8B on 32-bit).
PartitionAlloc also supports higher levels of alignment, that can be requested PartitionAlloc also supports higher levels of alignment, that can be requested
via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as via `PartitionAlloc::AlignedAllocWithFlags()` or platform-specific APIs (such as
@ -95,8 +94,7 @@ alignment has to be a power of two. PartitionAlloc reserves the right to round
up the requested size to the nearest power of two, greater than or equal to the up the requested size to the nearest power of two, greater than or equal to the
requested alignment. This may be wasteful, but allows taking advantage of requested alignment. This may be wasteful, but allows taking advantage of
natural PartitionAlloc alignment guarantees. Allocations with an alignment natural PartitionAlloc alignment guarantees. Allocations with an alignment
requirement greater than `partition_alloc::internal::kAlignment` are expected requirement greater than `base::kAlignment` are expected to be very rare.
to be very rare.
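A minimal sketch of the over-aligned path mentioned above, using the standard `posix_memalign()` API rather than PartitionAlloc's own entry points; the routing through the shim's `AllocateAlignedMemory()` is described in the allocator_shim hunks earlier in this diff, and the specific alignment and size here are arbitrary demo values.

```cpp
// POSIX-only sketch of requesting over-aligned memory through the libc API.
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main() {
  void* p = nullptr;
  // The requested alignment must be a power of two (and, per POSIX, a
  // multiple of sizeof(void*)); 64 and 100 are arbitrary.
  if (posix_memalign(&p, 64, 100) == 0) {
    assert(reinterpret_cast<uintptr_t>(p) % 64 == 0);
    // Plain free() is fine: as the shim comments earlier in this diff note,
    // the aligned-free variants all funnel into PartitionFree().
    free(p);
  }
  return 0;
}
```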
## PartitionAlloc-Everywhere ## PartitionAlloc-Everywhere

View File

@ -16,7 +16,6 @@
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_lock.h" #include "base/allocator/partition_allocator/partition_lock.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/dcheck_is_on.h"
#include "base/thread_annotations.h" #include "base/thread_annotations.h"
#include "build/build_config.h" #include "build/build_config.h"

View File

@ -92,14 +92,7 @@ AslrMask(uintptr_t bits) {
} }
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
ASLROffset() { ASLROffset() {
// Be careful, there is a zone where macOS will not map memory, at least return AslrAddress(0x1000000000ULL);
// on ARM64. From an ARM64 machine running 12.3, the range seems to be
// [0x1000000000, 0x7000000000). Make sure that the range we use is
// outside these bounds. In 12.3, there is a reserved area between
// MACH_VM_MIN_GPU_CARVEOUT_ADDRESS and MACH_VM_MAX_GPU_CARVEOUT_ADDRESS,
// which is reserved on ARM64. See these constants in XNU's source code
// for details (xnu-8019.80.24/osfmk/mach/arm/vm_param.h).
return AslrAddress(0x10000000000ULL);
} }
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA) #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)

View File

@ -4,15 +4,15 @@
// This file defines some bit utilities. // This file defines some bit utilities.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_ #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_BITS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_BITS_H_
#include <climits> #include <climits>
#include <cstddef> #include <cstddef>
#include <cstdint> #include <cstdint>
#include <type_traits> #include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h" #include "base/allocator/partition_allocator/base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -237,4 +237,4 @@ constexpr T LeftmostBit() {
} // namespace partition_alloc::internal::base::bits } // namespace partition_alloc::internal::base::bits
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_BITS_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_BITS_H_

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_ #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_MIGRATION_ADAPTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_MIGRATION_ADAPTER_H_
#include <cstdint> #include <cstdint>
#include <string> #include <string>
@ -23,6 +23,7 @@ class PlatformThreadHandle;
class PlatformThreadRef; class PlatformThreadRef;
class TimeDelta; class TimeDelta;
class TimeTicks; class TimeTicks;
class CPU;
template <typename Type, typename Traits> template <typename Type, typename Traits>
class LazyInstance; class LazyInstance;
@ -37,13 +38,24 @@ constexpr TimeDelta Milliseconds(T n);
template <typename T> template <typename T>
constexpr TimeDelta Microseconds(T n); constexpr TimeDelta Microseconds(T n);
BASE_EXPORT uint64_t RandGenerator(uint64_t range);
BASE_EXPORT std::string StringPrintf(const char* format, ...); BASE_EXPORT std::string StringPrintf(const char* format, ...);
#if BUILDFLAG(IS_ANDROID) template <typename T, typename O>
template <typename CharT, typename Traits> class NoDestructor;
class BasicStringPiece;
using StringPiece = BasicStringPiece<char, std::char_traits<char>>; namespace debug {
#endif
void BASE_EXPORT Alias(const void* var);
} // namespace debug
namespace internal {
template <typename T>
class CheckedNumeric;
}
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(IS_MAC)
@ -68,7 +80,6 @@ bool IsAtLeastOS10_14();
bool IsOS10_11(); bool IsOS10_11();
} // namespace mac } // namespace mac
#endif // BUILDFLAG(IS_MAC) #endif // BUILDFLAG(IS_MAC)
} // namespace base } // namespace base
@ -77,18 +88,22 @@ namespace partition_alloc::internal::base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once // TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done. // the migration to the new namespaces gets done.
using ::base::CPU;
using ::base::LapTimer; using ::base::LapTimer;
using ::base::LazyInstance; using ::base::LazyInstance;
using ::base::LazyInstanceTraitsBase; using ::base::LazyInstanceTraitsBase;
using ::base::Microseconds; using ::base::Microseconds;
using ::base::Milliseconds; using ::base::Milliseconds;
using ::base::NoDestructor;
using ::base::PlatformThread; using ::base::PlatformThread;
using ::base::PlatformThreadHandle; using ::base::PlatformThreadHandle;
using ::base::PlatformThreadRef; using ::base::PlatformThreadRef;
using ::base::RandGenerator;
using ::base::Seconds; using ::base::Seconds;
using ::base::StringPrintf; using ::base::StringPrintf;
using ::base::TimeDelta; using ::base::TimeDelta;
using ::base::TimeTicks; using ::base::TimeTicks;
using ::base::internal::CheckedNumeric;
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(IS_MAC)
template <typename CFT> template <typename CFT>
@ -96,6 +111,12 @@ using ScopedCFTypeRef =
::base::ScopedTypeRef<CFT, ::base::internal::ScopedCFTypeRefTraits<CFT>>; ::base::ScopedTypeRef<CFT, ::base::internal::ScopedCFTypeRefTraits<CFT>>;
#endif #endif
namespace debug {
using ::base::debug::Alias;
} // namespace debug
#if BUILDFLAG(IS_MAC) #if BUILDFLAG(IS_MAC)
namespace mac { namespace mac {
@ -108,4 +129,4 @@ using ::base::mac::IsOS10_11;
} // namespace partition_alloc::internal::base } // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_MIGRATION_ADAPTER_H_

View File

@ -8,12 +8,12 @@
// Use the functions defined here rather than using the platform-specific // Use the functions defined here rather than using the platform-specific
// functions directly. // functions directly.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYS_BYTEORDER_H_ #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_SYS_BYTEORDER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYS_BYTEORDER_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_SYS_BYTEORDER_H_
#include <cstdint> #include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h" #include "base/allocator/partition_allocator/base/migration_adapter.h"
#include "build/build_config.h" #include "build/build_config.h"
#if defined(COMPILER_MSVC) #if defined(COMPILER_MSVC)
@ -141,4 +141,4 @@ inline uint64_t HostToNet64(uint64_t x) {
} // namespace partition_alloc::internal::base } // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SYS_BYTEORDER_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_BASE_SYS_BYTEORDER_H_

View File

@ -5,10 +5,10 @@
#include "base/allocator/partition_allocator/memory_reclaimer.h" #include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h" #include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/no_destructor.h"
// TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to // TODO(bikineev): Temporarily disable *Scan in MemoryReclaimer as it seems to
// cause significant jank. // cause significant jank.
@ -18,7 +18,7 @@ namespace partition_alloc {
// static // static
MemoryReclaimer* MemoryReclaimer::Instance() { MemoryReclaimer* MemoryReclaimer::Instance() {
static internal::base::NoDestructor<MemoryReclaimer> instance; static base::NoDestructor<MemoryReclaimer> instance;
return instance.get(); return instance.get();
} }

View File

@ -8,10 +8,10 @@
#include <memory> #include <memory>
#include <set> #include <set>
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_lock.h" #include "base/allocator/partition_allocator/partition_lock.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/no_destructor.h"
#include "base/thread_annotations.h" #include "base/thread_annotations.h"
#include "base/time/time.h" #include "base/time/time.h"
@ -65,7 +65,7 @@ class BASE_EXPORT MemoryReclaimer {
internal::Lock lock_; internal::Lock lock_;
std::set<PartitionRoot<>*> partitions_ GUARDED_BY(lock_); std::set<PartitionRoot<>*> partitions_ GUARDED_BY(lock_);
friend class internal::base::NoDestructor<MemoryReclaimer>; friend class base::NoDestructor<MemoryReclaimer>;
friend class MemoryReclaimerTest; friend class MemoryReclaimerTest;
}; };

View File

@ -8,8 +8,8 @@
#include <cstdint> #include <cstdint>
#include "base/allocator/partition_allocator/address_space_randomization.h" #include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h" #include "base/allocator/partition_allocator/page_allocator_internal.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_lock.h" #include "base/allocator/partition_allocator/partition_lock.h"
#include "build/build_config.h" #include "build/build_config.h"

View File

@ -163,4 +163,21 @@ constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::internal::kPageMetadataShift;
using ::partition_alloc::internal::kPageMetadataSize;
using ::partition_alloc::internal::PageAllocationGranularity;
using ::partition_alloc::internal::PageAllocationGranularityBaseMask;
using ::partition_alloc::internal::PageAllocationGranularityOffsetMask;
using ::partition_alloc::internal::PageAllocationGranularityShift;
using ::partition_alloc::internal::SystemPageBaseMask;
using ::partition_alloc::internal::SystemPageOffsetMask;
using ::partition_alloc::internal::SystemPageShift;
using ::partition_alloc::internal::SystemPageSize;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_CONSTANTS_H_

View File

@ -3,8 +3,8 @@
// found in the LICENSE file. // found in the LICENSE file.
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h" #include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/cpu.h"
#include <sys/mman.h> #include <sys/mman.h>

View File

@ -14,9 +14,9 @@
#include "base/allocator/partition_allocator/oom.h" #include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/dcheck_is_on.h" #include "base/dcheck_is_on.h"
#include "base/posix/eintr_wrapper.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)
@ -197,7 +197,7 @@ bool TrySetSystemPagesAccessInternal(
uintptr_t address, uintptr_t address,
size_t length, size_t length,
PageAccessibilityConfiguration accessibility) { PageAccessibilityConfiguration accessibility) {
return 0 == PA_HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length, return 0 == HANDLE_EINTR(mprotect(reinterpret_cast<void*>(address), length,
GetAccessFlags(accessibility))); GetAccessFlags(accessibility)));
} }
@ -206,7 +206,7 @@ void SetSystemPagesAccessInternal(
size_t length, size_t length,
PageAccessibilityConfiguration accessibility) { PageAccessibilityConfiguration accessibility) {
int access_flags = GetAccessFlags(accessibility); int access_flags = GetAccessFlags(accessibility);
const int ret = PA_HANDLE_EINTR( const int ret = HANDLE_EINTR(
mprotect(reinterpret_cast<void*>(address), length, access_flags)); mprotect(reinterpret_cast<void*>(address), length, access_flags));
// On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal // On Linux, man mprotect(2) states that ENOMEM is returned when (1) internal

View File

@ -9,14 +9,14 @@
#include <ostream> #include <ostream>
#include "base/allocator/partition_allocator/address_pool_manager.h" #include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
@ -31,18 +31,18 @@ namespace {
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)
NOINLINE void HandleGigaCageAllocFailureOutOfVASpace() { NOINLINE void HandleGigaCageAllocFailureOutOfVASpace() {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
PA_CHECK(false); PA_CHECK(false);
} }
NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() { NOINLINE void HandleGigaCageAllocFailureOutOfCommitCharge() {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
PA_CHECK(false); PA_CHECK(false);
} }
#endif // BUILDFLAG(IS_WIN) #endif // BUILDFLAG(IS_WIN)
NOINLINE void HandleGigaCageAllocFailure() { NOINLINE void HandleGigaCageAllocFailure() {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
uint32_t alloc_page_error_code = GetAllocPageErrorCode(); uint32_t alloc_page_error_code = GetAllocPageErrorCode();
PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code)); PA_DEBUG_DATA_ON_STACK("error", static_cast<size_t>(alloc_page_error_code));
// It's important to easily differentiate these two failures on Windows, so // It's important to easily differentiate these two failures on Windows, so

View File

@ -11,8 +11,8 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h" #include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"

View File

@ -5,12 +5,10 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_ #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_INL_H_
#include <algorithm>
#include <cstring> #include <cstring>
#include "base/allocator/partition_allocator/partition_ref_count.h" #include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/random.h" #include "base/allocator/partition_allocator/random.h"
#include "base/dcheck_is_on.h"
#include "build/build_config.h" #include "build/build_config.h"
// Prefetch *x into memory. // Prefetch *x into memory.
@ -34,16 +32,6 @@ ALWAYS_INLINE void SecureMemset(void* ptr, uint8_t value, size_t size) {
__asm__ __volatile__("" : : "r"(ptr) : "memory"); __asm__ __volatile__("" : : "r"(ptr) : "memory");
} }
// Used to memset() memory for debugging purposes only.
ALWAYS_INLINE void DebugMemset(void* ptr, int value, size_t size) {
// Only set the first 512kiB of the allocation. This is enough to detect uses
// of uninitialized / freed memory, and makes tests run significantly
// faster. Note that for direct-mapped allocations, memory is decommitted at
// free() time, so use of freed memory cannot happen.
size_t size_to_memset = std::min(size, size_t{1} << 19);
memset(ptr, value, size_to_memset);
}
// Returns true if we've hit the end of a random-length period. We don't want to // Returns true if we've hit the end of a random-length period. We don't want to
// invoke `RandomValue` too often, because we call this function in a hot spot // invoke `RandomValue` too often, because we call this function in a hot spot
// (`Free`), and `RandomValue` incurs the cost of atomics. // (`Free`), and `RandomValue` incurs the cost of atomics.

View File

@ -1,69 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This is a low level implementation of atomic semantics for reference
// counting. Please use base/memory/ref_counted.h directly instead.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
#include <atomic>
namespace partition_alloc::internal::base {
class AtomicRefCount {
public:
constexpr AtomicRefCount() : ref_count_(0) {}
explicit constexpr AtomicRefCount(int initial_value)
: ref_count_(initial_value) {}
// Increment a reference count.
// Returns the previous value of the count.
int Increment() { return Increment(1); }
// Increment a reference count by "increment", which must exceed 0.
// Returns the previous value of the count.
int Increment(int increment) {
return ref_count_.fetch_add(increment, std::memory_order_relaxed);
}
// Decrement a reference count, and return whether the result is non-zero.
// Insert barriers to ensure that state written before the reference count
// became zero will be visible to a thread that has just made the count zero.
bool Decrement() {
// TODO(jbroman): Technically this doesn't need to be an acquire operation
// unless the result is 1 (i.e., the ref count did indeed reach zero).
// However, there are toolchain issues that make that not work as well at
// present (notably TSAN doesn't like it).
return ref_count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
}
// Return whether the reference count is one. If the reference count is used
// in the conventional way, a reference count of 1 implies that the current
// thread owns the reference and no other thread shares it. This call
// performs the test for a reference count of one, and performs the memory
// barrier needed for the owning thread to act on the object, knowing that it
// has exclusive access to the object.
bool IsOne() const { return ref_count_.load(std::memory_order_acquire) == 1; }
// Return whether the reference count is zero. With conventional object
// referencing counting, the object will be destroyed, so the reference count
// should never be zero. Hence this is generally used for a debug check.
bool IsZero() const {
return ref_count_.load(std::memory_order_acquire) == 0;
}
// Returns the current reference count (with no barriers). This is subtle, and
// should be used only for debugging.
int SubtleRefCountForDebug() const {
return ref_count_.load(std::memory_order_relaxed);
}
private:
std::atomic_int ref_count_;
};
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_ATOMIC_REF_COUNT_H_
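The deleted header above mirrors base::AtomicRefCount for PartitionAlloc's internal use. A hypothetical usage sketch of the documented Increment()/Decrement() contract for an intrusively ref-counted object; Widget, AddRef() and Release() are illustrative names, not part of the header:

#include "base/allocator/partition_allocator/partition_alloc_base/atomic_ref_count.h"

struct Widget {
  // Starts at 1: the creating thread owns the initial reference.
  partition_alloc::internal::base::AtomicRefCount ref_count{1};
};

void AddRef(Widget* w) {
  w->ref_count.Increment();
}

void Release(Widget* w) {
  // Decrement() returns false once the count reaches zero; its acq_rel
  // ordering makes earlier writes visible to the thread that deletes.
  if (!w->ref_count.Decrement())
    delete w;
}

int main() {
  Widget* w = new Widget;  // count: 1
  AddRef(w);               // count: 2
  Release(w);              // count: 1, still alive
  Release(w);              // count: 0, deleted here
  return 0;
}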

View File

@ -1,201 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include <inttypes.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <sstream>
#include <utility>
#include "build/build_config.h"
#if defined(ARCH_CPU_ARM_FAMILY) && \
(BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS))
#include <asm/hwcap.h>
#include <sys/auxv.h>
// Temporary definitions until a new hwcap.h is pulled in everywhere.
// https://crbug.com/1265965
#ifndef HWCAP2_MTE
#define HWCAP2_MTE (1 << 18)
#define HWCAP2_BTI (1 << 17)
#endif
#endif
#if defined(ARCH_CPU_X86_FAMILY)
#if defined(COMPILER_MSVC)
#include <immintrin.h> // For _xgetbv()
#include <intrin.h>
#endif
#endif
namespace partition_alloc::internal::base {
CPU::CPU() {
Initialize();
}
CPU::CPU(CPU&&) = default;
namespace {
#if defined(ARCH_CPU_X86_FAMILY)
#if !defined(COMPILER_MSVC)
#if defined(__pic__) && defined(__i386__)
void __cpuid(int cpu_info[4], int info_type) {
__asm__ volatile(
"mov %%ebx, %%edi\n"
"cpuid\n"
"xchg %%edi, %%ebx\n"
: "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
"=d"(cpu_info[3])
: "a"(info_type), "c"(0));
}
#else
void __cpuid(int cpu_info[4], int info_type) {
__asm__ volatile("cpuid\n"
: "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
"=d"(cpu_info[3])
: "a"(info_type), "c"(0));
}
#endif
#endif // !defined(COMPILER_MSVC)
// xgetbv returns the value of an Intel Extended Control Register (XCR).
// Currently only XCR0 is defined by Intel so |xcr| should always be zero.
uint64_t xgetbv(uint32_t xcr) {
#if defined(COMPILER_MSVC)
return _xgetbv(xcr);
#else
uint32_t eax, edx;
__asm__ volatile("xgetbv" : "=a"(eax), "=d"(edx) : "c"(xcr));
return (static_cast<uint64_t>(edx) << 32) | eax;
#endif // defined(COMPILER_MSVC)
}
#endif // ARCH_CPU_X86_FAMILY
} // namespace
void CPU::Initialize() {
#if defined(ARCH_CPU_X86_FAMILY)
int cpu_info[4] = {-1};
// __cpuid with an InfoType argument of 0 returns the number of
// valid Ids in CPUInfo[0] and the CPU identification string in
// the other three array elements. The CPU identification string is
// not in linear order. The code below arranges the information
// in a human readable form. The human readable order is CPUInfo[1] |
// CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
// before using memcpy() to copy these three array elements to |cpu_string|.
__cpuid(cpu_info, 0);
int num_ids = cpu_info[0];
std::swap(cpu_info[2], cpu_info[3]);
// Interpret CPU feature information.
if (num_ids > 0) {
int cpu_info7[4] = {0};
__cpuid(cpu_info, 1);
if (num_ids >= 7) {
__cpuid(cpu_info7, 7);
}
signature_ = cpu_info[0];
stepping_ = cpu_info[0] & 0xf;
type_ = (cpu_info[0] >> 12) & 0x3;
has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
has_sse_ = (cpu_info[3] & 0x02000000) != 0;
has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;
// "Hypervisor Present Bit: Bit 31 of ECX of CPUID leaf 0x1."
// See https://lwn.net/Articles/301888/
// This is checking for any hypervisor. Hypervisors may choose not to
// announce themselves. Hypervisors trap CPUID and sometimes return
// different results to underlying hardware.
is_running_in_vm_ = (cpu_info[2] & 0x80000000) != 0;
// AVX instructions will generate an illegal instruction exception unless
// a) they are supported by the CPU,
// b) XSAVE is supported by the CPU and
// c) XSAVE is enabled by the kernel.
// See http://software.intel.com/en-us/blogs/2011/04/14/is-avx-enabled
//
// In addition, we have observed some crashes with the xgetbv instruction
// even after following Intel's example code. (See crbug.com/375968.)
// Because of that, we also test the XSAVE bit because its description in
// the CPUID documentation suggests that it signals xgetbv support.
has_avx_ = (cpu_info[2] & 0x10000000) != 0 &&
(cpu_info[2] & 0x04000000) != 0 /* XSAVE */ &&
(cpu_info[2] & 0x08000000) != 0 /* OSXSAVE */ &&
(xgetbv(0) & 6) == 6 /* XSAVE enabled by kernel */;
has_aesni_ = (cpu_info[2] & 0x02000000) != 0;
has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
has_avx2_ = has_avx_ && (cpu_info7[1] & 0x00000020) != 0;
}
// Get the brand string of the cpu.
__cpuid(cpu_info, 0x80000000);
const int max_parameter = cpu_info[0];
static constexpr int kParameterContainingNonStopTimeStampCounter = 0x80000007;
if (max_parameter >= kParameterContainingNonStopTimeStampCounter) {
__cpuid(cpu_info, kParameterContainingNonStopTimeStampCounter);
has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
}
if (!has_non_stop_time_stamp_counter_ && is_running_in_vm_) {
int cpu_info_hv[4] = {};
__cpuid(cpu_info_hv, 0x40000000);
if (cpu_info_hv[1] == 0x7263694D && // Micr
cpu_info_hv[2] == 0x666F736F && // osof
cpu_info_hv[3] == 0x76482074) { // t Hv
// If CPUID says we have a variant TSC and a hypervisor has identified
// itself and the hypervisor says it is Microsoft Hyper-V, then treat
// TSC as invariant.
//
// Microsoft Hyper-V hypervisor reports variant TSC as there are some
// scenarios (eg. VM live migration) where the TSC is variant, but for
// our purposes we can treat it as invariant.
has_non_stop_time_stamp_counter_ = true;
}
}
#elif defined(ARCH_CPU_ARM_FAMILY)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#if defined(ARCH_CPU_ARM64)
// Check for Armv8.5-A BTI/MTE support, exposed via HWCAP2
unsigned long hwcap2 = getauxval(AT_HWCAP2);
has_mte_ = hwcap2 & HWCAP2_MTE;
has_bti_ = hwcap2 & HWCAP2_BTI;
#endif
#elif BUILDFLAG(IS_WIN)
// Windows makes high-resolution thread timing information available in
// user-space.
has_non_stop_time_stamp_counter_ = true;
#endif
#endif
}
const CPU& CPU::GetInstanceNoAllocation() {
static const CPU cpu;
return cpu;
}
} // namespace partition_alloc::internal::base

View File

@ -1,100 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/base_export.h"
#include "build/build_config.h"
namespace partition_alloc::internal::base {
// Query information about the processor.
class BASE_EXPORT CPU final {
public:
CPU();
CPU(CPU&&);
CPU(const CPU&) = delete;
// Get a preallocated instance of CPU.
// This can be used in very early application startup. The instance of CPU is
// created without branding, see CPU(bool requires_branding) for details and
// implications.
static const CPU& GetInstanceNoAllocation();
enum IntelMicroArchitecture {
PENTIUM = 0,
SSE = 1,
SSE2 = 2,
SSE3 = 3,
SSSE3 = 4,
SSE41 = 5,
SSE42 = 6,
AVX = 7,
AVX2 = 8,
FMA3 = 9,
MAX_INTEL_MICRO_ARCHITECTURE = 10
};
// Accessors for CPU information.
int signature() const { return signature_; }
int stepping() const { return stepping_; }
int type() const { return type_; }
bool has_mmx() const { return has_mmx_; }
bool has_sse() const { return has_sse_; }
bool has_sse2() const { return has_sse2_; }
bool has_sse3() const { return has_sse3_; }
bool has_ssse3() const { return has_ssse3_; }
bool has_sse41() const { return has_sse41_; }
bool has_sse42() const { return has_sse42_; }
bool has_popcnt() const { return has_popcnt_; }
bool has_avx() const { return has_avx_; }
bool has_fma3() const { return has_fma3_; }
bool has_avx2() const { return has_avx2_; }
bool has_aesni() const { return has_aesni_; }
bool has_non_stop_time_stamp_counter() const {
return has_non_stop_time_stamp_counter_;
}
bool is_running_in_vm() const { return is_running_in_vm_; }
// Armv8.5-A extensions for control flow and memory safety.
#if defined(ARCH_CPU_ARM_FAMILY)
bool has_mte() const { return has_mte_; }
bool has_bti() const { return has_bti_; }
#else
constexpr bool has_mte() const { return false; }
constexpr bool has_bti() const { return false; }
#endif
private:
// Query the processor for CPUID information.
void Initialize();
int signature_ = 0; // raw form of type, family, model, and stepping
int type_ = 0; // process type
int stepping_ = 0; // processor revision number
bool has_mmx_ = false;
bool has_sse_ = false;
bool has_sse2_ = false;
bool has_sse3_ = false;
bool has_ssse3_ = false;
bool has_sse41_ = false;
bool has_sse42_ = false;
bool has_popcnt_ = false;
bool has_avx_ = false;
bool has_fma3_ = false;
bool has_avx2_ = false;
bool has_aesni_ = false;
#if defined(ARCH_CPU_ARM_FAMILY)
bool has_mte_ = false; // Armv8.5-A MTE (Memory Tagging Extension)
bool has_bti_ = false; // Armv8.5-A BTI (Branch Target Identification)
#endif
bool has_non_stop_time_stamp_counter_ = false;
bool is_running_in_vm_ = false;
};
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
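A hedged sketch of how this trimmed-down CPU class would typically be consumed, for example to gate an AVX2 code path or an Arm MTE feature; only the accessors shown above are used, and the printed strings are illustrative:

#include <cstdio>

#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"

using partition_alloc::internal::base::CPU;

int main() {
  // GetInstanceNoAllocation() is usable very early in startup: the instance
  // is a function-local static and feature detection does not allocate.
  const CPU& cpu = CPU::GetInstanceNoAllocation();

  if (cpu.has_avx2())
    printf("AVX2 fast path available\n");

  // has_mte() is a constexpr false on non-Arm builds, so this branch
  // compiles away there.
  if (cpu.has_mte())
    printf("Armv8.5-A memory tagging available\n");

  return cpu.is_running_in_vm() ? 1 : 0;
}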

View File

@ -1,35 +0,0 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX17_BACKPORTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX17_BACKPORTS_H_
#include <functional>
#include <tuple>
#include <type_traits>
#include <utility>
#include "base/allocator/partition_allocator/partition_alloc_check.h"
namespace partition_alloc::internal::base {
// C++14 implementation of C++17's std::clamp():
// https://en.cppreference.com/w/cpp/algorithm/clamp
// Please note that the C++ spec makes it undefined behavior to call std::clamp
// with a value of `lo` that compares greater than the value of `hi`. This
// implementation uses a CHECK to enforce this as a hard restriction.
template <typename T, typename Compare>
constexpr const T& clamp(const T& v, const T& lo, const T& hi, Compare comp) {
PA_CHECK(!comp(hi, lo));
return comp(v, lo) ? lo : comp(hi, v) ? hi : v;
}
template <typename T>
constexpr const T& clamp(const T& v, const T& lo, const T& hi) {
return base::clamp(v, lo, hi, std::less<T>{});
}
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX17_BACKPORTS_H_
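A small usage sketch of the clamp() backport above; as the comment notes, passing a `lo` that compares greater than `hi` trips the PA_CHECK rather than invoking undefined behavior. The values are illustrative:

#include <cstdio>

#include "base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h"

int main() {
  using partition_alloc::internal::base::clamp;

  printf("%d\n", clamp(5, 0, 10));   // 5: already inside [0, 10]
  printf("%d\n", clamp(-3, 0, 10));  // 0: clamped up to lo
  printf("%d\n", clamp(42, 0, 10));  // 10: clamped down to hi

  // clamp(1, /*lo=*/10, /*hi=*/0) would fail the PA_CHECK(!comp(hi, lo))
  // guard instead of being undefined behavior as with std::clamp.
  return 0;
}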

View File

@ -1,15 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/compiler_specific.h"
namespace partition_alloc::internal::base::debug {
// This file/function should be excluded from LTO/LTCG to ensure that the
// compiler can't see this function's implementation when compiling calls to it.
NOINLINE void Alias(const void* var) {}
} // namespace partition_alloc::internal::base::debug

View File

@ -1,93 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
#include <stddef.h>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/base_export.h"
namespace partition_alloc::internal::base::debug {
// Make the optimizer think that |var| is aliased. This can be used to inhibit
// three different kinds of optimizations:
//
// Case #1: Prevent a local variable from being optimized out if it would not
// otherwise be live at the point of a potential crash. This can only be done
// with local variables, not globals, object members, or function return values
// - these must be copied to locals if you want to ensure they are recorded in
// crash dumps. Function arguments are fine to use since the
// base::debug::Alias() call on them will make sure they are copied to the stack
// even if they were passed in a register. Note that if the local variable is a
// pointer then its value will be retained but the memory that it points to will
// probably not be saved in the crash dump - by default only stack memory is
// saved. Therefore the aliasing technique is usually only worthwhile with
// non-pointer variables. If you have a pointer to an object and you want to
// retain the object's state you need to copy the object or its fields to local
// variables.
//
// Example usage:
// int last_error = err_;
// base::debug::Alias(&last_error);
// char name_copy[16];
// strncpy(name_copy, p->name, sizeof(name_copy)-1);
// name_copy[sizeof(name_copy)-1] = '\0';
// base::debug::Alias(name_copy);
// CHECK(false);
//
// Case #2: Prevent a tail call into a function. This is useful to make sure the
// function containing the call to base::debug::Alias() will be present in the
// call stack. In this case there is no memory that needs to be on
// the stack so we can use nullptr. The call to base::debug::Alias() needs to
// happen after the call that is suspected to be tail called. Note: This
// technique will prevent tail calls at the specific call site only. To prevent
// them for all invocations of a function look at NOT_TAIL_CALLED.
//
// Example usage:
// NOINLINE void Foo(){
// ... code ...
//
// Bar();
// base::debug::Alias(nullptr);
// }
//
// Case #3: Prevent code folding of a non-unique function. Code folding can
// cause the same address to be assigned to different functions if they are
// identical. If finding the precise signature of a function in the call-stack
// is important and it's suspected the function is identical to other functions
// it can be made unique using PA_NO_CODE_FOLDING which is a wrapper around
// base::debug::Alias();
//
// Example usage:
// NOINLINE void Foo(){
// PA_NO_CODE_FOLDING();
// Bar();
// }
//
// Finally please note that these effects compound. This means that saving a
// stack variable (case #1) using base::debug::Alias() will also inhibit
// tail calls for calls in earlier lines and prevent code folding.
void BASE_EXPORT Alias(const void* var);
} // namespace partition_alloc::internal::base::debug
// Code folding is a linker optimization whereby the linker identifies functions
// that are bit-identical and overlays them. This saves space but it leads to
// confusing call stacks because multiple symbols are at the same address and
// it is unpredictable which one will be displayed. Disabling of code folding is
// particularly useful when function names are used as signatures in crashes.
// This macro doesn't guarantee that code folding will be prevented but it
// greatly reduces the odds and always prevents it within one source file.
// If using in a function that terminates the process it is safest to put the
// PA_NO_CODE_FOLDING macro at the top of the function.
// Use like:
// void FooBarFailure(size_t size) { PA_NO_CODE_FOLDING(); OOM_CRASH(size); }
#define PA_NO_CODE_FOLDING() \
const int line_number = __LINE__; \
::partition_alloc::internal::base::debug::Alias(&line_number)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_DEBUG_ALIAS_H_
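The three cases above already carry inline examples; the following is a compact, compilable restatement of case #1 plus PA_NO_CODE_FOLDING. The OnAllocationFailure function and its use of __builtin_trap() as a stand-in for an OOM crash are hypothetical, not part of this header:

#include <cstddef>

#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"

// Hypothetical failure path: keep |bytes_requested| alive in the minidump and
// keep this function from being folded with other identical handlers.
void OnAllocationFailure(size_t bytes_requested) {
  PA_NO_CODE_FOLDING();
  size_t requested_copy = bytes_requested;
  partition_alloc::internal::base::debug::Alias(&requested_copy);
  __builtin_trap();  // Stand-in for an OOM_CRASH-style termination.
}

int main(int argc, char**) {
  if (argc > 1)
    OnAllocationFailure(size_t{1} << 20);
  return 0;
}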

View File

@ -1,165 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
#include "build/build_config.h"
// file_path.h is a widely included header and its size has significant impact
// on build time. Try not to raise this limit unless necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
#pragma clang max_tokens_here 340000
#endif
#include <string.h>
#include <algorithm>
#include "base/check_op.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#elif BUILDFLAG(IS_APPLE)
#include <CoreFoundation/CoreFoundation.h>
#endif
namespace partition_alloc::internal::base {
using StringType = FilePath::StringType;
const FilePath::CharType kStringTerminator = PA_FILE_PATH_LITERAL('\0');
// If this FilePath contains a drive letter specification, returns the
// position of the last character of the drive letter specification,
// otherwise returns npos. This can only be true on Windows, when a pathname
// begins with a letter followed by a colon. On other platforms, this always
// returns npos.
StringType::size_type FindDriveLetter(const StringType& path) {
#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
// This is dependent on an ASCII-based character set, but that's a
// reasonable assumption. iswalpha can be too inclusive here.
if (path.length() >= 2 && path[1] == L':' &&
((path[0] >= L'A' && path[0] <= L'Z') ||
(path[0] >= L'a' && path[0] <= L'z'))) {
return 1;
}
#endif // PA_FILE_PATH_USES_DRIVE_LETTERS
return StringType::npos;
}
bool IsPathAbsolute(const StringType& path) {
#if defined(PA_FILE_PATH_USES_DRIVE_LETTERS)
StringType::size_type letter = FindDriveLetter(path);
if (letter != StringType::npos) {
// Look for a separator right after the drive specification.
return path.length() > letter + 1 &&
FilePath::IsSeparator(path[letter + 1]);
}
// Look for a pair of leading separators.
return path.length() > 1 && FilePath::IsSeparator(path[0]) &&
FilePath::IsSeparator(path[1]);
#else // PA_FILE_PATH_USES_DRIVE_LETTERS
// Look for a separator in the first position.
return path.length() > 0 && FilePath::IsSeparator(path[0]);
#endif // PA_FILE_PATH_USES_DRIVE_LETTERS
}
FilePath::FilePath() = default;
FilePath::FilePath(const FilePath& that) = default;
FilePath::FilePath(FilePath&& that) noexcept = default;
FilePath::FilePath(const StringType& path) : path_(path) {
StringType::size_type nul_pos = path_.find(kStringTerminator);
if (nul_pos != StringType::npos)
path_.erase(nul_pos, StringType::npos);
}
FilePath::~FilePath() = default;
FilePath& FilePath::operator=(const FilePath& that) = default;
FilePath& FilePath::operator=(FilePath&& that) noexcept = default;
// static
bool FilePath::IsSeparator(CharType character) {
for (size_t i = 0; i < kSeparatorsLength - 1; ++i) {
if (character == kSeparators[i]) {
return true;
}
}
return false;
}
FilePath FilePath::Append(const StringType& component) const {
StringType appended = component;
StringType without_nuls;
StringType::size_type nul_pos = component.find(kStringTerminator);
if (nul_pos != StringType::npos) {
without_nuls = component.substr(0, nul_pos);
appended = without_nuls;
}
DCHECK(!IsPathAbsolute(appended));
if (path_.compare(kCurrentDirectory) == 0 && !appended.empty()) {
// Append normally doesn't do any normalization, but as a special case,
// when appending to kCurrentDirectory, just return a new path for the
// component argument. Appending component to kCurrentDirectory would
// serve no purpose other than needlessly lengthening the path, and
// it's likely in practice to wind up with FilePath objects containing
// only kCurrentDirectory when calling DirName on a single relative path
// component.
return FilePath(appended);
}
FilePath new_path(path_);
new_path.StripTrailingSeparatorsInternal();
// Don't append a separator if the path is empty (indicating the current
// directory) or if the path component is empty (indicating nothing to
// append).
if (!appended.empty() && !new_path.path_.empty()) {
// Don't append a separator if the path still ends with a trailing
// separator after stripping (indicating the root directory).
if (!IsSeparator(new_path.path_.back())) {
// Don't append a separator if the path is just a drive letter.
if (FindDriveLetter(new_path.path_) + 1 != new_path.path_.length()) {
new_path.path_.append(1, kSeparators[0]);
}
}
}
new_path.path_.append(appended.data(), appended.size());
return new_path;
}
FilePath FilePath::Append(const FilePath& component) const {
return Append(component.value());
}
void FilePath::StripTrailingSeparatorsInternal() {
// If there is no drive letter, start will be 1, which will prevent stripping
// the leading separator if there is only one separator. If there is a drive
// letter, start will be set appropriately to prevent stripping the first
// separator following the drive letter, if a separator immediately follows
// the drive letter.
StringType::size_type start = FindDriveLetter(path_) + 2;
StringType::size_type last_stripped = StringType::npos;
for (StringType::size_type pos = path_.length();
pos > start && IsSeparator(path_[pos - 1]); --pos) {
// If the string only has two separators and they're at the beginning,
// don't strip them, unless the string began with more than two separators.
if (pos != start + 1 || last_stripped == start + 2 ||
!IsSeparator(path_[start - 1])) {
path_.resize(pos - 1);
last_stripped = pos;
}
}
}
} // namespace partition_alloc::internal::base

View File

@ -1,232 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// FilePath is a container for pathnames stored in a platform's native string
// type, providing containers for manipulation in accordance with the
// platform's conventions for pathnames. It supports the following path
// types:
//
// POSIX Windows
// --------------- ----------------------------------
// Fundamental type char[] wchar_t[]
// Encoding unspecified* UTF-16
// Separator / \, tolerant of /
// Drive letters no case-insensitive A-Z followed by :
// Alternate root // (surprise!) \\ (2 Separators), for UNC paths
//
// * The encoding need not be specified on POSIX systems, although some
// POSIX-compliant systems do specify an encoding. Mac OS X uses UTF-8.
// Chrome OS also uses UTF-8.
// Linux does not specify an encoding, but in practice, the locale's
// character set may be used.
//
// For more arcane bits of path trivia, see below.
//
// FilePath objects are intended to be used anywhere paths are. An
// application may pass FilePath objects around internally, masking the
// underlying differences between systems, only differing in implementation
// where interfacing directly with the system. For example, a single
// OpenFile(const FilePath &) function may be made available, allowing all
// callers to operate without regard to the underlying implementation. On
// POSIX-like platforms, OpenFile might wrap fopen, and on Windows, it might
// wrap _wfopen_s, perhaps both by calling file_path.value().c_str(). This
// allows each platform to pass pathnames around without requiring conversions
// between encodings, which has an impact on performance, but more importantly,
// has an impact on correctness on platforms that do not have well-defined
// encodings for pathnames.
//
// Several methods are available to perform common operations on a FilePath
// object, such as determining the parent directory (DirName), isolating the
// final path component (BaseName), and appending a relative pathname string
// to an existing FilePath object (Append). These methods are highly
// recommended over attempting to split and concatenate strings directly.
// These methods are based purely on string manipulation and knowledge of
// platform-specific pathname conventions, and do not consult the filesystem
// at all, making them safe to use without fear of blocking on I/O operations.
// These methods do not function as mutators but instead return distinct
// instances of FilePath objects, and are therefore safe to use on const
// objects. The objects themselves are safe to share between threads.
//
// To aid in initialization of FilePath objects from string literals, a
// FILE_PATH_LITERAL macro is provided, which accounts for the difference
// between char[]-based pathnames on POSIX systems and wchar_t[]-based
// pathnames on Windows.
//
// As a precaution against premature truncation, paths can't contain NULs.
//
// Because a FilePath object should not be instantiated at the global scope,
// use a FilePath::CharType[] instead and initialize it with
// FILE_PATH_LITERAL. At runtime, a FilePath object can be created from the
// character array. Example:
//
// | const FilePath::CharType kLogFileName[] = FILE_PATH_LITERAL("log.txt");
// |
// | void Function() {
// | FilePath log_file_path(kLogFileName);
// | [...]
// | }
//
// WARNING: FilePaths should ALWAYS be displayed with LTR directionality, even
// when the UI language is RTL. This means you always need to pass filepaths
// through base::i18n::WrapPathWithLTRFormatting() before displaying it in the
// RTL UI.
//
// This is a very common source of bugs, please try to keep this in mind.
//
// ARCANE BITS OF PATH TRIVIA
//
// - A double leading slash is actually part of the POSIX standard. Systems
// are allowed to treat // as an alternate root, as Windows does for UNC
// (network share) paths. Most POSIX systems don't do anything special
// with two leading slashes, but FilePath handles this case properly
// in case it ever comes across such a system. FilePath needs this support
// for Windows UNC paths, anyway.
// References:
// The Open Group Base Specifications Issue 7, sections 3.267 ("Pathname")
// and 4.12 ("Pathname Resolution"), available at:
// http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_267
// http://www.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_12
//
// - Windows treats c:\\ the same way it treats \\. This was intended to
// allow older applications that require drive letters to support UNC paths
// like \\server\share\path, by permitting c:\\server\share\path as an
// equivalent. Since the OS treats these paths specially, FilePath needs
// to do the same. Since Windows can use either / or \ as the separator,
// FilePath treats c://, c:\\, //, and \\ all equivalently.
// Reference:
// The Old New Thing, "Why is a drive letter permitted in front of UNC
// paths (sometimes)?", available at:
// http://blogs.msdn.com/oldnewthing/archive/2005/11/22/495740.aspx
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
#include <cstddef>
#include <iosfwd>
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/base_export.h"
#include "build/build_config.h"
// Windows-style drive letter support and pathname separator characters can be
// enabled and disabled independently, to aid testing. These #defines are
// here so that the same setting can be used in both the implementation and
// in the unit test.
#if BUILDFLAG(IS_WIN)
#define PA_FILE_PATH_USES_DRIVE_LETTERS
#define PA_FILE_PATH_USES_WIN_SEPARATORS
#endif // BUILDFLAG(IS_WIN)
// Macros for string literal initialization of FilePath::CharType[].
#if BUILDFLAG(IS_WIN)
#define PA_FILE_PATH_LITERAL(x) L##x
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#define PA_FILE_PATH_LITERAL(x) x
#endif // BUILDFLAG(IS_WIN)
namespace partition_alloc::internal::base {
// An abstraction to isolate users from the differences between native
// pathnames on different platforms.
class BASE_EXPORT FilePath {
public:
#if BUILDFLAG(IS_WIN)
// On Windows, for Unicode-aware applications, native pathnames are wchar_t
// arrays encoded in UTF-16.
typedef std::wstring StringType;
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
// On most platforms, native pathnames are char arrays, and the encoding
// may or may not be specified. On Mac OS X, native pathnames are encoded
// in UTF-8.
typedef std::string StringType;
#endif // BUILDFLAG(IS_WIN)
typedef StringType::value_type CharType;
// Null-terminated array of separators used to separate components in paths.
// Each character in this array is a valid separator, but kSeparators[0] is
// treated as the canonical separator and is used when composing pathnames.
static constexpr CharType kSeparators[] =
#if defined(PA_FILE_PATH_USES_WIN_SEPARATORS)
PA_FILE_PATH_LITERAL("\\/");
#else // PA_FILE_PATH_USES_WIN_SEPARATORS
PA_FILE_PATH_LITERAL("/");
#endif // PA_FILE_PATH_USES_WIN_SEPARATORS
// std::size(kSeparators), i.e., the number of separators in kSeparators plus
// one (the null terminator at the end of kSeparators).
static constexpr size_t kSeparatorsLength = std::size(kSeparators);
// The special path component meaning "this directory."
static constexpr CharType kCurrentDirectory[] = PA_FILE_PATH_LITERAL(".");
// The special path component meaning "the parent directory."
static constexpr CharType kParentDirectory[] = PA_FILE_PATH_LITERAL("..");
// The character used to identify a file extension.
static constexpr CharType kExtensionSeparator = PA_FILE_PATH_LITERAL('.');
FilePath();
FilePath(const FilePath& that);
explicit FilePath(const StringType& that);
~FilePath();
FilePath& operator=(const FilePath& that);
// Constructs FilePath with the contents of |that|, which is left in valid but
// unspecified state.
FilePath(FilePath&& that) noexcept;
// Replaces the contents with those of |that|, which is left in valid but
// unspecified state.
FilePath& operator=(FilePath&& that) noexcept;
// Required for some STL containers and operations
bool operator<(const FilePath& that) const { return path_ < that.path_; }
const StringType& value() const { return path_; }
[[nodiscard]] bool empty() const { return path_.empty(); }
void clear() { path_.clear(); }
// Returns true if |character| is in kSeparators.
static bool IsSeparator(CharType character);
// Returns a FilePath by appending a separator and the supplied path
// component to this object's path. Append takes care to avoid adding
// excessive separators if this object's path already ends with a separator.
// If this object's path is kCurrentDirectory, a new FilePath corresponding
// only to |component| is returned. |component| must be a relative path;
// it is an error to pass an absolute path.
[[nodiscard]] FilePath Append(const FilePath& component) const;
[[nodiscard]] FilePath Append(const StringType& component) const;
private:
// Remove trailing separators from this object. If the path is absolute, it
// will never be stripped any more than to refer to the absolute root
// directory, so "////" will become "/", not "". A leading pair of
// separators is never stripped, to support alternate roots. This is used to
// support UNC paths on Windows.
void StripTrailingSeparatorsInternal();
StringType path_;
};
} // namespace partition_alloc::internal::base
namespace std {
template <>
struct hash<::partition_alloc::internal::base::FilePath> {
typedef ::partition_alloc::internal::base::FilePath argument_type;
typedef std::size_t result_type;
result_type operator()(argument_type const& f) const {
return hash<::partition_alloc::internal::base::FilePath::StringType>()(
f.value());
}
};
} // namespace std
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_PATH_H_
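A usage sketch of the trimmed-down FilePath above, assuming a POSIX build (where StringType is std::string). It exercises Append()'s separator handling and the kCurrentDirectory special case; the paths themselves are illustrative:

#include <cstdio>

#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"

using partition_alloc::internal::base::FilePath;

int main() {
  FilePath logs(PA_FILE_PATH_LITERAL("/var/log/"));

  // Append() strips the trailing separator before joining, so this yields
  // "/var/log/chrome.log" rather than "/var/log//chrome.log".
  FilePath log_file = logs.Append(PA_FILE_PATH_LITERAL("chrome.log"));

  // Appending to "." just returns the component itself, i.e. "cache".
  FilePath relative =
      FilePath(PA_FILE_PATH_LITERAL(".")).Append(PA_FILE_PATH_LITERAL("cache"));

  printf("%s\n%s\n", log_file.value().c_str(), relative.value().c_str());
  return 0;
}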

View File

@ -1,36 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains utility functions for dealing with the local
// filesystem.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include "base/base_export.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#include <sys/stat.h>
#include <unistd.h>
#endif
namespace partition_alloc::internal::base {
#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
// Read exactly |bytes| bytes from file descriptor |fd|, storing the result
// in |buffer|. This function is protected against EINTR and partial reads.
// Returns true iff |bytes| bytes have been successfully read from |fd|.
BASE_EXPORT bool ReadFromFD(int fd, char* buffer, size_t bytes);
#endif // BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_FILES_FILE_UTIL_H_

View File

@ -1,23 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
namespace partition_alloc::internal::base {
bool ReadFromFD(int fd, char* buffer, size_t bytes) {
size_t total_read = 0;
while (total_read < bytes) {
ssize_t bytes_read =
PA_HANDLE_EINTR(read(fd, buffer + total_read, bytes - total_read));
if (bytes_read <= 0)
break;
total_read += bytes_read;
}
return total_read == bytes;
}
} // namespace partition_alloc::internal::base
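A hedged usage sketch of ReadFromFD() on a POSIX build: read a fixed-size record from a file descriptor and treat a short read as failure. The /dev/urandom path and the 16-byte record size are illustrative:

#include <fcntl.h>
#include <unistd.h>

#include <cstdio>

#include "base/allocator/partition_allocator/partition_alloc_base/files/file_util.h"

int main() {
  int fd = open("/dev/urandom", O_RDONLY);
  if (fd < 0)
    return 1;

  // ReadFromFD() loops over read(), retrying on EINTR and on partial reads,
  // and reports success only once all sizeof(seed) bytes have arrived.
  char seed[16];
  bool ok = partition_alloc::internal::base::ReadFromFD(fd, seed, sizeof(seed));
  close(fd);

  printf("read %s\n", ok ? "succeeded" : "failed");
  return ok ? 0 : 1;
}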

View File

@ -1,66 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
// This is a wrapper for gtest's FRIEND_TEST macro that friends
// test with all possible prefixes. This is very helpful when changing the test
// prefix, because the friend declarations don't need to be updated.
//
// Example usage:
//
// class MyClass {
// private:
// void MyMethod();
// PA_FRIEND_TEST_ALL_PREFIXES(MyClassTest, MyMethod);
// };
#define PA_FRIEND_TEST_ALL_PREFIXES(test_case_name, test_name) \
FRIEND_TEST(test_case_name, test_name); \
FRIEND_TEST(test_case_name, DISABLED_##test_name); \
FRIEND_TEST(test_case_name, FLAKY_##test_name)
// C++ compilers will refuse to compile the following code:
//
// namespace foo {
// class MyClass {
// private:
// PA_FRIEND_TEST_ALL_PREFIXES(MyClassTest, TestMethod);
// bool private_var;
// };
// } // namespace foo
//
// void MyClassTest::TestMethod() {
// foo::MyClass foo_class;
// foo_class.private_var = true;
// }
//
// Unless you forward declare MyClassTest::TestMethod outside of namespace foo.
// Use PA_FORWARD_DECLARE_TEST to do so for all possible prefixes.
//
// Example usage:
//
// PA_FORWARD_DECLARE_TEST(MyClassTest, TestMethod);
//
// namespace foo {
// class MyClass {
// private:
// PA_FRIEND_TEST_ALL_PREFIXES(::MyClassTest, TestMethod); // NOTE use of ::
// bool private_var;
// };
// } // namespace foo
//
// void MyClassTest::TestMethod() {
// foo::MyClass foo_class;
// foo_class.private_var = true;
// }
#define PA_FORWARD_DECLARE_TEST(test_case_name, test_name) \
class test_case_name##_##test_name##_Test; \
class test_case_name##_##DISABLED_##test_name##_Test; \
class test_case_name##_##FLAKY_##test_name##_Test
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_GTEST_PROD_UTIL_H_

View File

@ -1,286 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
#ifdef BASE_CHECK_H_
#error "logging.h should not include check.h"
#endif
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/base_export.h"
#include "base/immediate_crash.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <io.h>
#include <windows.h>
// Windows warns on using write(). It prefers _write().
#define write(fd, buf, count) _write(fd, buf, static_cast<unsigned int>(count))
// Windows doesn't define STDERR_FILENO. Define it here.
#define STDERR_FILENO 2
#elif BUILDFLAG(IS_APPLE)
// In MacOS 10.12 and iOS 10.0 and later ASL (Apple System Log) was deprecated
// in favor of OS_LOG (Unified Logging).
#include <AvailabilityMacros.h>
#if BUILDFLAG(IS_IOS)
#if !defined(__IPHONE_10_0) || __IPHONE_OS_VERSION_MIN_REQUIRED < __IPHONE_10_0
#define USE_ASL
#endif
#else // BUILDFLAG(IS_IOS)
#if !defined(MAC_OS_X_VERSION_10_12) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12
#define USE_ASL
#endif
#endif // BUILDFLAG(IS_IOS)
#if defined(USE_ASL)
#include <asl.h>
#else
#include <os/log.h>
#endif
#include <CoreFoundation/CoreFoundation.h>
#include <mach-o/dyld.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#endif
#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#endif
#include <cstring>
#include <ostream>
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
#if BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#include "base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.h"
#endif
namespace partition_alloc::internal::logging {
namespace {
const char* const log_severity_names[] = {"INFO", "WARNING", "ERROR", "FATAL"};
static_assert(LOGGING_NUM_SEVERITIES == std::size(log_severity_names),
"Incorrect number of log_severity_names");
const char* log_severity_name(int severity) {
if (severity >= 0 && severity < LOGGING_NUM_SEVERITIES)
return log_severity_names[severity];
return "UNKNOWN";
}
int g_min_log_level = 0;
// A log message handler that gets notified of every log message we process.
LogMessageHandlerFunction g_log_message_handler = nullptr;
void WriteToFd(int fd, const char* data, size_t length) {
size_t bytes_written = 0;
int rv;
while (bytes_written < length) {
rv = PA_HANDLE_EINTR(
write(fd, data + bytes_written, length - bytes_written));
if (rv < 0) {
// Give up, nothing we can do now.
break;
}
bytes_written += rv;
}
}
} // namespace
#if defined(DCHECK_IS_CONFIGURABLE)
// In DCHECK-enabled Chrome builds, allow the meaning of LOGGING_DCHECK to be
// determined at run-time. We default it to INFO, to avoid it triggering
// crashes before the run-time has explicitly chosen the behaviour.
BASE_EXPORT logging::LogSeverity LOGGING_DCHECK = LOGGING_INFO;
#endif // defined(DCHECK_IS_CONFIGURABLE)
// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to have
// an object of the correct type on the LHS of the unused part of the ternary
// operator.
std::ostream* g_swallow_stream;
void SetMinLogLevel(int level) {
g_min_log_level = std::min(LOGGING_FATAL, level);
}
int GetMinLogLevel() {
return g_min_log_level;
}
bool ShouldCreateLogMessage(int severity) {
if (severity < g_min_log_level)
return false;
// Return true here unless we know ~LogMessage won't do anything.
return true;
}
int GetVlogVerbosity() {
return std::max(-1, LOG_INFO - GetMinLogLevel());
}
void SetLogMessageHandler(LogMessageHandlerFunction handler) {
g_log_message_handler = handler;
}
LogMessageHandlerFunction GetLogMessageHandler() {
return g_log_message_handler;
}
LogMessage::LogMessage(const char* file, int line, LogSeverity severity)
: severity_(severity), file_(file), line_(line) {
Init(file, line);
}
LogMessage::LogMessage(const char* file, int line, const char* condition)
: severity_(LOGGING_FATAL), file_(file), line_(line) {
Init(file, line);
stream_ << "Check failed: " << condition << ". ";
}
LogMessage::~LogMessage() {
stream_ << std::endl;
std::string str_newline(stream_.str());
// Give any log message handler first dibs on the message.
if (g_log_message_handler &&
g_log_message_handler(severity_, file_, line_, message_start_,
str_newline)) {
// The handler took care of it, no further processing.
return;
}
// Always use RawLog() if g_log_message_handler doesn't filter messages.
RawLog(severity_, str_newline.c_str());
}
// writes the common header info to the stream
void LogMessage::Init(const char* file, int line) {
std::string filename(file);
size_t last_slash_pos = filename.find_last_of("\\/");
if (last_slash_pos != std::string::npos)
filename.erase(0, last_slash_pos + 1);
{
// TODO(darin): It might be nice if the columns were fixed width.
stream_ << '[';
// TODO(1151236): show process id, thread id, timestamp and so on
// if needed.
if (severity_ >= 0) {
stream_ << log_severity_name(severity_);
} else {
stream_ << "VERBOSE" << -severity_;
}
stream_ << ":" << filename << "(" << line << ")] ";
}
message_start_ = stream_.str().length();
}
#if BUILDFLAG(IS_WIN)
// This has already been defined in the header, but defining it again as DWORD
// ensures that the type used in the header is equivalent to DWORD. If not,
// the redefinition is a compile error.
typedef DWORD SystemErrorCode;
#endif
SystemErrorCode GetLastSystemErrorCode() {
#if BUILDFLAG(IS_WIN)
return ::GetLastError();
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
return errno;
#endif
}
BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code) {
#if BUILDFLAG(IS_WIN)
const int kErrorMessageBufferSize = 256;
char msgbuf[kErrorMessageBufferSize];
DWORD flags = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS;
DWORD len = FormatMessageA(flags, nullptr, error_code, 0, msgbuf,
std::size(msgbuf), nullptr);
if (len) {
// Messages returned by the system end with line breaks.
std::string message(msgbuf);
size_t whitespace_pos = message.find_last_not_of("\n\r ");
if (whitespace_pos != std::string::npos)
message.erase(whitespace_pos + 1);
return message + base::StringPrintf(" (0x%lX)", error_code);
}
return base::StringPrintf("Error (0x%lX) while retrieving error. (0x%lX)",
GetLastError(), error_code);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
return base::safe_strerror(error_code) +
base::StringPrintf(" (%d)", error_code);
#endif // BUILDFLAG(IS_WIN)
}
#if BUILDFLAG(IS_WIN)
Win32ErrorLogMessage::Win32ErrorLogMessage(const char* file,
int line,
LogSeverity severity,
SystemErrorCode err)
: LogMessage(file, line, severity), err_(err) {}
Win32ErrorLogMessage::~Win32ErrorLogMessage() {
stream() << ": " << SystemErrorCodeToString(err_);
// We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
// field) and use Alias in hopes that it makes it into crash dumps.
DWORD last_error = err_;
base::debug::Alias(&last_error);
}
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
ErrnoLogMessage::ErrnoLogMessage(const char* file,
int line,
LogSeverity severity,
SystemErrorCode err)
: LogMessage(file, line, severity), err_(err) {}
ErrnoLogMessage::~ErrnoLogMessage() {
stream() << ": " << SystemErrorCodeToString(err_);
// We're about to crash (CHECK). Put |err_| on the stack (by placing it in a
// field) and use Alias in hopes that it makes it into crash dumps.
int last_error = err_;
base::debug::Alias(&last_error);
}
#endif // BUILDFLAG(IS_WIN)
void RawLog(int level, const char* message) {
if (level >= g_min_log_level && message) {
const size_t message_len = strlen(message);
WriteToFd(STDERR_FILENO, message, message_len);
if (message_len > 0 && message[message_len - 1] != '\n') {
int rv;
do {
rv = PA_HANDLE_EINTR(write(STDERR_FILENO, "\n", 1));
if (rv < 0) {
// Give up, nothing we can do now.
break;
}
} while (rv != 1);
}
}
if (level == LOGGING_FATAL)
IMMEDIATE_CRASH();
}
// This was defined at the beginning of this file.
#undef write
} // namespace partition_alloc::internal::logging
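A sketch tying the pieces above together: install a LogMessageHandlerFunction so messages are intercepted before RawLog() writes them to stderr, then emit messages through PA_LOG. It assumes the matching logging.h (the next file below); the handler body and the "[pa]" prefix are illustrative:

#include <cstdio>
#include <string>

#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"

namespace pa_logging = partition_alloc::internal::logging;

// Returning true tells ~LogMessage() the message was handled, so RawLog() is
// skipped and nothing extra is written to stderr.
bool PrefixingHandler(int severity,
                      const char* file,
                      int line,
                      size_t message_start,
                      const std::string& str) {
  fprintf(stderr, "[pa] %s", str.c_str() + message_start);
  return true;
}

int main() {
  pa_logging::SetLogMessageHandler(&PrefixingHandler);
  pa_logging::SetMinLogLevel(pa_logging::LOGGING_WARNING);

  PA_LOG(INFO) << "dropped: below the minimum log level";
  PA_LOG(WARNING) << "handled by PrefixingHandler";
  return 0;
}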

View File

@ -1,517 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_LOGGING_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_LOGGING_H_
#include <stddef.h>
#include <cassert>
#include <cstdint>
#include <sstream>
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "build/build_config.h"
// TODO(1151236): Need to update the description, because logging for PA
// standalone library was minimized.
//
// Optional message capabilities
// -----------------------------
// Assertion failed messages and fatal errors are displayed in a dialog box
// before the application exits. However, running this UI creates a message
// loop, which causes application messages to be processed and potentially
// dispatched to existing application windows. Since the application is in a
// bad state when this assertion dialog is displayed, these messages may not
// get processed and hang the dialog, or the application might go crazy.
//
// Therefore, it can be beneficial to display the error dialog in a separate
// process from the main application. When the logging system needs to display
// a fatal error dialog box, it will look for a program called
// "DebugMessage.exe" in the same directory as the application executable. It
// will run this application with the message as the command line, and will
// not include the name of the application as is traditional for easier
// parsing.
//
// The code for DebugMessage.exe is only one line. In WinMain, do:
// MessageBox(NULL, GetCommandLineW(), L"Fatal Error", 0);
//
// If DebugMessage.exe is not found, the logging code will use a normal
// MessageBox, potentially causing the problems discussed above.
// Instructions
// ------------
//
// Make a bunch of macros for logging. The way to log things is to stream
// things to PA_LOG(<a particular severity level>). E.g.,
//
// PA_LOG(INFO) << "Found " << num_cookies << " cookies";
//
// You can also do conditional logging:
//
// PA_LOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
//
// The CHECK(condition) macro is active in both debug and release builds and
// effectively performs a PA_LOG(FATAL) which terminates the process and
// generates a crashdump unless a debugger is attached.
//
// There are also "debug mode" logging macros like the ones above:
//
// PA_DLOG(INFO) << "Found cookies";
//
// PA_DLOG_IF(INFO, num_cookies > 10) << "Got lots of cookies";
//
// All "debug mode" logging is compiled away to nothing for non-debug mode
// compiles. PA_LOG_IF and development flags also work well together
// because the code can be compiled away sometimes.
//
// We also have
//
// PA_LOG_ASSERT(assertion);
// PA_DLOG_ASSERT(assertion);
//
// which is syntactic sugar for PA_{,D}LOG_IF(FATAL, assert fails) << assertion;
//
// There are "verbose level" logging macros. They look like
//
// PA_VLOG(1) << "I'm printed when you run the program with --v=1 or more";
// PA_VLOG(2) << "I'm printed when you run the program with --v=2 or more";
//
// These always log at the INFO log level (when they log at all).
//
// There's also PA_VLOG_IS_ON(n) "verbose level" condition macro. To be used as
//
// if (PA_VLOG_IS_ON(2)) {
// // do some logging preparation and logging
// // that can't be accomplished with just PA_VLOG(2) << ...;
// }
//
// There is also a PA_VLOG_IF "verbose level" condition macro for sample
// cases, when some extra computation and preparation for logs is not
// needed.
//
// PA_VLOG_IF(1, (size > 1024))
// << "I'm printed when size is more than 1024 and when you run the "
// "program with --v=1 or more";
//
// We also override the standard 'assert' to use 'PA_DLOG_ASSERT'.
//
// Lastly, there is:
//
// PA_PLOG(ERROR) << "Couldn't do foo";
// PA_DPLOG(ERROR) << "Couldn't do foo";
// PA_PLOG_IF(ERROR, cond) << "Couldn't do foo";
// PA_DPLOG_IF(ERROR, cond) << "Couldn't do foo";
// PA_PCHECK(condition) << "Couldn't do foo";
// PA_DPCHECK(condition) << "Couldn't do foo";
//
// which append the last system error to the message in string form (taken from
// GetLastError() on Windows and errno on POSIX).
//
// The supported severity levels for macros that allow you to specify one
// are (in increasing order of severity) INFO, WARNING, ERROR, and FATAL.
//
// Very important: logging a message at the FATAL severity level causes
// the program to terminate (after the message is logged).
//
// There is the special severity of DFATAL, which logs FATAL in DCHECK-enabled
// builds, ERROR in normal mode.
//
// Output is formatted as per the following example:
// [VERBOSE1:drm_device_handle.cc(90)] Succeeded
// authenticating /dev/dri/card0 in 0 ms with 1 attempt(s)
//
// The colon separated fields inside the brackets are as follows:
// 1. The log level
// 2. The filename and line number where the log was instantiated
//
// Additional logging-related information can be found here:
// https://chromium.googlesource.com/chromium/src/+/main/docs/linux/debugging.md#Logging
namespace partition_alloc::internal::logging {
// Sets the log level. Anything at or above this level will be written to the
// log file/displayed to the user (if applicable). Anything below this level
// will be silently ignored. The log level defaults to 0 (everything is logged
// up to level INFO) if this function is not called.
// Note that log messages for VLOG(x) are logged at level -x, so setting
// the min log level to negative values enables verbose logging.
BASE_EXPORT void SetMinLogLevel(int level);
// Gets the current log level.
BASE_EXPORT int GetMinLogLevel();
// Used by PA_LOG_IS_ON to lazy-evaluate stream arguments.
BASE_EXPORT bool ShouldCreateLogMessage(int severity);
// Gets the PA_VLOG default verbosity level.
BASE_EXPORT int GetVlogVerbosity();
// Sets the Log Message Handler that gets passed every log message before
// it's sent to other log destinations (if any).
// Returns true to signal that it handled the message and the message
// should not be sent to other log destinations.
typedef bool (*LogMessageHandlerFunction)(int severity,
const char* file,
int line,
size_t message_start,
const std::string& str);
BASE_EXPORT void SetLogMessageHandler(LogMessageHandlerFunction handler);
BASE_EXPORT LogMessageHandlerFunction GetLogMessageHandler();
using LogSeverity = int;
constexpr LogSeverity LOGGING_VERBOSE = -1; // This is level 1 verbosity
// Note: the log severities are used to index into the array of names,
// see log_severity_names.
constexpr LogSeverity LOGGING_INFO = 0;
constexpr LogSeverity LOGGING_WARNING = 1;
constexpr LogSeverity LOGGING_ERROR = 2;
constexpr LogSeverity LOGGING_FATAL = 3;
constexpr LogSeverity LOGGING_NUM_SEVERITIES = 4;
// LOGGING_DFATAL is LOGGING_FATAL in DCHECK-enabled builds, ERROR in normal
// mode.
#if DCHECK_IS_ON()
constexpr LogSeverity LOGGING_DFATAL = LOGGING_FATAL;
#else
constexpr LogSeverity LOGGING_DFATAL = LOGGING_ERROR;
#endif
// This block duplicates the above entries to facilitate incremental conversion
// from LOG_FOO to LOGGING_FOO.
// TODO(thestig): Convert existing users to LOGGING_FOO and remove this block.
constexpr LogSeverity LOG_VERBOSE = LOGGING_VERBOSE;
constexpr LogSeverity LOG_INFO = LOGGING_INFO;
constexpr LogSeverity LOG_WARNING = LOGGING_WARNING;
constexpr LogSeverity LOG_ERROR = LOGGING_ERROR;
constexpr LogSeverity LOG_FATAL = LOGGING_FATAL;
constexpr LogSeverity LOG_DFATAL = LOGGING_DFATAL;
// A few definitions of macros that don't generate much code. These are used
// by PA_LOG() and LOG_IF, etc. Since these are used all over our code, it's
// better to have compact code for these operations.
#define PA_COMPACT_GOOGLE_LOG_EX_INFO(ClassName, ...) \
::partition_alloc::internal::logging::ClassName( \
__FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_INFO, \
##__VA_ARGS__)
#define PA_COMPACT_GOOGLE_LOG_EX_WARNING(ClassName, ...) \
::partition_alloc::internal::logging::ClassName( \
__FILE__, __LINE__, \
::partition_alloc::internal::logging::LOGGING_WARNING, ##__VA_ARGS__)
#define PA_COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ...) \
::partition_alloc::internal::logging::ClassName( \
__FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_ERROR, \
##__VA_ARGS__)
#define PA_COMPACT_GOOGLE_LOG_EX_FATAL(ClassName, ...) \
::partition_alloc::internal::logging::ClassName( \
__FILE__, __LINE__, ::partition_alloc::internal::logging::LOGGING_FATAL, \
##__VA_ARGS__)
#define PA_COMPACT_GOOGLE_LOG_EX_DFATAL(ClassName, ...) \
::partition_alloc::internal::logging::ClassName( \
__FILE__, __LINE__, \
::partition_alloc::internal::logging::LOGGING_DFATAL, ##__VA_ARGS__)
#define PA_COMPACT_GOOGLE_LOG_EX_DCHECK(ClassName, ...) \
::partition_alloc::internal::logging::ClassName( \
__FILE__, __LINE__, \
::partition_alloc::internal::logging::LOGGING_DCHECK, ##__VA_ARGS__)
#define PA_COMPACT_GOOGLE_LOG_INFO PA_COMPACT_GOOGLE_LOG_EX_INFO(LogMessage)
#define PA_COMPACT_GOOGLE_LOG_WARNING \
PA_COMPACT_GOOGLE_LOG_EX_WARNING(LogMessage)
#define PA_COMPACT_GOOGLE_LOG_ERROR PA_COMPACT_GOOGLE_LOG_EX_ERROR(LogMessage)
#define PA_COMPACT_GOOGLE_LOG_FATAL PA_COMPACT_GOOGLE_LOG_EX_FATAL(LogMessage)
#define PA_COMPACT_GOOGLE_LOG_DFATAL PA_COMPACT_GOOGLE_LOG_EX_DFATAL(LogMessage)
#define PA_COMPACT_GOOGLE_LOG_DCHECK PA_COMPACT_GOOGLE_LOG_EX_DCHECK(LogMessage)
#if BUILDFLAG(IS_WIN)
// wingdi.h defines ERROR to be 0. When we call PA_LOG(ERROR), it gets
// substituted with 0, and it expands to PA_COMPACT_GOOGLE_LOG_0. To allow us
// to keep using this syntax, we define this macro to do the same thing
// as PA_COMPACT_GOOGLE_LOG_ERROR, and also define ERROR the same way that
// the Windows SDK does for consistency.
#define PA_ERROR 0
#define PA_COMPACT_GOOGLE_LOG_EX_0(ClassName, ...) \
PA_COMPACT_GOOGLE_LOG_EX_ERROR(ClassName, ##__VA_ARGS__)
#define PA_COMPACT_GOOGLE_LOG_0 PA_COMPACT_GOOGLE_LOG_ERROR
// Needed for LOG_IS_ON(ERROR).
constexpr LogSeverity LOGGING_0 = LOGGING_ERROR;
#endif
// As special cases, we can assume that LOG_IS_ON(FATAL) always holds. Also,
// LOG_IS_ON(DFATAL) always holds in debug mode. In particular, CHECK()s will
// always fire if they fail.
#define PA_LOG_IS_ON(severity) \
(::partition_alloc::internal::logging::ShouldCreateLogMessage( \
::partition_alloc::internal::logging::LOGGING_##severity))
// We don't do any caching tricks with VLOG_IS_ON() like the
// google-glog version since it increases binary size. This means
// that using the v-logging functions in conjunction with --vmodule
// may be slow.
#define PA_VLOG_IS_ON(verboselevel) \
((verboselevel) <= ::partition_alloc::internal::logging::GetVlogVerbosity())
// Helper macro which avoids evaluating the arguments to a stream if
// the condition doesn't hold. Condition is evaluated once and only once.
#define PA_LAZY_STREAM(stream, condition) \
!(condition) \
? (void)0 \
: ::partition_alloc::internal::logging::LogMessageVoidify() & (stream)
// We use the preprocessor's merging operator, "##", so that, e.g.,
// PA_LOG(INFO) becomes the token PA_COMPACT_GOOGLE_LOG_INFO. There's some
// funny subtle difference between ostream member streaming functions (e.g.,
// ostream::operator<<(int)) and ostream non-member streaming functions
// (e.g., ::operator<<(ostream&, string&)): it turns out that it's
// impossible to stream something like a string directly to an unnamed
// ostream. We employ a neat hack by calling the stream() member
// function of LogMessage which seems to avoid the problem.
#define PA_LOG_STREAM(severity) PA_COMPACT_GOOGLE_LOG_##severity.stream()
#define PA_LOG(severity) \
PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_LOG_IS_ON(severity))
#define PA_LOG_IF(severity, condition) \
PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_LOG_IS_ON(severity) && (condition))
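// Illustrative usage sketch (the function and messages below are hypothetical;
// only the PA_LOG/PA_LOG_IF macros above come from this file):
//
//   void OnReservationFailed(size_t requested_size, bool will_retry) {
//     PA_LOG(ERROR) << "reservation of " << requested_size << " bytes failed";
//     PA_LOG_IF(WARNING, will_retry) << "retrying with a smaller request";
//   }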
// The VLOG macros log with negative verbosities.
#define PA_VLOG_STREAM(verbose_level) \
::partition_alloc::internal::logging::LogMessage(__FILE__, __LINE__, \
-(verbose_level)) \
.stream()
#define PA_VLOG(verbose_level) \
PA_LAZY_STREAM(PA_VLOG_STREAM(verbose_level), PA_VLOG_IS_ON(verbose_level))
#define PA_VLOG_IF(verbose_level, condition) \
PA_LAZY_STREAM(PA_VLOG_STREAM(verbose_level), \
PA_VLOG_IS_ON(verbose_level) && (condition))
#if BUILDFLAG(IS_WIN)
#define PA_VPLOG_STREAM(verbose_level) \
::partition_alloc::internal::logging::Win32ErrorLogMessage( \
__FILE__, __LINE__, -(verbose_level), \
::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
.stream()
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#define PA_VPLOG_STREAM(verbose_level) \
::partition_alloc::internal::logging::ErrnoLogMessage( \
__FILE__, __LINE__, -(verbose_level), \
::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
.stream()
#endif
#define PA_VPLOG(verbose_level) \
PA_LAZY_STREAM(PA_VPLOG_STREAM(verbose_level), PA_VLOG_IS_ON(verbose_level))
#define PA_VPLOG_IF(verbose_level, condition) \
PA_LAZY_STREAM(PA_VPLOG_STREAM(verbose_level), \
PA_VLOG_IS_ON(verbose_level) && (condition))
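// Illustrative usage sketch (hypothetical variable names): PA_VLOG emits only
// when the verbosity returned by GetVlogVerbosity() is at least the given
// level, and PA_VPLOG additionally appends the last system error message.
//
//   PA_VLOG(1) << "mapping " << length << " bytes";
//   PA_VPLOG(1) << "madvise failed";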
// TODO(akalin): Add more VLOG variants, e.g. VPLOG.
#define PA_LOG_ASSERT(condition) \
PA_LOG_IF(FATAL, !(ANALYZER_ASSUME_TRUE(condition))) \
<< "Assert failed: " #condition ". "
#if BUILDFLAG(IS_WIN)
#define PA_PLOG_STREAM(severity) \
PA_COMPACT_GOOGLE_LOG_EX_##severity( \
Win32ErrorLogMessage, \
::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
.stream()
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#define PA_PLOG_STREAM(severity) \
PA_COMPACT_GOOGLE_LOG_EX_##severity( \
ErrnoLogMessage, \
::partition_alloc::internal::logging::GetLastSystemErrorCode()) \
.stream()
#endif
#define PA_PLOG(severity) \
PA_LAZY_STREAM(PA_PLOG_STREAM(severity), PA_LOG_IS_ON(severity))
#define PA_PLOG_IF(severity, condition) \
PA_LAZY_STREAM(PA_PLOG_STREAM(severity), \
PA_LOG_IS_ON(severity) && (condition))
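// Illustrative usage sketch (hypothetical call site): PA_PLOG logs the message
// together with the textual form of the last system error (GetLastError() on
// Windows, errno elsewhere).
//
//   void* map = mmap(nullptr, len, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE,
//                    -1, 0);
//   if (map == MAP_FAILED)
//     PA_PLOG(ERROR) << "mmap of " << len << " bytes failed";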
BASE_EXPORT extern std::ostream* g_swallow_stream;
// Note that g_swallow_stream is used instead of an arbitrary PA_LOG() stream to
// avoid the creation of an object with a non-trivial destructor (LogMessage).
// On MSVC x86 (checked on 2015 Update 3), this causes a few additional
// pointless instructions to be emitted even at full optimization level, even
// though the : arm of the ternary operator is clearly never executed. Using a
// simpler object to be &'d with Voidify() avoids these extra instructions.
// Using a simpler POD object with a templated operator<< also works to avoid
// these instructions. However, this causes warnings on statically defined
// implementations of operator<<(std::ostream, ...) in some .cc files, because
// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an
// ostream* also is not suitable, because some compilers warn of undefined
// behavior.
#define PA_EAT_STREAM_PARAMETERS \
true ? (void)0 \
: ::partition_alloc::internal::logging::LogMessageVoidify() & \
(*::partition_alloc::internal::logging::g_swallow_stream)
// Definitions for DLOG et al.
#if DCHECK_IS_ON()
#define PA_DLOG_IS_ON(severity) PA_LOG_IS_ON(severity)
#define PA_DLOG_IF(severity, condition) PA_LOG_IF(severity, condition)
#define PA_DLOG_ASSERT(condition) PA_LOG_ASSERT(condition)
#define PA_DPLOG_IF(severity, condition) PA_PLOG_IF(severity, condition)
#define PA_DVLOG_IF(verboselevel, condition) PA_VLOG_IF(verboselevel, condition)
#define PA_DVPLOG_IF(verboselevel, condition) \
PA_VPLOG_IF(verboselevel, condition)
#else // DCHECK_IS_ON()
// If !DCHECK_IS_ON(), we want to avoid emitting any references to |condition|
// (which may reference a variable defined only if DCHECK_IS_ON()).
// Contrast this with DCHECK et al., which has different behavior.
#define PA_DLOG_IS_ON(severity) false
#define PA_DLOG_IF(severity, condition) PA_EAT_STREAM_PARAMETERS
#define PA_DLOG_ASSERT(condition) PA_EAT_STREAM_PARAMETERS
#define PA_DPLOG_IF(severity, condition) PA_EAT_STREAM_PARAMETERS
#define PA_DVLOG_IF(verboselevel, condition) PA_EAT_STREAM_PARAMETERS
#define PA_DVPLOG_IF(verboselevel, condition) PA_EAT_STREAM_PARAMETERS
#endif // DCHECK_IS_ON()
#define PA_DLOG(severity) \
PA_LAZY_STREAM(PA_LOG_STREAM(severity), PA_DLOG_IS_ON(severity))
#define PA_DPLOG(severity) \
PA_LAZY_STREAM(PA_PLOG_STREAM(severity), PA_DLOG_IS_ON(severity))
#define PA_DVLOG(verboselevel) PA_DVLOG_IF(verboselevel, true)
#define PA_DVPLOG(verboselevel) PA_DVPLOG_IF(verboselevel, true)
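// Illustrative note (a sketch, not from the original header): when
// DCHECK_IS_ON() is false, the PA_D*_IF variants above expand to
// PA_EAT_STREAM_PARAMETERS, so |condition| is not even referenced, while
// PA_DLOG/PA_DPLOG keep a constant-false condition, so the streamed expression
// is never evaluated at run time. E.g. (hypothetical helper):
//
//   PA_DLOG(INFO) << "slot span stats: " << ComputeExpensiveStatsString();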
// Definitions for DCHECK et al.
#if defined(DCHECK_IS_CONFIGURABLE)
BASE_EXPORT extern LogSeverity LOGGING_DCHECK;
#else
constexpr LogSeverity LOGGING_DCHECK = LOGGING_FATAL;
#endif // defined(DCHECK_IS_CONFIGURABLE)
// Redefine the standard assert to use our nice log files
#undef assert
#define assert(x) PA_DLOG_ASSERT(x)
// This class more or less represents a particular log message. You
// create an instance of LogMessage and then stream stuff to it.
// When you finish streaming to it, ~LogMessage is called and the
// full message gets streamed to the appropriate destination.
//
// You shouldn't actually use LogMessage's constructor to log things,
// though. You should use the PA_LOG() macro (and variants thereof)
// above.
class BASE_EXPORT LogMessage {
public:
// Used for PA_LOG(severity).
LogMessage(const char* file, int line, LogSeverity severity);
// Used for CHECK(). Implied severity = LOGGING_FATAL.
LogMessage(const char* file, int line, const char* condition);
LogMessage(const LogMessage&) = delete;
LogMessage& operator=(const LogMessage&) = delete;
virtual ~LogMessage();
std::ostream& stream() { return stream_; }
LogSeverity severity() { return severity_; }
std::string str() { return stream_.str(); }
private:
void Init(const char* file, int line);
const LogSeverity severity_;
std::ostringstream stream_;
size_t message_start_; // Offset of the start of the message (past prefix
// info).
// The file and line information passed in to the constructor.
const char* const file_;
const int line_;
// This is useful because the LogMessage class uses a lot of Win32 calls
// that will clobber the value of GLE; without this, the code that called
// the log function would have lost the thread error value by the time the
// log call returns.
base::ScopedClearLastError last_error_;
};
// This class is used to explicitly ignore values in the conditional
// logging macros. This avoids compiler warnings like "value computed
// is not used" and "statement has no effect".
class LogMessageVoidify {
public:
LogMessageVoidify() = default;
// This has to be an operator with a precedence lower than << but
// higher than ?:
void operator&(std::ostream&) {}
};
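// Illustrative expansion sketch (approximate; namespace qualifiers omitted):
// PA_LAZY_STREAM relies on operator& binding more loosely than << but more
// tightly than ?:, so
//
//   PA_LOG_IF(WARNING, n > 0) << "n is " << n;
//
// roughly becomes
//
//   !(PA_LOG_IS_ON(WARNING) && (n > 0))
//       ? (void)0
//       : LogMessageVoidify() &
//             (LogMessage(__FILE__, __LINE__, LOGGING_WARNING).stream()
//              << "n is " << n);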
#if BUILDFLAG(IS_WIN)
typedef unsigned long SystemErrorCode;
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
typedef int SystemErrorCode;
#endif
// Alias for ::GetLastError() on Windows and errno on POSIX. Avoids having to
// pull in windows.h just for GetLastError() and DWORD.
BASE_EXPORT SystemErrorCode GetLastSystemErrorCode();
BASE_EXPORT std::string SystemErrorCodeToString(SystemErrorCode error_code);
#if BUILDFLAG(IS_WIN)
// Appends a formatted system message of the GetLastError() type.
class BASE_EXPORT Win32ErrorLogMessage : public LogMessage {
public:
Win32ErrorLogMessage(const char* file,
int line,
LogSeverity severity,
SystemErrorCode err);
Win32ErrorLogMessage(const Win32ErrorLogMessage&) = delete;
Win32ErrorLogMessage& operator=(const Win32ErrorLogMessage&) = delete;
// Appends the error message before destructing the encapsulated class.
~Win32ErrorLogMessage() override;
private:
SystemErrorCode err_;
};
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
// Appends a formatted system message of the errno type
class BASE_EXPORT ErrnoLogMessage : public LogMessage {
public:
ErrnoLogMessage(const char* file,
int line,
LogSeverity severity,
SystemErrorCode err);
ErrnoLogMessage(const ErrnoLogMessage&) = delete;
ErrnoLogMessage& operator=(const ErrnoLogMessage&) = delete;
// Appends the error message before destructing the encapsulated class.
~ErrnoLogMessage() override;
private:
SystemErrorCode err_;
};
#endif // BUILDFLAG(IS_WIN)
// Async signal safe logging mechanism.
BASE_EXPORT void RawLog(int level, const char* message);
#define PA_RAW_LOG(level, message) \
::partition_alloc::internal::logging::RawLog( \
::partition_alloc::internal::logging::LOGGING_##level, message)
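// Illustrative usage sketch (hypothetical message): because RawLog takes a
// plain C string and avoids streams and allocations, PA_RAW_LOG is the variant
// to use where the regular macros are unsafe, e.g. deep inside the allocator
// or in a crash handler.
//
//   PA_RAW_LOG(ERROR, "out of address space");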
} // namespace partition_alloc::internal::logging
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_LOGGING_H_

View File

@ -1,46 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.h"
#include <limits>
#include <ostream>
#include <type_traits>
namespace partition_alloc::internal::base::subtle {
bool RefCountedThreadSafeBase::HasOneRef() const {
return ref_count_.IsOne();
}
bool RefCountedThreadSafeBase::HasAtLeastOneRef() const {
return !ref_count_.IsZero();
}
#if DCHECK_IS_ON()
RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
"calling Release()";
}
#endif
// For security and correctness, we check the arithmetic on ref counts.
//
// In an attempt to avoid binary bloat (from inlining the `CHECK`), we define
// these functions out-of-line. However, compilers are wily. Further testing may
// show that `NOINLINE` helps or hurts.
//
#if !defined(ARCH_CPU_X86_FAMILY)
bool RefCountedThreadSafeBase::Release() const {
return ReleaseImpl();
}
void RefCountedThreadSafeBase::AddRef() const {
AddRefImpl();
}
void RefCountedThreadSafeBase::AddRefWithCheck() const {
AddRefWithCheckImpl();
}
#endif
} // namespace partition_alloc::internal::base::subtle

View File

@ -1,188 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_
#include "base/allocator/partition_allocator/partition_alloc_base/atomic_ref_count.h"
#include "base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h"
#include "base/base_export.h"
#include "base/check.h"
#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "build/build_config.h"
namespace partition_alloc::internal::base {
namespace subtle {
class BASE_EXPORT RefCountedThreadSafeBase {
public:
RefCountedThreadSafeBase(const RefCountedThreadSafeBase&) = delete;
RefCountedThreadSafeBase& operator=(const RefCountedThreadSafeBase&) = delete;
bool HasOneRef() const;
bool HasAtLeastOneRef() const;
protected:
explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
: ref_count_(1) {
#if DCHECK_IS_ON()
needs_adopt_ref_ = true;
#endif
}
#if DCHECK_IS_ON()
~RefCountedThreadSafeBase();
#else
~RefCountedThreadSafeBase() = default;
#endif
// Release and AddRef are suitable for inlining on X86 because they generate
// very small code sequences. On other platforms (ARM), it causes a size
// regression and is probably not worth it.
#if defined(ARCH_CPU_X86_FAMILY)
// Returns true if the object should self-delete.
bool Release() const { return ReleaseImpl(); }
void AddRef() const { AddRefImpl(); }
void AddRefWithCheck() const { AddRefWithCheckImpl(); }
#else
// Returns true if the object should self-delete.
bool Release() const;
void AddRef() const;
void AddRefWithCheck() const;
#endif
private:
template <typename U>
friend scoped_refptr<U> AdoptRef(U*);
void Adopted() const {
#if DCHECK_IS_ON()
DCHECK(needs_adopt_ref_);
needs_adopt_ref_ = false;
#endif
}
ALWAYS_INLINE void AddRefImpl() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
// This RefCounted object is created with a non-zero reference count.
// The first reference to such an object has to be made by AdoptRef or
// MakeRefCounted.
DCHECK(!needs_adopt_ref_);
#endif
ref_count_.Increment();
}
ALWAYS_INLINE void AddRefWithCheckImpl() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
// This RefCounted object is created with a non-zero reference count.
// The first reference to such an object has to be made by AdoptRef or
// MakeRefCounted.
DCHECK(!needs_adopt_ref_);
#endif
CHECK_GT(ref_count_.Increment(), 0);
}
ALWAYS_INLINE bool ReleaseImpl() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
DCHECK(!ref_count_.IsZero());
#endif
if (!ref_count_.Decrement()) {
#if DCHECK_IS_ON()
in_dtor_ = true;
#endif
return true;
}
return false;
}
mutable AtomicRefCount ref_count_{0};
#if DCHECK_IS_ON()
mutable bool needs_adopt_ref_ = false;
mutable bool in_dtor_ = false;
#endif
};
} // namespace subtle
// Forward declaration.
template <class T, typename Traits>
class RefCountedThreadSafe;
// Default traits for RefCountedThreadSafe<T>. Deletes the object when its ref
// count reaches 0. Overload to delete it on a different thread etc.
template <typename T>
struct DefaultRefCountedThreadSafeTraits {
static void Destruct(const T* x) {
// Delete through RefCountedThreadSafe so that child classes only need to
// befriend RefCountedThreadSafe instead of this struct, which is an
// implementation detail.
RefCountedThreadSafe<T, DefaultRefCountedThreadSafeTraits>::DeleteInternal(
x);
}
};
//
// A thread-safe variant of RefCounted<T>
//
// class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
// ...
// };
//
// If you're using the default trait, then you should add compile time
// asserts that no one else is deleting your object. i.e.
// private:
// friend class base::RefCountedThreadSafe<MyFoo>;
// ~MyFoo();
//
// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
// too. See the comment above the RefCounted definition for details.
template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T>>
class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
public:
static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
subtle::kStartRefCountFromZeroTag;
explicit RefCountedThreadSafe()
: subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
RefCountedThreadSafe(const RefCountedThreadSafe&) = delete;
RefCountedThreadSafe& operator=(const RefCountedThreadSafe&) = delete;
void AddRef() const { AddRefImpl(T::kRefCountPreference); }
void Release() const {
if (subtle::RefCountedThreadSafeBase::Release()) {
ANALYZER_SKIP_THIS_PATH();
Traits::Destruct(static_cast<const T*>(this));
}
}
protected:
~RefCountedThreadSafe() = default;
private:
friend struct DefaultRefCountedThreadSafeTraits<T>;
template <typename U>
static void DeleteInternal(const U* x) {
delete x;
}
void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
subtle::RefCountedThreadSafeBase::AddRef();
}
void AddRefImpl(subtle::StartRefCountFromOneTag) const {
subtle::RefCountedThreadSafeBase::AddRefWithCheck();
}
};
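// Illustrative usage sketch (BufferPool is a hypothetical class): the typical
// pattern with the default traits keeps the destructor private and befriends
// the base class so that only it can delete the object.
//
//   class BufferPool : public base::RefCountedThreadSafe<BufferPool> {
//    public:
//     BufferPool() = default;
//     void Grow(size_t n);
//
//    private:
//     friend class base::RefCountedThreadSafe<BufferPool>;
//     ~BufferPool() = default;
//   };
//
//   scoped_refptr<BufferPool> pool = base::MakeRefCounted<BufferPool>();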
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_REF_COUNTED_H_

View File

@ -1,371 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_
#include <stddef.h>
#include <iosfwd>
#include <type_traits>
#include <utility>
#include "base/check.h"
#include "base/compiler_specific.h"
namespace partition_alloc::internal {
template <class T>
class scoped_refptr;
namespace base {
template <class, typename>
class RefCountedThreadSafe;
template <typename T>
scoped_refptr<T> AdoptRef(T* t);
namespace subtle {
enum AdoptRefTag { kAdoptRefTag };
enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
// scoped_refptr<T> is typically used with one of several RefCounted<T> base
// classes or with custom AddRef and Release methods. These overloads dispatch
// on which was used.
template <typename T, typename U, typename V>
constexpr bool IsRefCountPreferenceOverridden(
const T*,
const RefCountedThreadSafe<U, V>*) {
return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
std::decay_t<decltype(U::kRefCountPreference)>>::value;
}
constexpr bool IsRefCountPreferenceOverridden(...) {
return false;
}
template <typename T, typename U, typename V>
constexpr void AssertRefCountBaseMatches(const T*,
const RefCountedThreadSafe<U, V>*) {
static_assert(
std::is_base_of_v<U, T>,
"T implements RefCountedThreadSafe<U>, but U is not a base of T.");
}
constexpr void AssertRefCountBaseMatches(...) {}
} // namespace subtle
// Creates a scoped_refptr from a raw pointer without incrementing the reference
// count. Use this only for a newly created object whose reference count starts
// from 1 instead of 0.
template <typename T>
scoped_refptr<T> AdoptRef(T* obj) {
using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
"Use AdoptRef only if the reference count starts from one.");
DCHECK(obj);
DCHECK(obj->HasOneRef());
obj->Adopted();
return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
}
namespace subtle {
template <typename T>
scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
return scoped_refptr<T>(obj);
}
template <typename T>
scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
return AdoptRef(obj);
}
} // namespace subtle
// Constructs an instance of T, which is a ref counted type, and wraps the
// object into a scoped_refptr<T>.
template <typename T, typename... Args>
scoped_refptr<T> MakeRefCounted(Args&&... args) {
T* obj = new T(std::forward<Args>(args)...);
return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
}
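// Illustrative usage sketch (Foo and args are hypothetical): MakeRefCounted
// works regardless of whether Foo starts its ref count at zero or one, whereas
// wrapping a raw |new| is only valid for start-from-zero types.
//
//   scoped_refptr<Foo> a = base::MakeRefCounted<Foo>(args);      // preferred
//   scoped_refptr<Foo> b = base::WrapRefCounted(new Foo(args));  // zero-start
//                                                                // types only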
// Takes an instance of T, which is a ref counted type, and wraps the object
// into a scoped_refptr<T>.
template <typename T>
scoped_refptr<T> WrapRefCounted(T* t) {
return scoped_refptr<T>(t);
}
} // namespace base
//
// A smart pointer class for reference counted objects. Use this class instead
// of calling AddRef and Release manually on a reference counted object to
// avoid common memory leaks caused by forgetting to Release an object
// reference. Sample usage:
//
// class MyFoo : public RefCounted<MyFoo> {
// ...
// private:
// friend class RefCounted<MyFoo>; // Allow destruction by RefCounted<>.
// ~MyFoo(); // Destructor must be private/protected.
// };
//
// void some_function() {
// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
// foo->Method(param);
// // |foo| is released when this function returns
// }
//
// void some_other_function() {
// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
// ...
// foo.reset(); // explicitly releases |foo|
// ...
// if (foo)
// foo->Method(param);
// }
//
// The above examples show how scoped_refptr<T> acts like a pointer to T.
// Given two scoped_refptr<T> classes, it is also possible to exchange
// references between the two objects, like so:
//
// {
// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
// // now, |b| references the MyFoo object, and |a| references nullptr.
// }
//
// To make both |a| and |b| in the above example reference the same MyFoo
// object, simply use the assignment operator:
//
// {
// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
// scoped_refptr<MyFoo> b;
//
// b = a;
// // now, |a| and |b| each own a reference to the same MyFoo object.
// }
//
// Also see Chromium's ownership and calling conventions:
// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
// Specifically:
// If the function (at least sometimes) takes a ref on a refcounted object,
// declare the param as scoped_refptr<T>. The caller can decide whether it
// wishes to transfer ownership (by calling std::move(t) when passing t) or
// retain its ref (by simply passing t directly).
// In other words, use scoped_refptr like you would a std::unique_ptr except
// in the odd case where it's required to hold on to a ref while handing one
// to another component (if a component merely needs to use t on the stack
// without keeping a ref: pass t as a raw T*).
template <class T>
class TRIVIAL_ABI scoped_refptr {
public:
typedef T element_type;
constexpr scoped_refptr() = default;
// Allow implicit construction from nullptr.
constexpr scoped_refptr(std::nullptr_t) {}
// Constructs from a raw pointer. Note that this constructor allows implicit
// conversion from T* to scoped_refptr<T> which is strongly discouraged. If
// you are creating a new ref-counted object please use
// base::MakeRefCounted<T>() or base::WrapRefCounted<T>(). Otherwise you
// should move or copy construct from an existing scoped_refptr<T> to the
// ref-counted object.
scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
AddRef(ptr_);
}
// Copy constructor. This is required in addition to the copy conversion
// constructor below.
scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
// Copy conversion constructor.
template <typename U,
typename = typename std::enable_if<
std::is_convertible<U*, T*>::value>::type>
scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
// Move constructor. This is required in addition to the move conversion
// constructor below.
scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
// Move conversion constructor.
template <typename U,
typename = typename std::enable_if<
std::is_convertible<U*, T*>::value>::type>
scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
r.ptr_ = nullptr;
}
~scoped_refptr() {
static_assert(!base::subtle::IsRefCountPreferenceOverridden(
static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
"It's unsafe to override the ref count preference."
" Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
" from subclasses.");
if (ptr_)
Release(ptr_);
}
T* get() const { return ptr_; }
T& operator*() const {
DCHECK(ptr_);
return *ptr_;
}
T* operator->() const {
DCHECK(ptr_);
return ptr_;
}
scoped_refptr& operator=(std::nullptr_t) {
reset();
return *this;
}
scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
// Unified assignment operator.
scoped_refptr& operator=(scoped_refptr r) noexcept {
swap(r);
return *this;
}
// Sets managed object to null and releases reference to the previous managed
// object, if it existed.
void reset() { scoped_refptr().swap(*this); }
// Returns the owned pointer (if any), releasing ownership to the caller. The
// caller is responsible for managing the lifetime of the reference.
[[nodiscard]] T* release();
void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
explicit operator bool() const { return ptr_ != nullptr; }
template <typename U>
bool operator==(const scoped_refptr<U>& rhs) const {
return ptr_ == rhs.get();
}
template <typename U>
bool operator!=(const scoped_refptr<U>& rhs) const {
return !operator==(rhs);
}
template <typename U>
bool operator<(const scoped_refptr<U>& rhs) const {
return ptr_ < rhs.get();
}
protected:
T* ptr_ = nullptr;
private:
template <typename U>
friend scoped_refptr<U> base::AdoptRef(U*);
scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
// Friend required for move constructors that set r.ptr_ to null.
template <typename U>
friend class scoped_refptr;
// Non-inline helpers to allow:
// class Opaque;
// extern template class scoped_refptr<Opaque>;
// Otherwise the compiler will complain that Opaque is an incomplete type.
static void AddRef(T* ptr);
static void Release(T* ptr);
};
template <typename T>
T* scoped_refptr<T>::release() {
T* ptr = ptr_;
ptr_ = nullptr;
return ptr;
}
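// Illustrative usage sketch (MyFoo as in the class comment above): release()
// hands the raw pointer and its reference to the caller, who must balance it
// later.
//
//   scoped_refptr<MyFoo> foo = base::MakeRefCounted<MyFoo>();
//   MyFoo* raw = foo.release();  // |foo| is now null; the ref moved to |raw|.
//   // ... eventually:
//   raw->Release();              // balances the reference taken above.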
// static
template <typename T>
void scoped_refptr<T>::AddRef(T* ptr) {
base::subtle::AssertRefCountBaseMatches(ptr, ptr);
ptr->AddRef();
}
// static
template <typename T>
void scoped_refptr<T>::Release(T* ptr) {
base::subtle::AssertRefCountBaseMatches(ptr, ptr);
ptr->Release();
}
template <typename T, typename U>
bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
return lhs.get() == rhs;
}
template <typename T, typename U>
bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
return lhs == rhs.get();
}
template <typename T>
bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
return !static_cast<bool>(lhs);
}
template <typename T>
bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
return !static_cast<bool>(rhs);
}
template <typename T, typename U>
bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
return !operator==(lhs, rhs);
}
template <typename T, typename U>
bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
return !operator==(lhs, rhs);
}
template <typename T>
bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
return !operator==(lhs, null);
}
template <typename T>
bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
return !operator==(null, rhs);
}
template <typename T>
std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
return out << p.get();
}
template <typename T>
void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
lhs.swap(rhs);
}
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MEMORY_SCOPED_REFPTR_H_

View File

@ -1,15 +0,0 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
namespace partition_alloc::internal::base {
NativeLibrary LoadNativeLibrary(const FilePath& library_path,
NativeLibraryLoadError* error) {
return LoadNativeLibraryWithOptions(library_path, NativeLibraryOptions(),
error);
}
} // namespace partition_alloc::internal::base

View File

@ -1,96 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_
// This file defines a cross-platform "NativeLibrary" type which represents
// a loadable module.
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/base_export.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#elif BUILDFLAG(IS_APPLE)
#import <CoreFoundation/CoreFoundation.h>
#endif // OS_*
namespace partition_alloc::internal::base {
#if BUILDFLAG(IS_WIN)
using NativeLibrary = HMODULE;
#elif BUILDFLAG(IS_APPLE)
enum NativeLibraryType { BUNDLE, DYNAMIC_LIB };
enum NativeLibraryObjCStatus {
OBJC_UNKNOWN,
OBJC_PRESENT,
OBJC_NOT_PRESENT,
};
struct NativeLibraryStruct {
NativeLibraryType type;
CFBundleRefNum bundle_resource_ref;
NativeLibraryObjCStatus objc_status;
union {
CFBundleRef bundle;
void* dylib;
};
};
using NativeLibrary = NativeLibraryStruct*;
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
using NativeLibrary = void*;
#endif // OS_*
struct BASE_EXPORT NativeLibraryLoadError {
#if BUILDFLAG(IS_WIN)
NativeLibraryLoadError() : code(0) {}
#endif // BUILDFLAG(IS_WIN)
// Returns a string representation of the load error.
std::string ToString() const;
#if BUILDFLAG(IS_WIN)
DWORD code;
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
std::string message;
#endif // BUILDFLAG(IS_WIN)
};
struct BASE_EXPORT NativeLibraryOptions {
NativeLibraryOptions() = default;
NativeLibraryOptions(const NativeLibraryOptions& options) = default;
// If |true|, a loaded library is required to prefer local symbol resolution
// before considering global symbols. Note that this is already the default
// behavior on most systems. Setting this to |false| does not guarantee the
// inverse, i.e., it does not force a preference for global symbols over local
// ones.
bool prefer_own_symbols = false;
};
// Loads a native library from disk. Release it with UnloadNativeLibrary when
// you're done. Returns NULL on failure.
// If |error| is not NULL, it may be filled in on load error.
BASE_EXPORT NativeLibrary LoadNativeLibrary(const FilePath& library_path,
NativeLibraryLoadError* error);
// Loads a native library from disk. Release it with UnloadNativeLibrary when
// you're done. Returns NULL on failure.
// If |error| is not NULL, it may be filled in on load error.
BASE_EXPORT NativeLibrary
LoadNativeLibraryWithOptions(const FilePath& library_path,
const NativeLibraryOptions& options,
NativeLibraryLoadError* error);
// Gets a function pointer from a native library.
BASE_EXPORT void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
const std::string& name);
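// Illustrative usage sketch (the library path, the symbol name and the
// ReportLoadFailure() helper below are hypothetical):
//
//   NativeLibraryLoadError error;
//   NativeLibrary lib = LoadNativeLibrary(FilePath("libfoo.so"), &error);
//   if (!lib)
//     return ReportLoadFailure(error.ToString());
//   using FooMainFn = int (*)();
//   auto foo_main = reinterpret_cast<FooMainFn>(
//       GetFunctionPointerFromNativeLibrary(lib, "foo_main"));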
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NATIVE_LIBRARY_H_

View File

@ -1,56 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
#include <dlfcn.h>
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
#include "base/check.h"
#include "build/build_config.h"
namespace partition_alloc::internal::base {
std::string NativeLibraryLoadError::ToString() const {
return message;
}
NativeLibrary LoadNativeLibraryWithOptions(const FilePath& library_path,
const NativeLibraryOptions& options,
NativeLibraryLoadError* error) {
// TODO(1151236): This ScopedBlockingCall is temporarily disabled. Once
// partition_alloc's ScopedBlockingCall() observes the same blocking_observer_
// as base's ScopedBlockingCall(), we will copy the ScopedBlockingCall code and
// re-enable this.
// dlopen() opens the file off disk.
// ScopedBlockingCall scoped_blocking_call(BlockingType::MAY_BLOCK);
// We deliberately do not use RTLD_DEEPBIND by default. For the history why,
// please refer to the bug tracker. Some useful bug reports to read include:
// http://crbug.com/17943, http://crbug.com/17557, http://crbug.com/36892,
// and http://crbug.com/40794.
int flags = RTLD_LAZY;
#if BUILDFLAG(IS_ANDROID) || !defined(RTLD_DEEPBIND)
// Certain platforms don't define RTLD_DEEPBIND. Android dlopen() requires
// further investigation, as it might vary across versions. Crash here to
// warn developers that they're trying to rely on uncertain behavior.
CHECK(!options.prefer_own_symbols);
#else
if (options.prefer_own_symbols)
flags |= RTLD_DEEPBIND;
#endif
void* dl = dlopen(library_path.value().c_str(), flags);
if (!dl && error)
error->message = dlerror();
return dl;
}
void* GetFunctionPointerFromNativeLibrary(NativeLibrary library,
const std::string& name) {
return dlsym(library, name.c_str());
}
} // namespace partition_alloc::internal::base

View File

@ -1,115 +0,0 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_
#include <new>
#include <type_traits>
#include <utility>
namespace partition_alloc::internal::base {
// A tag type used for NoDestructor to allow it to be created for a type that
// has a trivial destructor. Use for cases where the same class might have
// different implementations that vary on destructor triviality or when the
// LSan hiding properties of NoDestructor are needed.
struct AllowForTriviallyDestructibleType;
// A wrapper that makes it easy to create an object of type T with static
// storage duration that:
// - is only constructed on first access
// - never invokes the destructor
// in order to satisfy the styleguide ban on global constructors and
// destructors.
//
// Runtime constant example:
// const std::string& GetLineSeparator() {
// // Forwards to std::string(size_t, char, const Allocator&) constructor.
// static const base::NoDestructor<std::string> s(5, '-');
// return *s;
// }
//
// More complex initialization with a lambda:
// const std::string& GetSessionNonce() {
// static const base::NoDestructor<std::string> nonce([] {
//     std::string s(16, '\0');
// crypto::RandString(s.data(), s.size());
// return s;
// }());
// return *nonce;
// }
//
// NoDestructor<T> stores the object inline, so it also avoids a pointer
// indirection and a malloc. Also note that since C++11 static local variable
// initialization is thread-safe and so is this pattern. Code should prefer to
// use NoDestructor<T> over:
// - A function scoped static T* or T& that is dynamically initialized.
// - A global base::LazyInstance<T>.
//
// Note that since the destructor is never run, this *will* leak memory if used
// as a stack or member variable. Furthermore, a NoDestructor<T> should never
// have global scope as that may require a static initializer.
template <typename T, typename O = std::nullptr_t>
class NoDestructor {
public:
static_assert(
!std::is_trivially_destructible<T>::value ||
std::is_same<O, AllowForTriviallyDestructibleType>::value,
"base::NoDestructor is not needed because the templated class has a "
"trivial destructor");
static_assert(std::is_same<O, AllowForTriviallyDestructibleType>::value ||
std::is_same<O, std::nullptr_t>::value,
"AllowForTriviallyDestructibleType is the only valid option "
"for the second template parameter of NoDestructor");
// Not constexpr; just write static constexpr T x = ...; if the value should
// be a constexpr.
template <typename... Args>
explicit NoDestructor(Args&&... args) {
new (storage_) T(std::forward<Args>(args)...);
}
// Allows copy and move construction of the contained type, to allow
// construction from an initializer list, e.g. for std::vector.
explicit NoDestructor(const T& x) { new (storage_) T(x); }
explicit NoDestructor(T&& x) { new (storage_) T(std::move(x)); }
NoDestructor(const NoDestructor&) = delete;
NoDestructor& operator=(const NoDestructor&) = delete;
~NoDestructor() = default;
const T& operator*() const { return *get(); }
T& operator*() { return *get(); }
const T* operator->() const { return get(); }
T* operator->() { return get(); }
const T* get() const { return reinterpret_cast<const T*>(storage_); }
T* get() { return reinterpret_cast<T*>(storage_); }
private:
alignas(T) char storage_[sizeof(T)];
#if defined(LEAK_SANITIZER)
// TODO(https://crbug.com/812277): This is a hack to work around the fact
// that LSan doesn't seem to treat NoDestructor as a root for reachability
// analysis. This means that code like this:
// static base::NoDestructor<std::vector<int>> v({1, 2, 3});
// is considered a leak. Using the standard leak sanitizer annotations to
// suppress leaks doesn't work: std::vector is implicitly constructed before
// calling the base::NoDestructor constructor.
//
// Unfortunately, I haven't been able to demonstrate this issue in simpler
// reproductions: until that's resolved, hold an explicit pointer to the
// placement-new'd object in leak sanitizer mode to help LSan realize that
// objects allocated by the contained type are still reachable.
T* storage_ptr_ = reinterpret_cast<T*>(storage_);
#endif // defined(LEAK_SANITIZER)
};
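// Illustrative usage sketch (SharedDefault is hypothetical): the second
// template parameter is only needed when T may be trivially destructible, e.g.
// in generic code that must work for both kinds of T.
//
//   template <typename T>
//   const T& SharedDefault() {
//     static const base::NoDestructor<T,
//                                     base::AllowForTriviallyDestructibleType>
//         instance;
//     return *instance;
//   }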
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NO_DESTRUCTOR_H_

View File

@ -1,375 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_
#include <stddef.h>
#include <limits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math_impl.h"
namespace partition_alloc::internal::base {
namespace internal {
template <typename T>
class CheckedNumeric {
static_assert(std::is_arithmetic<T>::value,
"CheckedNumeric<T>: T must be a numeric type.");
public:
template <typename Src>
friend class CheckedNumeric;
using type = T;
constexpr CheckedNumeric() = default;
// Copy constructor.
template <typename Src>
constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
: state_(rhs.state_.value(), rhs.IsValid()) {}
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to CheckedNumerics to make them easier to use.
template <typename Src>
constexpr CheckedNumeric(Src value) // NOLINT(runtime/explicit)
: state_(value) {
static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
}
// This is not an explicit constructor because we want a seamless conversion
// from StrictNumeric types.
template <typename Src>
constexpr CheckedNumeric(
StrictNumeric<Src> value) // NOLINT(runtime/explicit)
: state_(static_cast<Src>(value)) {}
// IsValid() - The public API to test if a CheckedNumeric is currently valid.
// A range checked destination type can be supplied using the Dst template
// parameter.
template <typename Dst = T>
constexpr bool IsValid() const {
return state_.is_valid() &&
IsValueInRangeForNumericType<Dst>(state_.value());
}
// AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
// and is within the range supported by the destination type. Returns true if
// successful and false otherwise.
template <typename Dst>
#if defined(__clang__) || defined(__GNUC__)
__attribute__((warn_unused_result))
#elif defined(_MSC_VER)
_Check_return_
#endif
constexpr bool
AssignIfValid(Dst* result) const {
return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
? ((*result = static_cast<Dst>(state_.value())), true)
: false;
}
// ValueOrDie() - The primary accessor for the underlying value. If the
// current state is not valid it will CHECK and crash.
// A range checked destination type can be supplied using the Dst template
// parameter, which will trigger a CHECK if the value is not in bounds for
// the destination.
// The CHECK behavior can be overridden by supplying a handler as a
// template parameter, for test code, etc. However, the handler cannot access
// the underlying value, and it is not available through other means.
template <typename Dst = T, class CheckHandler = CheckOnFailure>
constexpr StrictNumeric<Dst> ValueOrDie() const {
return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
? static_cast<Dst>(state_.value())
: CheckHandler::template HandleFailure<Dst>();
}
// ValueOrDefault(T default_value) - A convenience method that returns the
// current value if the state is valid, and the supplied default_value for
// any other state.
// A range checked destination type can be supplied using the Dst template
// parameter. WARNING: This function may fail to compile or CHECK at runtime
// if the supplied default_value is not within range of the destination type.
template <typename Dst = T, typename Src>
constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
return PA_BASE_NUMERICS_LIKELY(IsValid<Dst>())
? static_cast<Dst>(state_.value())
: checked_cast<Dst>(default_value);
}
// Returns a checked numeric of the specified type, cast from the current
// CheckedNumeric. If the current state is invalid or the destination cannot
// represent the result then the returned CheckedNumeric will be invalid.
template <typename Dst>
constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
return *this;
}
// This friend method is available solely for providing more detailed logging
// in the tests. Do not implement it in production code, because the
// underlying values may change at any time.
template <typename U>
friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
// Prototypes for the supported arithmetic operator overloads.
template <typename Src>
constexpr CheckedNumeric& operator+=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator-=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator*=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator/=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator%=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator<<=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator>>=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator&=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator|=(const Src rhs);
template <typename Src>
constexpr CheckedNumeric& operator^=(const Src rhs);
constexpr CheckedNumeric operator-() const {
// Use an optimized code path for a known run-time variable.
if (!PA_IsConstantEvaluated() && std::is_signed<T>::value &&
std::is_floating_point<T>::value) {
return FastRuntimeNegate();
}
// The negation of two's complement int min is int min.
const bool is_valid =
IsValid() &&
(!std::is_signed<T>::value || std::is_floating_point<T>::value ||
NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
}
constexpr CheckedNumeric operator~() const {
return CheckedNumeric<decltype(InvertWrapper(T()))>(
InvertWrapper(state_.value()), IsValid());
}
constexpr CheckedNumeric Abs() const {
return !IsValueNegative(state_.value()) ? *this : -*this;
}
template <typename U>
constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
const U rhs) const {
return CheckMax(*this, rhs);
}
template <typename U>
constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
const U rhs) const {
return CheckMin(*this, rhs);
}
// This function is available only for integral types. It returns an unsigned
// integer of the same width as the source type, containing the absolute value
// of the source, and properly handling signed min.
constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
UnsignedAbs() const {
return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
SafeUnsignedAbs(state_.value()), state_.is_valid());
}
constexpr CheckedNumeric& operator++() {
*this += 1;
return *this;
}
constexpr CheckedNumeric operator++(int) {
CheckedNumeric value = *this;
*this += 1;
return value;
}
constexpr CheckedNumeric& operator--() {
*this -= 1;
return *this;
}
constexpr CheckedNumeric operator--(int) {
// TODO(pkasting): Consider std::exchange() once it's constexpr in C++20.
const CheckedNumeric value = *this;
*this -= 1;
return value;
}
// These perform the actual math operations on the CheckedNumerics.
// Binary arithmetic operations.
template <template <typename, typename, typename> class M,
typename L,
typename R>
static constexpr CheckedNumeric MathOp(const L lhs, const R rhs) {
using Math = typename MathWrapper<M, L, R>::math;
T result = 0;
const bool is_valid =
Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
return CheckedNumeric<T>(result, is_valid);
}
// Assignment arithmetic operations.
template <template <typename, typename, typename> class M, typename R>
constexpr CheckedNumeric& MathOp(const R rhs) {
using Math = typename MathWrapper<M, T, R>::math;
T result = 0; // Using T as the destination saves a range check.
const bool is_valid =
state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
*this = CheckedNumeric<T>(result, is_valid);
return *this;
}
private:
CheckedNumericState<T> state_;
CheckedNumeric FastRuntimeNegate() const {
T result;
const bool success = CheckedSubOp<T, T>::Do(T(0), state_.value(), &result);
return CheckedNumeric<T>(result, IsValid() && success);
}
template <typename Src>
constexpr CheckedNumeric(Src value, bool is_valid)
: state_(value, is_valid) {}
// These wrappers allow us to handle state the same way for both
// CheckedNumeric and POD arithmetic types.
template <typename Src>
struct Wrapper {
static constexpr bool is_valid(Src) { return true; }
static constexpr Src value(Src value) { return value; }
};
template <typename Src>
struct Wrapper<CheckedNumeric<Src>> {
static constexpr bool is_valid(const CheckedNumeric<Src> v) {
return v.IsValid();
}
static constexpr Src value(const CheckedNumeric<Src> v) {
return v.state_.value();
}
};
template <typename Src>
struct Wrapper<StrictNumeric<Src>> {
static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
static constexpr Src value(const StrictNumeric<Src> v) {
return static_cast<Src>(v);
}
};
};
// Convenience functions to avoid the ugly template disambiguator syntax.
template <typename Dst, typename Src>
constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
return value.template IsValid<Dst>();
}
template <typename Dst, typename Src>
constexpr StrictNumeric<Dst> ValueOrDieForType(
const CheckedNumeric<Src> value) {
return value.template ValueOrDie<Dst>();
}
template <typename Dst, typename Src, typename Default>
constexpr StrictNumeric<Dst> ValueOrDefaultForType(
const CheckedNumeric<Src> value,
const Default default_value) {
return value.template ValueOrDefault<Dst>(default_value);
}
// Convenience wrapper to return a new CheckedNumeric from the provided
// arithmetic or CheckedNumericType.
template <typename T>
constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(
const T value) {
return value;
}
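// Illustrative usage sketch (count and element_size are hypothetical inputs):
// the typical pattern is to build the expression with the Check* helpers or
// CheckedNumeric operators and then extract the result with AssignIfValid() or
// ValueOrDie().
//
//   size_t bytes;
//   if (!CheckMul(count, element_size).AssignIfValid(&bytes))
//     return nullptr;                                 // overflow detected
//   size_t padded = CheckAdd(bytes, 8).ValueOrDie();  // CHECKs on overflow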
// These implement the variadic wrapper for the math operations.
template <template <typename, typename, typename> class M,
typename L,
typename R>
constexpr CheckedNumeric<typename MathWrapper<M, L, R>::type> CheckMathOp(
const L lhs,
const R rhs) {
using Math = typename MathWrapper<M, L, R>::math;
return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
rhs);
}
// General purpose wrapper template for arithmetic operations.
template <template <typename, typename, typename> class M,
typename L,
typename R,
typename... Args>
constexpr auto CheckMathOp(const L lhs, const R rhs, const Args... args) {
return CheckMathOp<M>(CheckMathOp<M>(lhs, rhs), args...);
}
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Sub, -, -=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mul, *, *=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Div, /, /=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Mod, %, %=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Lsh, <<, <<=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Rsh, >>, >>=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, And, &, &=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Or, |, |=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Xor, ^, ^=)
PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Max)
PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Checked, Check, Min)
// These are some extra StrictNumeric operators to support simple pointer
// arithmetic with our result types. Since wrapping on a pointer is always
// bad, we trigger the CHECK condition here.
template <typename L, typename R>
L* operator+(L* lhs, const StrictNumeric<R> rhs) {
const uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
CheckMul(sizeof(L), static_cast<R>(rhs)))
.template ValueOrDie<uintptr_t>();
return reinterpret_cast<L*>(result);
}
template <typename L, typename R>
L* operator-(L* lhs, const StrictNumeric<R> rhs) {
const uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
CheckMul(sizeof(L), static_cast<R>(rhs)))
.template ValueOrDie<uintptr_t>();
return reinterpret_cast<L*>(result);
}
} // namespace internal
using internal::CheckAdd;
using internal::CheckAnd;
using internal::CheckDiv;
using internal::CheckedNumeric;
using internal::CheckLsh;
using internal::CheckMax;
using internal::CheckMin;
using internal::CheckMod;
using internal::CheckMul;
using internal::CheckOr;
using internal::CheckRsh;
using internal::CheckSub;
using internal::CheckXor;
using internal::IsValidForType;
using internal::MakeCheckedNum;
using internal::ValueOrDefaultForType;
using internal::ValueOrDieForType;
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_H_

View File

@ -1,592 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_
#include <stddef.h>
#include <stdint.h>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <limits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_shared_impl.h"
namespace partition_alloc::internal::base::internal {
template <typename T>
constexpr bool CheckedAddImpl(T x, T y, T* result) {
static_assert(std::is_integral<T>::value, "Type must be integral");
// Since the value of x+y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
using UnsignedDst = typename std::make_unsigned<T>::type;
using SignedDst = typename std::make_signed<T>::type;
const UnsignedDst ux = static_cast<UnsignedDst>(x);
const UnsignedDst uy = static_cast<UnsignedDst>(y);
const UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
// Addition is valid if the sign of (x + y) is equal to either that of x or
// that of y.
if (std::is_signed<T>::value
? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) < 0
: uresult < uy)  // Unsigned is either valid or it wrapped (overflow).
return false;
*result = static_cast<T>(uresult);
return true;
}
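// Worked example for the check above: with T = int8_t and x = y = 100, the
// unsigned sum is uresult = 200 = 0xC8. Both (uresult ^ ux) and (uresult ^ uy)
// then have the sign bit set, so the signed AND is negative and the function
// correctly reports the overflow by returning false.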
template <typename T, typename U, class Enable = void>
struct CheckedAddOp {};
template <typename T, typename U>
struct CheckedAddOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
if constexpr (CheckedAddFastOp<T, U>::is_supported)
return CheckedAddFastOp<T, U>::Do(x, y, result);
// Double the underlying type up to a full machine word.
using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
using Promotion =
typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
IntegerBitsPlusSign<intptr_t>::value),
typename BigEnoughPromotion<T, U>::type,
FastPromotion>::type;
// Fail if either operand is out of range for the promoted type.
// TODO(jschuh): This could be made to work for a broader range of values.
if (PA_BASE_NUMERICS_UNLIKELY(
!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y))) {
return false;
}
Promotion presult = {};
bool is_valid = true;
if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
} else {
is_valid = CheckedAddImpl(static_cast<Promotion>(x),
static_cast<Promotion>(y), &presult);
}
if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
return false;
*result = static_cast<V>(presult);
return true;
}
};
template <typename T>
constexpr bool CheckedSubImpl(T x, T y, T* result) {
static_assert(std::is_integral<T>::value, "Type must be integral");
// Since the value of x+y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
using UnsignedDst = typename std::make_unsigned<T>::type;
using SignedDst = typename std::make_signed<T>::type;
const UnsignedDst ux = static_cast<UnsignedDst>(x);
const UnsignedDst uy = static_cast<UnsignedDst>(y);
const UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
// Subtraction is valid if either x and y have the same sign, or (x-y) and x
// have the same sign.
if (std::is_signed<T>::value
? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) < 0
: x < y)
return false;
*result = static_cast<T>(uresult);
return true;
}
template <typename T, typename U, class Enable = void>
struct CheckedSubOp {};
template <typename T, typename U>
struct CheckedSubOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
if constexpr (CheckedSubFastOp<T, U>::is_supported)
return CheckedSubFastOp<T, U>::Do(x, y, result);
// Double the underlying type up to a full machine word.
using FastPromotion = typename FastIntegerArithmeticPromotion<T, U>::type;
using Promotion =
typename std::conditional<(IntegerBitsPlusSign<FastPromotion>::value >
IntegerBitsPlusSign<intptr_t>::value),
typename BigEnoughPromotion<T, U>::type,
FastPromotion>::type;
// Fail if either operand is out of range for the promoted type.
// TODO(jschuh): This could be made to work for a broader range of values.
if (PA_BASE_NUMERICS_UNLIKELY(
!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y))) {
return false;
}
Promotion presult = {};
bool is_valid = true;
if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
} else {
is_valid = CheckedSubImpl(static_cast<Promotion>(x),
static_cast<Promotion>(y), &presult);
}
if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
return false;
*result = static_cast<V>(presult);
return true;
}
};
template <typename T>
constexpr bool CheckedMulImpl(T x, T y, T* result) {
static_assert(std::is_integral<T>::value, "Type must be integral");
// Since the value of x*y is potentially undefined if we have a signed type,
// we compute it using the unsigned type of the same size.
using UnsignedDst = typename std::make_unsigned<T>::type;
using SignedDst = typename std::make_signed<T>::type;
const UnsignedDst ux = SafeUnsignedAbs(x);
const UnsignedDst uy = SafeUnsignedAbs(y);
const UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
const bool is_negative =
std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
// We have a fast out for unsigned identity or zero on the second operand.
// After that it's an unsigned overflow check on the absolute value, with
// a +1 bound for a negative result.
if (uy > UnsignedDst(!std::is_signed<T>::value || is_negative) &&
ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy)
return false;
*result = is_negative ? 0 - uresult : uresult;
return true;
}
template <typename T, typename U, class Enable = void>
struct CheckedMulOp {};
template <typename T, typename U>
struct CheckedMulOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
if constexpr (CheckedMulFastOp<T, U>::is_supported)
return CheckedMulFastOp<T, U>::Do(x, y, result);
using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
// Verify the destination type can hold the result (always true for 0).
if (PA_BASE_NUMERICS_UNLIKELY(
(!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y)) &&
x && y)) {
return false;
}
Promotion presult = {};
bool is_valid = true;
if (CheckedMulFastOp<Promotion, Promotion>::is_supported) {
// The fast op may be available with the promoted type.
is_valid = CheckedMulFastOp<Promotion, Promotion>::Do(x, y, &presult);
} else if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
} else {
is_valid = CheckedMulImpl(static_cast<Promotion>(x),
static_cast<Promotion>(y), &presult);
}
if (!is_valid || !IsValueInRangeForNumericType<V>(presult))
return false;
*result = static_cast<V>(presult);
return true;
}
};
// Division just requires a check for a zero denominator or an invalid negation
// on signed min/-1.
template <typename T, typename U, class Enable = void>
struct CheckedDivOp {};
template <typename T, typename U>
struct CheckedDivOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
if (PA_BASE_NUMERICS_UNLIKELY(!y))
return false;
// The overflow check can be compiled away if we don't have the exact
// combination of types needed to trigger this case.
using Promotion = typename BigEnoughPromotion<T, U>::type;
if (PA_BASE_NUMERICS_UNLIKELY(
(std::is_signed<T>::value && std::is_signed<U>::value &&
IsTypeInRangeForNumericType<T, Promotion>::value &&
static_cast<Promotion>(x) ==
std::numeric_limits<Promotion>::lowest() &&
y == static_cast<U>(-1)))) {
return false;
}
// This branch always compiles away if the above branch wasn't removed.
if (PA_BASE_NUMERICS_UNLIKELY(
(!IsValueInRangeForNumericType<Promotion>(x) ||
!IsValueInRangeForNumericType<Promotion>(y)) &&
x)) {
return false;
}
const Promotion presult = Promotion(x) / Promotion(y);
if (!IsValueInRangeForNumericType<V>(presult))
return false;
*result = static_cast<V>(presult);
return true;
}
};
template <typename T, typename U, class Enable = void>
struct CheckedModOp {};
template <typename T, typename U>
struct CheckedModOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
if (PA_BASE_NUMERICS_UNLIKELY(!y))
return false;
using Promotion = typename BigEnoughPromotion<T, U>::type;
if (PA_BASE_NUMERICS_UNLIKELY(
(std::is_signed<T>::value && std::is_signed<U>::value &&
IsTypeInRangeForNumericType<T, Promotion>::value &&
static_cast<Promotion>(x) ==
std::numeric_limits<Promotion>::lowest() &&
y == static_cast<U>(-1)))) {
*result = 0;
return true;
}
const Promotion presult =
static_cast<Promotion>(x) % static_cast<Promotion>(y);
if (!IsValueInRangeForNumericType<V>(presult))
return false;
*result = static_cast<Promotion>(presult);
return true;
}
};
template <typename T, typename U, class Enable = void>
struct CheckedLshOp {};
// Left shift. Shifts less than 0 or greater than or equal to the number
// of bits in the promoted type are undefined. Shifts of negative values
// are undefined. Otherwise it is defined when the result fits.
template <typename T, typename U>
struct CheckedLshOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = T;
template <typename V>
static constexpr bool Do(T x, U shift, V* result) {
// Disallow negative numbers and verify the shift is in bounds.
if (PA_BASE_NUMERICS_LIKELY(
!IsValueNegative(x) &&
as_unsigned(shift) < as_unsigned(std::numeric_limits<T>::digits))) {
// Shift as unsigned to avoid undefined behavior.
*result = static_cast<V>(as_unsigned(x) << shift);
// If the shift can be reversed, we know it was valid.
return *result >> shift == x;
}
// Handle the legal corner-case of a full-width signed shift of zero.
if (!std::is_signed<T>::value || x ||
as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits))
return false;
*result = 0;
return true;
}
};
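
The reverse-shift validity test and the full-width-shift-of-zero corner case handled above can be illustrated with a standalone sketch specialized to int32_t; the helper name is hypothetical.

#include <cstdint>

// Hypothetical illustration of the checks used above, specialized to int32_t.
bool checked_shl_i32(int32_t x, unsigned shift, int32_t* result) {
  if (x >= 0 && shift < 31) {  // 31 == std::numeric_limits<int32_t>::digits
    // Shift as unsigned to avoid undefined behavior, then undo the shift:
    // the operation is valid exactly when it round-trips.
    const int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(x) << shift);
    if (shifted >> shift == x) {
      *result = shifted;
      return true;
    }
    return false;  // High bits (or the sign bit) were lost.
  }
  if (x == 0 && shift == 31) {  // The one legal full-width case: shifting zero.
    *result = 0;
    return true;
  }
  return false;  // Negative value, or shift out of range.
}
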
template <typename T, typename U, class Enable = void>
struct CheckedRshOp {};
// Right shift. Shifts less than 0 or greater than or equal to the number
// of bits in the promoted type are undefined. Otherwise, it is always defined,
// but a right shift of a negative value is implementation-dependent.
template <typename T, typename U>
struct CheckedRshOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = T;
template <typename V>
static constexpr bool Do(T x, U shift, V* result) {
// Use sign conversion to push negative values out of range.
if (PA_BASE_NUMERICS_UNLIKELY(as_unsigned(shift) >=
IntegerBitsPlusSign<T>::value)) {
return false;
}
const T tmp = x >> shift;
if (!IsValueInRangeForNumericType<V>(tmp))
return false;
*result = static_cast<V>(tmp);
return true;
}
};
template <typename T, typename U, class Enable = void>
struct CheckedAndOp {};
// For simplicity we support only unsigned integer results.
template <typename T, typename U>
struct CheckedAndOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
const result_type tmp =
static_cast<result_type>(x) & static_cast<result_type>(y);
if (!IsValueInRangeForNumericType<V>(tmp))
return false;
*result = static_cast<V>(tmp);
return true;
}
};
template <typename T, typename U, class Enable = void>
struct CheckedOrOp {};
// For simplicity we support only unsigned integers.
template <typename T, typename U>
struct CheckedOrOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
const result_type tmp =
static_cast<result_type>(x) | static_cast<result_type>(y);
if (!IsValueInRangeForNumericType<V>(tmp))
return false;
*result = static_cast<V>(tmp);
return true;
}
};
template <typename T, typename U, class Enable = void>
struct CheckedXorOp {};
// For simplicity we support only unsigned integers.
template <typename T, typename U>
struct CheckedXorOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
const result_type tmp =
static_cast<result_type>(x) ^ static_cast<result_type>(y);
if (!IsValueInRangeForNumericType<V>(tmp))
return false;
*result = static_cast<V>(tmp);
return true;
}
};
// Max doesn't really need to be implemented this way because it can't fail,
// but it makes the code much cleaner to use the MathOp wrappers.
template <typename T, typename U, class Enable = void>
struct CheckedMaxOp {};
template <typename T, typename U>
struct CheckedMaxOp<
T,
U,
typename std::enable_if<std::is_arithmetic<T>::value &&
std::is_arithmetic<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
const result_type tmp = IsGreater<T, U>::Test(x, y)
? static_cast<result_type>(x)
: static_cast<result_type>(y);
if (!IsValueInRangeForNumericType<V>(tmp))
return false;
*result = static_cast<V>(tmp);
return true;
}
};
// Min doesn't really need to be implemented this way because it can't fail,
// but it makes the code much cleaner to use the MathOp wrappers.
template <typename T, typename U, class Enable = void>
struct CheckedMinOp {};
template <typename T, typename U>
struct CheckedMinOp<
T,
U,
typename std::enable_if<std::is_arithmetic<T>::value &&
std::is_arithmetic<U>::value>::type> {
using result_type = typename LowestValuePromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
const result_type tmp = IsLess<T, U>::Test(x, y)
? static_cast<result_type>(x)
: static_cast<result_type>(y);
if (!IsValueInRangeForNumericType<V>(tmp))
return false;
*result = static_cast<V>(tmp);
return true;
}
};
// This is just boilerplate that wraps the standard floating point arithmetic.
// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
#define PA_BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
template <typename T, typename U> \
struct Checked##NAME##Op< \
T, U, \
typename std::enable_if<std::is_floating_point<T>::value || \
std::is_floating_point<U>::value>::type> { \
using result_type = typename MaxExponentPromotion<T, U>::type; \
template <typename V> \
static constexpr bool Do(T x, U y, V* result) { \
using Promotion = typename MaxExponentPromotion<T, U>::type; \
const Promotion presult = x OP y; \
if (!IsValueInRangeForNumericType<V>(presult)) \
return false; \
*result = static_cast<V>(presult); \
return true; \
} \
};
PA_BASE_FLOAT_ARITHMETIC_OPS(Add, +)
PA_BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
PA_BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
PA_BASE_FLOAT_ARITHMETIC_OPS(Div, /)
#undef PA_BASE_FLOAT_ARITHMETIC_OPS
// Floats carry around their validity state with them, but integers do not. So,
// we wrap the underlying value in a specialization in order to hide that detail
// and expose an interface via accessors.
enum NumericRepresentation {
NUMERIC_INTEGER,
NUMERIC_FLOATING,
NUMERIC_UNKNOWN
};
template <typename NumericType>
struct GetNumericRepresentation {
static const NumericRepresentation value =
std::is_integral<NumericType>::value
? NUMERIC_INTEGER
: (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
: NUMERIC_UNKNOWN);
};
template <typename T,
NumericRepresentation type = GetNumericRepresentation<T>::value>
class CheckedNumericState {};
// Integrals require quite a bit of additional housekeeping to manage state.
template <typename T>
class CheckedNumericState<T, NUMERIC_INTEGER> {
public:
template <typename Src = int>
constexpr explicit CheckedNumericState(Src value = 0, bool is_valid = true)
: is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
value_(WellDefinedConversionOrZero(value, is_valid_)) {
static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
}
template <typename Src>
constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
: CheckedNumericState(rhs.value(), rhs.is_valid()) {}
constexpr bool is_valid() const { return is_valid_; }
constexpr T value() const { return value_; }
private:
// Ensures that a type conversion does not trigger undefined behavior.
template <typename Src>
static constexpr T WellDefinedConversionOrZero(Src value, bool is_valid) {
using SrcType = typename internal::UnderlyingType<Src>::type;
return (std::is_integral<SrcType>::value || is_valid)
? static_cast<T>(value)
: 0;
}
// is_valid_ precedes value_ because member initializers in the constructors
// are evaluated in field order, and is_valid_ must be read when initializing
// value_.
bool is_valid_;
T value_;
};
// Floating points maintain their own validity, but need translation wrappers.
template <typename T>
class CheckedNumericState<T, NUMERIC_FLOATING> {
public:
template <typename Src = double>
constexpr explicit CheckedNumericState(Src value = 0.0, bool is_valid = true)
: value_(WellDefinedConversionOrNaN(
value,
is_valid && IsValueInRangeForNumericType<T>(value))) {}
template <typename Src>
constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
: CheckedNumericState(rhs.value(), rhs.is_valid()) {}
constexpr bool is_valid() const {
// Written this way because std::isfinite is not reliably constexpr.
return PA_IsConstantEvaluated()
? value_ <= std::numeric_limits<T>::max() &&
value_ >= std::numeric_limits<T>::lowest()
: std::isfinite(value_);
}
constexpr T value() const { return value_; }
private:
// Ensures that a type conversion does not trigger undefined behavior.
template <typename Src>
static constexpr T WellDefinedConversionOrNaN(Src value, bool is_valid) {
using SrcType = typename internal::UnderlyingType<Src>::type;
return (StaticDstRangeRelationToSrcRange<T, SrcType>::value ==
NUMERIC_RANGE_CONTAINED ||
is_valid)
? static_cast<T>(value)
: std::numeric_limits<T>::quiet_NaN();
}
T value_;
};
} // namespace partition_alloc::internal::base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CHECKED_MATH_IMPL_H_

View File

@ -1,254 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
#include <stddef.h>
#include <limits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math_impl.h"
namespace partition_alloc::internal::base {
namespace internal {
template <typename T>
class ClampedNumeric {
static_assert(std::is_arithmetic<T>::value,
"ClampedNumeric<T>: T must be a numeric type.");
public:
using type = T;
constexpr ClampedNumeric() : value_(0) {}
// Copy constructor.
template <typename Src>
constexpr ClampedNumeric(const ClampedNumeric<Src>& rhs)
: value_(saturated_cast<T>(rhs.value_)) {}
template <typename Src>
friend class ClampedNumeric;
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to ClampedNumerics to make them easier to use.
template <typename Src>
constexpr ClampedNumeric(Src value) // NOLINT(runtime/explicit)
: value_(saturated_cast<T>(value)) {
static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
}
// This is not an explicit constructor because we want a seamless conversion
// from StrictNumeric types.
template <typename Src>
constexpr ClampedNumeric(
StrictNumeric<Src> value) // NOLINT(runtime/explicit)
: value_(saturated_cast<T>(static_cast<Src>(value))) {}
// Returns a ClampedNumeric of the specified type, cast from the current
// ClampedNumeric, and saturated to the destination type.
template <typename Dst>
constexpr ClampedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
return *this;
}
// Prototypes for the supported arithmetic operator overloads.
template <typename Src>
constexpr ClampedNumeric& operator+=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator-=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator*=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator/=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator%=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator<<=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator>>=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator&=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator|=(const Src rhs);
template <typename Src>
constexpr ClampedNumeric& operator^=(const Src rhs);
constexpr ClampedNumeric operator-() const {
// The negation of two's complement int min is int min, so that's the
// only overflow case where we will saturate.
return ClampedNumeric<T>(SaturatedNegWrapper(value_));
}
constexpr ClampedNumeric operator~() const {
return ClampedNumeric<decltype(InvertWrapper(T()))>(InvertWrapper(value_));
}
constexpr ClampedNumeric Abs() const {
// The negation of two's complement int min is int min, so that's the
// only overflow case where we will saturate.
return ClampedNumeric<T>(SaturatedAbsWrapper(value_));
}
template <typename U>
constexpr ClampedNumeric<typename MathWrapper<ClampedMaxOp, T, U>::type> Max(
const U rhs) const {
using result_type = typename MathWrapper<ClampedMaxOp, T, U>::type;
return ClampedNumeric<result_type>(
ClampedMaxOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
}
template <typename U>
constexpr ClampedNumeric<typename MathWrapper<ClampedMinOp, T, U>::type> Min(
const U rhs) const {
using result_type = typename MathWrapper<ClampedMinOp, T, U>::type;
return ClampedNumeric<result_type>(
ClampedMinOp<T, U>::Do(value_, Wrapper<U>::value(rhs)));
}
// This function is available only for integral types. It returns an unsigned
// integer of the same width as the source type, containing the absolute value
// of the source, and properly handling signed min.
constexpr ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>
UnsignedAbs() const {
return ClampedNumeric<typename UnsignedOrFloatForSize<T>::type>(
SafeUnsignedAbs(value_));
}
constexpr ClampedNumeric& operator++() {
*this += 1;
return *this;
}
constexpr ClampedNumeric operator++(int) {
ClampedNumeric value = *this;
*this += 1;
return value;
}
constexpr ClampedNumeric& operator--() {
*this -= 1;
return *this;
}
constexpr ClampedNumeric operator--(int) {
ClampedNumeric value = *this;
*this -= 1;
return value;
}
// These perform the actual math operations on the ClampedNumerics.
// Binary arithmetic operations.
template <template <typename, typename, typename> class M,
typename L,
typename R>
static constexpr ClampedNumeric MathOp(const L lhs, const R rhs) {
using Math = typename MathWrapper<M, L, R>::math;
return ClampedNumeric<T>(
Math::template Do<T>(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs)));
}
// Assignment arithmetic operations.
template <template <typename, typename, typename> class M, typename R>
constexpr ClampedNumeric& MathOp(const R rhs) {
using Math = typename MathWrapper<M, T, R>::math;
*this =
ClampedNumeric<T>(Math::template Do<T>(value_, Wrapper<R>::value(rhs)));
return *this;
}
template <typename Dst>
constexpr operator Dst() const {
return saturated_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(
value_);
}
// This method extracts the raw integer value without saturating it to the
// destination type as the conversion operator does. This is useful when
// e.g. assigning to an auto type or passing as a deduced template parameter.
constexpr T RawValue() const { return value_; }
private:
T value_;
// These wrappers allow us to handle state the same way for both
// ClampedNumeric and POD arithmetic types.
template <typename Src>
struct Wrapper {
static constexpr typename UnderlyingType<Src>::type value(Src value) {
return value;
}
};
};
// Convenience wrapper to return a new ClampedNumeric from the provided
// arithmetic or ClampedNumericType.
template <typename T>
constexpr ClampedNumeric<typename UnderlyingType<T>::type> MakeClampedNum(
const T value) {
return value;
}
// These implement the variadic wrapper for the math operations.
template <template <typename, typename, typename> class M,
typename L,
typename R>
constexpr ClampedNumeric<typename MathWrapper<M, L, R>::type> ClampMathOp(
const L lhs,
const R rhs) {
using Math = typename MathWrapper<M, L, R>::math;
return ClampedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
rhs);
}
// General purpose wrapper template for arithmetic operations.
template <template <typename, typename, typename> class M,
typename L,
typename R,
typename... Args>
constexpr auto ClampMathOp(const L lhs, const R rhs, const Args... args) {
return ClampMathOp<M>(ClampMathOp<M>(lhs, rhs), args...);
}
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Add, +, +=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Sub, -, -=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mul, *, *=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Div, /, /=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Mod, %, %=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Lsh, <<, <<=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Rsh, >>, >>=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, And, &, &=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Or, |, |=)
PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Clamped, Clamp, Xor, ^, ^=)
PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Max)
PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(Clamped, Clamp, Min)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLess, <)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsLessOrEqual, <=)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreater, >)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsGreaterOrEqual, >=)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsEqual, ==)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Clamped, IsNotEqual, !=)
} // namespace internal
using internal::ClampAdd;
using internal::ClampAnd;
using internal::ClampDiv;
using internal::ClampedNumeric;
using internal::ClampLsh;
using internal::ClampMax;
using internal::ClampMin;
using internal::ClampMod;
using internal::ClampMul;
using internal::ClampOr;
using internal::ClampRsh;
using internal::ClampSub;
using internal::ClampXor;
using internal::MakeClampedNum;
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_H_
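
A short usage sketch of the interface above, assuming it is built inside the partition_alloc tree; the variable names and helper function are illustrative only.

#include <cstdint>
#include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h"

namespace pa_base = partition_alloc::internal::base;

void ClampedMathSketch() {
  // Arithmetic on a ClampedNumeric saturates instead of wrapping or trapping.
  pa_base::ClampedNumeric<uint8_t> byte_count = 200;
  byte_count += 100;               // Would overflow uint8_t; saturates.
  uint8_t saturated = byte_count;  // 255
  // The free-function wrappers operate directly on plain arithmetic types.
  int32_t sum = pa_base::ClampAdd(std::numeric_limits<int32_t>::max(), 1);  // INT32_MAX
  (void)saturated;
  (void)sum;
}
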

View File

@ -1,337 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_
#include <stddef.h>
#include <stdint.h>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <limits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_shared_impl.h"
namespace partition_alloc::internal::base::internal {
template <typename T,
typename std::enable_if<std::is_integral<T>::value &&
std::is_signed<T>::value>::type* = nullptr>
constexpr T SaturatedNegWrapper(T value) {
return PA_IsConstantEvaluated() || !ClampedNegFastOp<T>::is_supported
? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
? NegateWrapper(value)
: std::numeric_limits<T>::max())
: ClampedNegFastOp<T>::Do(value);
}
template <typename T,
typename std::enable_if<std::is_integral<T>::value &&
!std::is_signed<T>::value>::type* = nullptr>
constexpr T SaturatedNegWrapper(T value) {
return T(0);
}
template <
typename T,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
constexpr T SaturatedNegWrapper(T value) {
return -value;
}
template <typename T,
typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr T SaturatedAbsWrapper(T value) {
// The calculation below is a static identity for unsigned types, but for
// signed integer types it provides a non-branching, saturated absolute value.
// This works because SafeUnsignedAbs() returns an unsigned type, which can
// represent the absolute value of all negative numbers of an equal-width
// integer type. The call to IsValueNegative() then detects overflow in the
// special case of numeric_limits<T>::min(), by evaluating the bit pattern as
// a signed integer value. If it is the overflow case, we end up subtracting
// one from the unsigned result, thus saturating to numeric_limits<T>::max().
return static_cast<T>(SafeUnsignedAbs(value) -
IsValueNegative<T>(SafeUnsignedAbs(value)));
}
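
A concrete walk-through of the branchless saturation described above, as a hypothetical standalone sketch for int8_t (assumes two's complement, guaranteed since C++20):

#include <cstdint>

// Hypothetical restatement of the trick above for int8_t.
constexpr uint8_t UnsignedAbs8(int8_t v) {
  return v < 0 ? static_cast<uint8_t>(0u - static_cast<uint8_t>(v))
               : static_cast<uint8_t>(v);
}
constexpr int8_t SaturatedAbs8(int8_t v) {
  // For v == -128 the unsigned magnitude is 128, whose bit pattern reads as a
  // negative int8_t, so one is subtracted and the result saturates to 127.
  return static_cast<int8_t>(UnsignedAbs8(v) -
                             (static_cast<int8_t>(UnsignedAbs8(v)) < 0 ? 1 : 0));
}
static_assert(SaturatedAbs8(-127) == 127, "ordinary absolute value");
static_assert(SaturatedAbs8(-128) == 127, "saturates instead of overflowing");
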
template <
typename T,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
constexpr T SaturatedAbsWrapper(T value) {
return value < 0 ? -value : value;
}
template <typename T, typename U, class Enable = void>
struct ClampedAddOp {};
template <typename T, typename U>
struct ClampedAddOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
if (!PA_IsConstantEvaluated() && ClampedAddFastOp<T, U>::is_supported)
return ClampedAddFastOp<T, U>::template Do<V>(x, y);
static_assert(std::is_same<V, result_type>::value ||
IsTypeInRangeForNumericType<U, V>::value,
"The saturation result cannot be determined from the "
"provided types.");
const V saturated = CommonMaxOrMin<V>(IsValueNegative(y));
V result = {};
return PA_BASE_NUMERICS_LIKELY((CheckedAddOp<T, U>::Do(x, y, &result)))
? result
: saturated;
}
};
template <typename T, typename U, class Enable = void>
struct ClampedSubOp {};
template <typename T, typename U>
struct ClampedSubOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
if (!PA_IsConstantEvaluated() && ClampedSubFastOp<T, U>::is_supported)
return ClampedSubFastOp<T, U>::template Do<V>(x, y);
static_assert(std::is_same<V, result_type>::value ||
IsTypeInRangeForNumericType<U, V>::value,
"The saturation result cannot be determined from the "
"provided types.");
const V saturated = CommonMaxOrMin<V>(!IsValueNegative(y));
V result = {};
return PA_BASE_NUMERICS_LIKELY((CheckedSubOp<T, U>::Do(x, y, &result)))
? result
: saturated;
}
};
template <typename T, typename U, class Enable = void>
struct ClampedMulOp {};
template <typename T, typename U>
struct ClampedMulOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
if (!PA_IsConstantEvaluated() && ClampedMulFastOp<T, U>::is_supported)
return ClampedMulFastOp<T, U>::template Do<V>(x, y);
V result = {};
const V saturated =
CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
return PA_BASE_NUMERICS_LIKELY((CheckedMulOp<T, U>::Do(x, y, &result)))
? result
: saturated;
}
};
template <typename T, typename U, class Enable = void>
struct ClampedDivOp {};
template <typename T, typename U>
struct ClampedDivOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
V result = {};
if (PA_BASE_NUMERICS_LIKELY((CheckedDivOp<T, U>::Do(x, y, &result))))
return result;
// Saturation goes to max, min, or NaN (if x is zero).
return x ? CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y))
: SaturationDefaultLimits<V>::NaN();
}
};
template <typename T, typename U, class Enable = void>
struct ClampedModOp {};
template <typename T, typename U>
struct ClampedModOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
V result = {};
return PA_BASE_NUMERICS_LIKELY((CheckedModOp<T, U>::Do(x, y, &result)))
? result
: x;
}
};
template <typename T, typename U, class Enable = void>
struct ClampedLshOp {};
// Left shift. Non-zero values saturate in the direction of the sign. A zero
// shifted by any value always results in zero.
template <typename T, typename U>
struct ClampedLshOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = T;
template <typename V = result_type>
static constexpr V Do(T x, U shift) {
static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
if (PA_BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits)) {
// Shift as unsigned to avoid undefined behavior.
V result = static_cast<V>(as_unsigned(x) << shift);
// If the shift can be reversed, we know it was valid.
if (PA_BASE_NUMERICS_LIKELY(result >> shift == x))
return result;
}
return x ? CommonMaxOrMin<V>(IsValueNegative(x)) : 0;
}
};
template <typename T, typename U, class Enable = void>
struct ClampedRshOp {};
// Right shift. Negative values saturate to -1. Positive or 0 saturates to 0.
template <typename T, typename U>
struct ClampedRshOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = T;
template <typename V = result_type>
static constexpr V Do(T x, U shift) {
static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
// Signed right shift is odd, because it saturates to -1 or 0.
const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
return PA_BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
? saturated_cast<V>(x >> shift)
: saturated;
}
};
template <typename T, typename U, class Enable = void>
struct ClampedAndOp {};
template <typename T, typename U>
struct ClampedAndOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
static constexpr V Do(T x, U y) {
return static_cast<result_type>(x) & static_cast<result_type>(y);
}
};
template <typename T, typename U, class Enable = void>
struct ClampedOrOp {};
// For simplicity we promote to unsigned integers.
template <typename T, typename U>
struct ClampedOrOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
static constexpr V Do(T x, U y) {
return static_cast<result_type>(x) | static_cast<result_type>(y);
}
};
template <typename T, typename U, class Enable = void>
struct ClampedXorOp {};
// For simplicity we support only unsigned integers.
template <typename T, typename U>
struct ClampedXorOp<T,
U,
typename std::enable_if<std::is_integral<T>::value &&
std::is_integral<U>::value>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
static constexpr V Do(T x, U y) {
return static_cast<result_type>(x) ^ static_cast<result_type>(y);
}
};
template <typename T, typename U, class Enable = void>
struct ClampedMaxOp {};
template <typename T, typename U>
struct ClampedMaxOp<
T,
U,
typename std::enable_if<std::is_arithmetic<T>::value &&
std::is_arithmetic<U>::value>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
return IsGreater<T, U>::Test(x, y) ? saturated_cast<V>(x)
: saturated_cast<V>(y);
}
};
template <typename T, typename U, class Enable = void>
struct ClampedMinOp {};
template <typename T, typename U>
struct ClampedMinOp<
T,
U,
typename std::enable_if<std::is_arithmetic<T>::value &&
std::is_arithmetic<U>::value>::type> {
using result_type = typename LowestValuePromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
return IsLess<T, U>::Test(x, y) ? saturated_cast<V>(x)
: saturated_cast<V>(y);
}
};
// This is just boilerplate that wraps the standard floating point arithmetic.
// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
#define PA_BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
template <typename T, typename U> \
struct Clamped##NAME##Op< \
T, U, \
typename std::enable_if<std::is_floating_point<T>::value || \
std::is_floating_point<U>::value>::type> { \
using result_type = typename MaxExponentPromotion<T, U>::type; \
template <typename V = result_type> \
static constexpr V Do(T x, U y) { \
return saturated_cast<V>(x OP y); \
} \
};
PA_BASE_FLOAT_ARITHMETIC_OPS(Add, +)
PA_BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
PA_BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
PA_BASE_FLOAT_ARITHMETIC_OPS(Div, /)
#undef PA_BASE_FLOAT_ARITHMETIC_OPS
} // namespace partition_alloc::internal::base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_CLAMPED_MATH_IMPL_H_

View File

@ -1,19 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_MATH_CONSTANTS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_MATH_CONSTANTS_H_
namespace partition_alloc::internal::base {
constexpr double kPiDouble = 3.14159265358979323846;
constexpr float kPiFloat = 3.14159265358979323846f;
// The mean acceleration due to gravity on Earth in m/s^2.
constexpr double kMeanGravityDouble = 9.80665;
constexpr float kMeanGravityFloat = 9.80665f;
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_MATH_CONSTANTS_H_

View File

@ -1,33 +0,0 @@
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_OSTREAM_OPERATORS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_OSTREAM_OPERATORS_H_
#include <ostream>
namespace partition_alloc::internal::base::internal {
template <typename T>
class ClampedNumeric;
template <typename T>
class StrictNumeric;
// Overload the ostream output operator to make logging work nicely.
template <typename T>
std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
os << static_cast<T>(value);
return os;
}
// Overload the ostream output operator to make logging work nicely.
template <typename T>
std::ostream& operator<<(std::ostream& os, const ClampedNumeric<T>& value) {
os << static_cast<T>(value);
return os;
}
} // namespace partition_alloc::internal::base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_OSTREAM_OPERATORS_H_

View File

@ -1,21 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_RANGES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_RANGES_H_
#include <cmath>
#include <type_traits>
namespace partition_alloc::internal::base {
template <typename T>
constexpr bool IsApproximatelyEqual(T lhs, T rhs, T tolerance) {
static_assert(std::is_arithmetic<T>::value, "Argument must be arithmetic");
return std::abs(rhs - lhs) <= tolerance;
}
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_RANGES_H_
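
A brief usage sketch of the helper above (illustrative; assumes the header is on the include path). All three arguments share one arithmetic type, and the test is a plain absolute-difference comparison.

#include "base/allocator/partition_allocator/partition_alloc_base/numerics/ranges.h"

namespace pa_base = partition_alloc::internal::base;

void ApproximateEqualitySketch() {
  bool close = pa_base::IsApproximatelyEqual(1.0, 1.05, 0.1);  // true: |diff| <= 0.1
  bool far = pa_base::IsApproximatelyEqual(1.0, 1.25, 0.1);    // false
  (void)close;
  (void)far;
}
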

View File

@ -1,381 +0,0 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_
#include <stddef.h>
#include <cmath>
#include <limits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_impl.h"
#if defined(__ARMEL__) && !defined(__native_client__)
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_arm_impl.h"
#define PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (1)
#else
#define PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
#endif
#if !PA_BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
#include <ostream>
#endif
namespace partition_alloc::internal::base {
namespace internal {
#if !PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
template <typename Dst, typename Src>
struct SaturateFastAsmOp {
static constexpr bool is_supported = false;
static constexpr Dst Do(Src) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<Dst>();
}
};
#endif // PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
#undef PA_BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS
// The following special case a few specific integer conversions where we can
// eke out better performance than range checking.
template <typename Dst, typename Src, typename Enable = void>
struct IsValueInRangeFastOp {
static constexpr bool is_supported = false;
static constexpr bool Do(Src value) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<bool>();
}
};
// Signed to signed range comparison.
template <typename Dst, typename Src>
struct IsValueInRangeFastOp<
Dst,
Src,
typename std::enable_if<
std::is_integral<Dst>::value && std::is_integral<Src>::value &&
std::is_signed<Dst>::value && std::is_signed<Src>::value &&
!IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
static constexpr bool is_supported = true;
static constexpr bool Do(Src value) {
// Just downcast to the smaller type, sign extend it back to the original
// type, and then see if it matches the original value.
return value == static_cast<Dst>(value);
}
};
// Signed to unsigned range comparison.
template <typename Dst, typename Src>
struct IsValueInRangeFastOp<
Dst,
Src,
typename std::enable_if<
std::is_integral<Dst>::value && std::is_integral<Src>::value &&
!std::is_signed<Dst>::value && std::is_signed<Src>::value &&
!IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
static constexpr bool is_supported = true;
static constexpr bool Do(Src value) {
// We cast a signed as unsigned to overflow negative values to the top,
// then compare against whichever maximum is smaller, as our upper bound.
return as_unsigned(value) <= as_unsigned(CommonMax<Src, Dst>());
}
};
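
Both fast paths above avoid explicit limit comparisons. A standalone sketch of the two tricks for int32_t sources follows; the function names are hypothetical, and the narrowing cast relies on two's complement behavior (guaranteed since C++20).

#include <cstdint>

// Signed -> signed: truncate, sign-extend back, and compare with the original.
constexpr bool FitsInInt8(int32_t value) {
  return value == static_cast<int8_t>(value);
}
// Signed -> unsigned: cast to unsigned so negatives wrap to huge values, then
// compare against the smaller of the two maxima (here 0xFFFF for uint16_t).
constexpr bool FitsInUint16(int32_t value) {
  return static_cast<uint32_t>(value) <= 0xFFFFu;
}
static_assert(FitsInInt8(-128) && !FitsInInt8(128), "round-trip cast check");
static_assert(FitsInUint16(65535) && !FitsInUint16(-1), "wrap-to-top check");
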
// Convenience function that returns true if the supplied value is in range
// for the destination type.
template <typename Dst, typename Src>
constexpr bool IsValueInRangeForNumericType(Src value) {
using SrcType = typename internal::UnderlyingType<Src>::type;
return internal::IsValueInRangeFastOp<Dst, SrcType>::is_supported
? internal::IsValueInRangeFastOp<Dst, SrcType>::Do(
static_cast<SrcType>(value))
: internal::DstRangeRelationToSrcRange<Dst>(
static_cast<SrcType>(value))
.IsValid();
}
// checked_cast<> is analogous to static_cast<> for numeric types,
// except that it CHECKs that the specified numeric conversion will not
// overflow or underflow. NaN source will always trigger a CHECK.
template <typename Dst,
class CheckHandler = internal::CheckOnFailure,
typename Src>
constexpr Dst checked_cast(Src value) {
// This throws a compile-time error on evaluating the constexpr if it can be
// determined at compile-time as failing, otherwise it will CHECK at runtime.
using SrcType = typename internal::UnderlyingType<Src>::type;
return PA_BASE_NUMERICS_LIKELY((IsValueInRangeForNumericType<Dst>(value)))
? static_cast<Dst>(static_cast<SrcType>(value))
: CheckHandler::template HandleFailure<Dst>();
}
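
A usage sketch for checked_cast (illustrative, assuming the header above is on the include path): in-range conversions behave like static_cast, while out-of-range ones hit CheckOnFailure and crash.

#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"

namespace pa_base = partition_alloc::internal::base;

void CheckedCastSketch(int raw_size) {
  // Fine whenever raw_size happens to lie in [0, 255].
  uint8_t small = pa_base::checked_cast<uint8_t>(raw_size);
  // pa_base::checked_cast<uint8_t>(300) would trap at runtime, and is rejected
  // outright if evaluated in a constant expression.
  (void)small;
}
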
// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
// You may provide your own limits (e.g. to saturated_cast) so long as you
// implement all of the static constexpr member functions in the class below.
template <typename T>
struct SaturationDefaultLimits : public std::numeric_limits<T> {
static constexpr T NaN() {
return std::numeric_limits<T>::has_quiet_NaN
? std::numeric_limits<T>::quiet_NaN()
: T();
}
using std::numeric_limits<T>::max;
static constexpr T Overflow() {
return std::numeric_limits<T>::has_infinity
? std::numeric_limits<T>::infinity()
: std::numeric_limits<T>::max();
}
using std::numeric_limits<T>::lowest;
static constexpr T Underflow() {
return std::numeric_limits<T>::has_infinity
? std::numeric_limits<T>::infinity() * -1
: std::numeric_limits<T>::lowest();
}
};
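
As a sketch of the customization point described above, a hypothetical handler that collapses every boundary failure to zero only needs to provide the same five static members as SaturationDefaultLimits:

#include <limits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"

namespace pa_base = partition_alloc::internal::base;

// Hypothetical custom limits: saturate every failure (overflow, underflow,
// NaN) to zero instead of the numeric boundaries.
template <typename T>
struct ZeroOnFailure {
  static constexpr T max() { return std::numeric_limits<T>::max(); }
  static constexpr T lowest() { return std::numeric_limits<T>::lowest(); }
  static constexpr T Overflow() { return T(); }
  static constexpr T Underflow() { return T(); }
  static constexpr T NaN() { return T(); }
};

void CustomLimitsSketch() {
  int clamped = pa_base::saturated_cast<int, ZeroOnFailure>(1e300);  // 0, not INT_MAX
  (void)clamped;
}
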
template <typename Dst, template <typename> class S, typename Src>
constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
// For some reason clang generates much better code when the branch is
// structured exactly this way, rather than a sequence of checks.
return !constraint.IsOverflowFlagSet()
? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
: S<Dst>::Underflow())
// Skip this check for integral Src, which cannot be NaN.
: (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
? S<Dst>::Overflow()
: S<Dst>::NaN());
}
// We can reduce the number of conditions and get slightly better performance
// for normal signed and unsigned integer ranges. And in the specific case of
// Arm, we can use the optimized saturation instructions.
template <typename Dst, typename Src, typename Enable = void>
struct SaturateFastOp {
static constexpr bool is_supported = false;
static constexpr Dst Do(Src value) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<Dst>();
}
};
template <typename Dst, typename Src>
struct SaturateFastOp<
Dst,
Src,
typename std::enable_if<std::is_integral<Src>::value &&
std::is_integral<Dst>::value &&
SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
static constexpr bool is_supported = true;
static constexpr Dst Do(Src value) {
return SaturateFastAsmOp<Dst, Src>::Do(value);
}
};
template <typename Dst, typename Src>
struct SaturateFastOp<
Dst,
Src,
typename std::enable_if<std::is_integral<Src>::value &&
std::is_integral<Dst>::value &&
!SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
static constexpr bool is_supported = true;
static constexpr Dst Do(Src value) {
// The exact order of the following is structured to hit the correct
// optimization heuristics across compilers. Do not change without
// checking the emitted code.
const Dst saturated = CommonMaxOrMin<Dst, Src>(
IsMaxInRangeForNumericType<Dst, Src>() ||
(!IsMinInRangeForNumericType<Dst, Src>() && IsValueNegative(value)));
return PA_BASE_NUMERICS_LIKELY(IsValueInRangeForNumericType<Dst>(value))
? static_cast<Dst>(value)
: saturated;
}
};
// saturated_cast<> is analogous to static_cast<> for numeric types, except
// that the specified numeric conversion will saturate by default rather than
// overflow or underflow, and NaN assignment to an integral will return 0.
// All boundary condition behaviors can be overridden with a custom handler.
template <typename Dst,
template <typename> class SaturationHandler = SaturationDefaultLimits,
typename Src>
constexpr Dst saturated_cast(Src value) {
using SrcType = typename UnderlyingType<Src>::type;
return !PA_IsConstantEvaluated() &&
SaturateFastOp<Dst, SrcType>::is_supported &&
std::is_same<SaturationHandler<Dst>,
SaturationDefaultLimits<Dst>>::value
? SaturateFastOp<Dst, SrcType>::Do(static_cast<SrcType>(value))
: saturated_cast_impl<Dst, SaturationHandler, SrcType>(
static_cast<SrcType>(value),
DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(
static_cast<SrcType>(value)));
}
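
A usage sketch for saturated_cast (illustrative, same include assumption): out-of-range values clamp to the destination limits, and NaN maps to zero under the default handler.

#include <cmath>
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"

namespace pa_base = partition_alloc::internal::base;

void SaturatedCastSketch() {
  uint8_t high = pa_base::saturated_cast<uint8_t>(300);       // 255
  int8_t low = pa_base::saturated_cast<int8_t>(-1000);        // -128
  int from_nan = pa_base::saturated_cast<int>(std::nan(""));  // 0
  (void)high;
  (void)low;
  (void)from_nan;
}
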
// strict_cast<> is analogous to static_cast<> for numeric types, except that
// it will cause a compile failure if the destination type is not large enough
// to contain any value in the source type. It performs no runtime checking.
template <typename Dst, typename Src>
constexpr Dst strict_cast(Src value) {
using SrcType = typename UnderlyingType<Src>::type;
static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
// If you got here from a compiler error, it's because you tried to assign
// from a source type to a destination type that has insufficient range.
// The solution may be to change the destination type you're assigning to,
// and use one large enough to represent the source.
// Alternatively, you may be better served with the checked_cast<> or
// saturated_cast<> template functions for your particular use case.
static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
NUMERIC_RANGE_CONTAINED,
"The source type is out of range for the destination type. "
"Please see strict_cast<> comments for more information.");
return static_cast<Dst>(static_cast<SrcType>(value));
}
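
A usage sketch for strict_cast (illustrative): it compiles only when the destination range statically contains the source range, and it performs no runtime work.

#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"

namespace pa_base = partition_alloc::internal::base;

int64_t StrictCastSketch(int32_t narrow, int64_t wide) {
  // int32_t's range is contained in int64_t, so this is a plain widening cast.
  int64_t widened = pa_base::strict_cast<int64_t>(narrow);
  // pa_base::strict_cast<int32_t>(wide) would fail the static_assert above.
  (void)wide;
  return widened;
}
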
// Some wrappers to statically check that a type is in range.
template <typename Dst, typename Src, class Enable = void>
struct IsNumericRangeContained {
static constexpr bool value = false;
};
template <typename Dst, typename Src>
struct IsNumericRangeContained<
Dst,
Src,
typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
ArithmeticOrUnderlyingEnum<Src>::value>::type> {
static constexpr bool value =
StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
NUMERIC_RANGE_CONTAINED;
};
// StrictNumeric implements compile time range checking between numeric types by
// wrapping assignment operations in a strict_cast. This class is intended to be
// used for function arguments and return types, to ensure the destination type
// can always contain the source type. This is essentially the same as enforcing
// -Wconversion in gcc and C4302 warnings on MSVC, but it can be applied
// incrementally at API boundaries, making it easier to convert code so that it
// compiles cleanly with truncation warnings enabled.
// This template should introduce no runtime overhead, but it also provides no
// runtime checking of any of the associated mathematical operations. Use
// CheckedNumeric for runtime range checks of the actual value being assigned.
template <typename T>
class StrictNumeric {
public:
using type = T;
constexpr StrictNumeric() : value_(0) {}
// Copy constructor.
template <typename Src>
constexpr StrictNumeric(const StrictNumeric<Src>& rhs)
: value_(strict_cast<T>(rhs.value_)) {}
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to StrictNumerics to make them easier to use.
template <typename Src>
constexpr StrictNumeric(Src value) // NOLINT(runtime/explicit)
: value_(strict_cast<T>(value)) {}
// If you got here from a compiler error, it's because you tried to assign
// from a source type to a destination type that has insufficient range.
// The solution may be to change the destination type you're assigning to,
// and use one large enough to represent the source.
// If you're assigning from a CheckedNumeric<> class, you may be able to use
// the AssignIfValid() member function, specify a narrower destination type to
// the member value functions (e.g. val.template ValueOrDie<Dst>()), use one
// of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
// If you've encountered an _ambiguous overload_ you can use a static_cast<>
// to explicitly cast the result to the destination type.
// If none of that works, you may be better served with the checked_cast<> or
// saturated_cast<> template functions for your particular use case.
template <typename Dst,
typename std::enable_if<
IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
constexpr operator Dst() const {
return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
}
private:
const T value_;
};
// Convenience wrapper returns a StrictNumeric from the provided arithmetic
// type.
template <typename T>
constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
const T value) {
return value;
}
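
A usage sketch of StrictNumeric as a parameter type (illustrative names): callers may pass any integer whose full range fits the parameter, and anything wider is rejected at compile time.

#include <cstddef>
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"

namespace pa_base = partition_alloc::internal::base;

// Accepts only arguments whose entire range fits in size_t.
void ReserveSlots(pa_base::StrictNumeric<size_t> capacity) {
  size_t n = capacity;  // Implicit, range-checked at compile time.
  (void)n;
}

void StrictNumericSketch(uint32_t count, int64_t wide) {
  ReserveSlots(count);  // OK: uint32_t's range is contained in size_t.
  // ReserveSlots(wide);  // Compile error: int64_t can be negative.
  (void)wide;
}
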
#define PA_BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP) \
template <typename L, typename R, \
typename std::enable_if< \
internal::Is##CLASS##Op<L, R>::value>::type* = nullptr> \
constexpr bool operator OP(const L lhs, const R rhs) { \
return SafeCompare<NAME, typename UnderlyingType<L>::type, \
typename UnderlyingType<R>::type>(lhs, rhs); \
}
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLess, <)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsLessOrEqual, <=)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreater, >)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsGreaterOrEqual, >=)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsEqual, ==)
PA_BASE_NUMERIC_COMPARISON_OPERATORS(Strict, IsNotEqual, !=)
} // namespace internal
using internal::as_signed;
using internal::as_unsigned;
using internal::checked_cast;
using internal::IsTypeInRangeForNumericType;
using internal::IsValueInRangeForNumericType;
using internal::IsValueNegative;
using internal::MakeStrictNum;
using internal::SafeUnsignedAbs;
using internal::saturated_cast;
using internal::strict_cast;
using internal::StrictNumeric;
// Explicitly make a shorter size_t alias for convenience.
using SizeT = StrictNumeric<size_t>;
// floating -> integral conversions that saturate and thus can actually return
// an integral type. In most cases, these should be preferred over the std::
// versions.
template <typename Dst = int,
typename Src,
typename = std::enable_if_t<std::is_integral<Dst>::value &&
std::is_floating_point<Src>::value>>
Dst ClampFloor(Src value) {
return saturated_cast<Dst>(std::floor(value));
}
template <typename Dst = int,
typename Src,
typename = std::enable_if_t<std::is_integral<Dst>::value &&
std::is_floating_point<Src>::value>>
Dst ClampCeil(Src value) {
return saturated_cast<Dst>(std::ceil(value));
}
template <typename Dst = int,
typename Src,
typename = std::enable_if_t<std::is_integral<Dst>::value &&
std::is_floating_point<Src>::value>>
Dst ClampRound(Src value) {
const Src rounded =
(value >= 0.0f) ? std::floor(value + 0.5f) : std::ceil(value - 0.5f);
return saturated_cast<Dst>(rounded);
}
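
A usage sketch of the saturating rounders above (illustrative): out-of-range inputs clamp instead of producing undefined behavior.

#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"

namespace pa_base = partition_alloc::internal::base;

void ClampRoundingSketch() {
  int floored = pa_base::ClampFloor(2.9);   // 2
  int ceiled = pa_base::ClampCeil(2.1);     // 3
  int rounded = pa_base::ClampRound(-2.5);  // -3: rounds half away from zero
  int huge = pa_base::ClampRound(1e20);     // INT_MAX: saturates, no UB
  (void)floored;
  (void)ceiled;
  (void)rounded;
  (void)huge;
}
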
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_H_

View File

@ -1,49 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_
#include <cassert>
#include <limits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions_impl.h"
namespace partition_alloc::internal::base::internal {
// Fast saturation to a destination type.
template <typename Dst, typename Src>
struct SaturateFastAsmOp {
static constexpr bool is_supported =
kEnableAsmCode && std::is_signed<Src>::value &&
std::is_integral<Dst>::value && std::is_integral<Src>::value &&
IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
!IsTypeInRangeForNumericType<Dst, Src>::value;
__attribute__((always_inline)) static Dst Do(Src value) {
int32_t src = value;
typename std::conditional<std::is_signed<Dst>::value, int32_t,
uint32_t>::type result;
if (std::is_signed<Dst>::value) {
asm("ssat %[dst], %[shift], %[src]"
: [dst] "=r"(result)
: [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
? IntegerBitsPlusSign<Dst>::value
: 32));
} else {
asm("usat %[dst], %[shift], %[src]"
: [dst] "=r"(result)
: [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value < 32
? IntegerBitsPlusSign<Dst>::value
: 31));
}
return static_cast<Dst>(result);
}
};
} // namespace partition_alloc::internal::base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_ARM_IMPL_H_

View File

@ -1,843 +0,0 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
#include <stdint.h>
#include <limits>
#include <type_traits>
#if defined(__GNUC__) || defined(__clang__)
#define PA_BASE_NUMERICS_LIKELY(x) __builtin_expect(!!(x), 1)
#define PA_BASE_NUMERICS_UNLIKELY(x) __builtin_expect(!!(x), 0)
#else
#define PA_BASE_NUMERICS_LIKELY(x) (x)
#define PA_BASE_NUMERICS_UNLIKELY(x) (x)
#endif
namespace partition_alloc::internal::base::internal {
// The std library doesn't provide a binary max_exponent for integers, however
// we can compute an analog using std::numeric_limits<>::digits.
template <typename NumericType>
struct MaxExponent {
static const int value = std::is_floating_point<NumericType>::value
? std::numeric_limits<NumericType>::max_exponent
: std::numeric_limits<NumericType>::digits + 1;
};
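
Concrete values of this analog, assuming IEEE-754 floating point: int32_t yields 31 + 1 = 32, uint32_t yields 32 + 1 = 33, float's binary max_exponent is 128, and double's is 1024, which is how the promotion logic can conclude that a float covers the range (though not the precision) of any 32-bit integer.

#include <cstdint>
#include <limits>

// Spot checks of the analog described above (values assume IEEE-754).
static_assert(std::numeric_limits<int32_t>::digits + 1 == 32, "signed 32-bit");
static_assert(std::numeric_limits<uint32_t>::digits + 1 == 33, "unsigned 32-bit");
static_assert(std::numeric_limits<float>::max_exponent == 128, "float");
static_assert(std::numeric_limits<double>::max_exponent == 1024, "double");
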
// The number of bits (including the sign) in an integer. Eliminates sizeof
// hacks.
template <typename NumericType>
struct IntegerBitsPlusSign {
static const int value = std::numeric_limits<NumericType>::digits +
std::is_signed<NumericType>::value;
};
// Helper templates for integer manipulations.
template <typename Integer>
struct PositionOfSignBit {
static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
};
// Determines if a numeric value is negative without throwing compiler
// warnings on: unsigned(value) < 0.
template <typename T,
typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
constexpr bool IsValueNegative(T value) {
static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
return value < 0;
}
template <typename T,
typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
constexpr bool IsValueNegative(T) {
static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
return false;
}
// This performs a fast negation, returning a signed value. It works on unsigned
// arguments, but probably doesn't do what you want for any unsigned value
// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
template <typename T>
constexpr typename std::make_signed<T>::type ConditionalNegate(
T x,
bool is_negative) {
static_assert(std::is_integral<T>::value, "Type must be integral");
using SignedT = typename std::make_signed<T>::type;
using UnsignedT = typename std::make_unsigned<T>::type;
return static_cast<SignedT>(
(static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
}
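
A standalone restatement of the branchless trick above, specialized to 32 bits (hypothetical name; the narrowing cast assumes two's complement, guaranteed since C++20): XOR with an all-ones mask plus one is a two's complement negation, while XOR with zero plus zero is the identity.

#include <cstdint>

constexpr int32_t ConditionalNegate32(uint32_t x, bool is_negative) {
  // mask is all ones when negating, zero otherwise.
  const uint32_t mask = is_negative ? 0xFFFFFFFFu : 0u;
  return static_cast<int32_t>((x ^ mask) + (is_negative ? 1u : 0u));
}
static_assert(ConditionalNegate32(5u, true) == -5, "negates");
static_assert(ConditionalNegate32(5u, false) == 5, "identity");
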
// This performs a safe, absolute value via unsigned overflow.
template <typename T>
constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
static_assert(std::is_integral<T>::value, "Type must be integral");
using UnsignedT = typename std::make_unsigned<T>::type;
return IsValueNegative(value)
? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
: static_cast<UnsignedT>(value);
}
// TODO(jschuh): Switch to std::is_constant_evaluated() once C++20 is supported.
// Alternately, the usage could be restructured for "consteval if" in C++23.
#define PA_IsConstantEvaluated() (__builtin_is_constant_evaluated())
// TODO(jschuh): Debug builds don't reliably propagate constants, so we restrict
// some accelerated runtime paths to release builds until this can be forced
// with consteval support in C++20 or C++23.
#if defined(NDEBUG)
constexpr bool kEnableAsmCode = true;
#else
constexpr bool kEnableAsmCode = false;
#endif
// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
// Also used in a constexpr template to trigger a compilation failure on
// an error condition.
struct CheckOnFailure {
template <typename T>
static T HandleFailure() {
#if defined(_MSC_VER)
__debugbreak();
#elif defined(__GNUC__) || defined(__clang__)
__builtin_trap();
#else
((void)(*(volatile char*)0 = 0));
#endif
return T();
}
};
enum IntegerRepresentation {
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_SIGNED
};
// A range for a given numeric Src type is contained for a given numeric Dst
// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
// We implement this as template specializations rather than simple static
// comparisons to ensure type correctness in our comparisons.
enum NumericRangeRepresentation {
NUMERIC_RANGE_NOT_CONTAINED,
NUMERIC_RANGE_CONTAINED
};
// Helper templates to statically determine if our destination type can contain
// maximum and minimum values represented by the source type.
template <typename Dst,
typename Src,
IntegerRepresentation DstSign = std::is_signed<Dst>::value
? INTEGER_REPRESENTATION_SIGNED
: INTEGER_REPRESENTATION_UNSIGNED,
IntegerRepresentation SrcSign = std::is_signed<Src>::value
? INTEGER_REPRESENTATION_SIGNED
: INTEGER_REPRESENTATION_UNSIGNED>
struct StaticDstRangeRelationToSrcRange;
// Same sign: Dst is guaranteed to contain Src only if its range is equal or
// larger.
template <typename Dst, typename Src, IntegerRepresentation Sign>
struct StaticDstRangeRelationToSrcRange<Dst, Src, Sign, Sign> {
static const NumericRangeRepresentation value =
MaxExponent<Dst>::value >= MaxExponent<Src>::value
? NUMERIC_RANGE_CONTAINED
: NUMERIC_RANGE_NOT_CONTAINED;
};
// Unsigned to signed: Dst is guaranteed to contain source only if its range is
// larger.
template <typename Dst, typename Src>
struct StaticDstRangeRelationToSrcRange<Dst,
Src,
INTEGER_REPRESENTATION_SIGNED,
INTEGER_REPRESENTATION_UNSIGNED> {
static const NumericRangeRepresentation value =
MaxExponent<Dst>::value > MaxExponent<Src>::value
? NUMERIC_RANGE_CONTAINED
: NUMERIC_RANGE_NOT_CONTAINED;
};
// Signed to unsigned: Dst cannot be statically determined to contain Src.
template <typename Dst, typename Src>
struct StaticDstRangeRelationToSrcRange<Dst,
Src,
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_SIGNED> {
static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
};
// This class wraps the range constraints as separate booleans so the compiler
// can identify constants and eliminate unused code paths.
class RangeCheck {
public:
constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
: is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
constexpr RangeCheck() : is_underflow_(false), is_overflow_(false) {}
constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
constexpr bool operator==(const RangeCheck rhs) const {
return is_underflow_ == rhs.is_underflow_ &&
is_overflow_ == rhs.is_overflow_;
}
constexpr bool operator!=(const RangeCheck rhs) const {
return !(*this == rhs);
}
private:
// Do not change the order of these member variables. The integral conversion
// optimization depends on this exact order.
const bool is_underflow_;
const bool is_overflow_;
};
// The following helper template addresses a corner case in range checks for
// conversion from a floating-point type to an integral type of smaller range
// but larger precision (e.g. float -> unsigned). The problem is as follows:
// 1. Integral maximum is always one less than a power of two, so it must be
// truncated to fit the mantissa of the floating point. The direction of
// rounding is implementation defined, but by default it's always IEEE
// floats, which round to nearest and thus result in a value of larger
// magnitude than the integral value.
// Example: float f = UINT_MAX; // f is 4294967296f but UINT_MAX
// // is 4294967295u.
// 2. If the floating point value is equal to the promoted integral maximum
// value, a range check will erroneously pass.
// Example: (4294967296f <= 4294967295u) // This is true due to a precision
// // loss in rounding up to float.
// 3. When the floating point value is then converted to an integral, the
// resulting value is out of range for the target integral type and
// thus is implementation defined.
// Example: unsigned u = (unsigned)f; // u will typically overflow to 0.
// To fix this bug we manually truncate the maximum value when the destination
// type is an integral of larger precision than the source floating-point type,
// such that the resulting maximum is represented exactly as a floating point.
template <typename Dst, typename Src, template <typename> class Bounds>
struct NarrowingRange {
using SrcLimits = std::numeric_limits<Src>;
using DstLimits = typename std::numeric_limits<Dst>;
// Computes the mask required to make an accurate comparison between types.
static const int kShift =
(MaxExponent<Src>::value > MaxExponent<Dst>::value &&
SrcLimits::digits < DstLimits::digits)
? (DstLimits::digits - SrcLimits::digits)
: 0;
template <
typename T,
typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
// Masks out the integer bits that are beyond the precision of the
// intermediate type used for comparison.
static constexpr T Adjust(T value) {
static_assert(std::is_same<T, Dst>::value, "");
static_assert(kShift < DstLimits::digits, "");
return static_cast<T>(
ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
IsValueNegative(value)));
}
template <typename T,
typename std::enable_if<std::is_floating_point<T>::value>::type* =
nullptr>
static constexpr T Adjust(T value) {
static_assert(std::is_same<T, Dst>::value, "");
static_assert(kShift == 0, "");
return value;
}
static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
};
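// Illustrative check (a sketch added for exposition, not in the original
// header; assumes IEEE-754 binary32 float and a 32-bit uint32_t): for
// float -> uint32_t, kShift is 32 - 24 = 8, so Adjust() drops the low 8 bits
// of UINT32_MAX and the effective maximum becomes 4294967040 (0xFFFFFF00),
// which a float represents exactly.
static_assert(NarrowingRange<uint32_t, float, std::numeric_limits>::max() ==
                  4294967040u,
              "");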
template <typename Dst,
typename Src,
template <typename>
class Bounds,
IntegerRepresentation DstSign = std::is_signed<Dst>::value
? INTEGER_REPRESENTATION_SIGNED
: INTEGER_REPRESENTATION_UNSIGNED,
IntegerRepresentation SrcSign = std::is_signed<Src>::value
? INTEGER_REPRESENTATION_SIGNED
: INTEGER_REPRESENTATION_UNSIGNED,
NumericRangeRepresentation DstRange =
StaticDstRangeRelationToSrcRange<Dst, Src>::value>
struct DstRangeRelationToSrcRangeImpl;
// The following templates are for ranges that must be verified at runtime. We
// split them into checks based on signedness to avoid confusing casts and
// compiler warnings on signed and unsigned comparisons.
// Same sign narrowing: The range is contained for normal limits.
template <typename Dst,
typename Src,
template <typename>
class Bounds,
IntegerRepresentation DstSign,
IntegerRepresentation SrcSign>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
Bounds,
DstSign,
SrcSign,
NUMERIC_RANGE_CONTAINED> {
static constexpr RangeCheck Check(Src value) {
using SrcLimits = std::numeric_limits<Src>;
using DstLimits = NarrowingRange<Dst, Src, Bounds>;
return RangeCheck(
static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
static_cast<Dst>(value) >= DstLimits::lowest(),
static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
static_cast<Dst>(value) <= DstLimits::max());
}
};
// Signed to signed narrowing: Both the upper and lower boundaries may be
// exceeded for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
Bounds,
INTEGER_REPRESENTATION_SIGNED,
INTEGER_REPRESENTATION_SIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
static constexpr RangeCheck Check(Src value) {
using DstLimits = NarrowingRange<Dst, Src, Bounds>;
return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
}
};
// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
// standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
Bounds,
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_UNSIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
static constexpr RangeCheck Check(Src value) {
using DstLimits = NarrowingRange<Dst, Src, Bounds>;
return RangeCheck(
DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
value <= DstLimits::max());
}
};
// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
Bounds,
INTEGER_REPRESENTATION_SIGNED,
INTEGER_REPRESENTATION_UNSIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
static constexpr RangeCheck Check(Src value) {
using DstLimits = NarrowingRange<Dst, Src, Bounds>;
using Promotion = decltype(Src() + Dst());
return RangeCheck(DstLimits::lowest() <= Dst(0) ||
static_cast<Promotion>(value) >=
static_cast<Promotion>(DstLimits::lowest()),
static_cast<Promotion>(value) <=
static_cast<Promotion>(DstLimits::max()));
}
};
// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
// and any negative value exceeds the lower boundary for standard limits.
template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
Bounds,
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_SIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
static constexpr RangeCheck Check(Src value) {
using SrcLimits = std::numeric_limits<Src>;
using DstLimits = NarrowingRange<Dst, Src, Bounds>;
using Promotion = decltype(Src() + Dst());
bool ge_zero = false;
// Converting a floating-point value to an integer discards the fractional
// part, so values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
if (std::is_floating_point<Src>::value) {
ge_zero = value > Src(-1);
} else {
ge_zero = value >= Src(0);
}
return RangeCheck(
ge_zero && (DstLimits::lowest() == 0 ||
static_cast<Dst>(value) >= DstLimits::lowest()),
static_cast<Promotion>(SrcLimits::max()) <=
static_cast<Promotion>(DstLimits::max()) ||
static_cast<Promotion>(value) <=
static_cast<Promotion>(DstLimits::max()));
}
};
// Simple wrapper for statically checking if a type's range is contained.
template <typename Dst, typename Src>
struct IsTypeInRangeForNumericType {
static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
NUMERIC_RANGE_CONTAINED;
};
template <typename Dst,
template <typename> class Bounds = std::numeric_limits,
typename Src>
constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
}
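// Illustrative checks (a sketch added for exposition, not in the original
// header; assumes a 32-bit int): a few sample constant-evaluated range checks.
static_assert(DstRangeRelationToSrcRange<uint8_t>(255).IsValid(), "");
static_assert(DstRangeRelationToSrcRange<uint8_t>(256).IsOverflow(), "");
static_assert(DstRangeRelationToSrcRange<uint8_t>(-1).IsUnderflow(), "");
// Fractional negative values above -1.0 truncate to 0 and are therefore in
// range for an unsigned destination.
static_assert(DstRangeRelationToSrcRange<uint32_t>(-0.5).IsValid(), "");
static_assert(DstRangeRelationToSrcRange<uint32_t>(-1.5).IsUnderflow(), "");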
// Integer promotion templates used by the portable checked integer arithmetic.
template <size_t Size, bool IsSigned>
struct IntegerForDigitsAndSign;
#define PA_INTEGER_FOR_DIGITS_AND_SIGN(I) \
template <> \
struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
std::is_signed<I>::value> { \
using type = I; \
}
PA_INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
PA_INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
PA_INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
PA_INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
PA_INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
PA_INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
PA_INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
PA_INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
#undef PA_INTEGER_FOR_DIGITS_AND_SIGN
// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
// support 128-bit math, then the ArithmeticPromotion template below will need
// to be updated (or more likely replaced with a decltype expression).
static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
"Max integer size not supported for this toolchain.");
template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
struct TwiceWiderInteger {
using type =
typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
IsSigned>::type;
};
enum ArithmeticPromotionCategory {
LEFT_PROMOTION, // Use the type of the left-hand argument.
RIGHT_PROMOTION // Use the type of the right-hand argument.
};
// Determines the type that can represent the largest positive value.
template <typename Lhs,
typename Rhs,
ArithmeticPromotionCategory Promotion =
(MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
? LEFT_PROMOTION
: RIGHT_PROMOTION>
struct MaxExponentPromotion;
template <typename Lhs, typename Rhs>
struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
using type = Lhs;
};
template <typename Lhs, typename Rhs>
struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
using type = Rhs;
};
// Determines the type that can represent the lowest arithmetic value.
template <typename Lhs,
typename Rhs,
ArithmeticPromotionCategory Promotion =
std::is_signed<Lhs>::value
? (std::is_signed<Rhs>::value
? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
? LEFT_PROMOTION
: RIGHT_PROMOTION)
: LEFT_PROMOTION)
: (std::is_signed<Rhs>::value
? RIGHT_PROMOTION
: (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
? LEFT_PROMOTION
: RIGHT_PROMOTION))>
struct LowestValuePromotion;
template <typename Lhs, typename Rhs>
struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
using type = Lhs;
};
template <typename Lhs, typename Rhs>
struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
using type = Rhs;
};
// Determines the type that is best able to represent an arithmetic result.
template <
typename Lhs,
typename Rhs = Lhs,
bool is_intmax_type =
std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
value == IntegerBitsPlusSign<intmax_t>::value,
bool is_max_exponent =
StaticDstRangeRelationToSrcRange<
typename MaxExponentPromotion<Lhs, Rhs>::type,
Lhs>::value ==
NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
typename MaxExponentPromotion<Lhs, Rhs>::type,
Rhs>::value == NUMERIC_RANGE_CONTAINED>
struct BigEnoughPromotion;
// The side with the max exponent is big enough.
template <typename Lhs, typename Rhs, bool is_intmax_type>
struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
static const bool is_contained = true;
};
// We can use a twice wider type to fit.
template <typename Lhs, typename Rhs>
struct BigEnoughPromotion<Lhs, Rhs, false, false> {
using type =
typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
std::is_signed<Lhs>::value ||
std::is_signed<Rhs>::value>::type;
static const bool is_contained = true;
};
// No type is large enough.
template <typename Lhs, typename Rhs>
struct BigEnoughPromotion<Lhs, Rhs, true, false> {
using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
static const bool is_contained = false;
};
// We can statically check if operations on the provided types can wrap, so we
// can skip the checked operations if they're not needed. So, for an integer we
// care if the destination type preserves the sign and is twice the width of
// the source.
template <typename T, typename Lhs, typename Rhs = Lhs>
struct IsIntegerArithmeticSafe {
static const bool value =
!std::is_floating_point<T>::value &&
!std::is_floating_point<Lhs>::value &&
!std::is_floating_point<Rhs>::value &&
std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
};
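// Illustrative checks (a sketch added for exposition, not in the original
// header): int64_t can absorb any sum or product of two int32_t values
// without wrapping, while int32_t itself cannot.
static_assert(IsIntegerArithmeticSafe<int64_t, int32_t, int32_t>::value, "");
static_assert(!IsIntegerArithmeticSafe<int32_t, int32_t, int32_t>::value, "");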
// Promotes to a type that can represent any possible result of a binary
// arithmetic operation with the source types.
template <typename Lhs,
typename Rhs,
bool is_promotion_possible = IsIntegerArithmeticSafe<
typename std::conditional<std::is_signed<Lhs>::value ||
std::is_signed<Rhs>::value,
intmax_t,
uintmax_t>::type,
typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
struct FastIntegerArithmeticPromotion;
template <typename Lhs, typename Rhs>
struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
using type =
typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
std::is_signed<Lhs>::value ||
std::is_signed<Rhs>::value>::type;
static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
static const bool is_contained = true;
};
template <typename Lhs, typename Rhs>
struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
static const bool is_contained = false;
};
// Extracts the underlying type from an enum.
template <typename T, bool is_enum = std::is_enum<T>::value>
struct ArithmeticOrUnderlyingEnum;
template <typename T>
struct ArithmeticOrUnderlyingEnum<T, true> {
using type = typename std::underlying_type<T>::type;
static const bool value = std::is_arithmetic<type>::value;
};
template <typename T>
struct ArithmeticOrUnderlyingEnum<T, false> {
using type = T;
static const bool value = std::is_arithmetic<type>::value;
};
// The following are helper templates used in the CheckedNumeric class.
template <typename T>
class CheckedNumeric;
template <typename T>
class ClampedNumeric;
template <typename T>
class StrictNumeric;
// Used to treat CheckedNumeric and arithmetic underlying types the same.
template <typename T>
struct UnderlyingType {
using type = typename ArithmeticOrUnderlyingEnum<T>::type;
static const bool is_numeric = std::is_arithmetic<type>::value;
static const bool is_checked = false;
static const bool is_clamped = false;
static const bool is_strict = false;
};
template <typename T>
struct UnderlyingType<CheckedNumeric<T>> {
using type = T;
static const bool is_numeric = true;
static const bool is_checked = true;
static const bool is_clamped = false;
static const bool is_strict = false;
};
template <typename T>
struct UnderlyingType<ClampedNumeric<T>> {
using type = T;
static const bool is_numeric = true;
static const bool is_checked = false;
static const bool is_clamped = true;
static const bool is_strict = false;
};
template <typename T>
struct UnderlyingType<StrictNumeric<T>> {
using type = T;
static const bool is_numeric = true;
static const bool is_checked = false;
static const bool is_clamped = false;
static const bool is_strict = true;
};
template <typename L, typename R>
struct IsCheckedOp {
static const bool value =
UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
};
template <typename L, typename R>
struct IsClampedOp {
static const bool value =
UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped) &&
!(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
};
template <typename L, typename R>
struct IsStrictOp {
static const bool value =
UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
(UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict) &&
!(UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked) &&
!(UnderlyingType<L>::is_clamped || UnderlyingType<R>::is_clamped);
};
// as_signed<> returns the supplied integral value (or integral castable
// Numeric template) cast as a signed integral of equivalent precision.
// I.e. it's mostly an alias for: static_cast<std::make_signed<T>::type>(t)
template <typename Src>
constexpr typename std::make_signed<
typename base::internal::UnderlyingType<Src>::type>::type
as_signed(const Src value) {
static_assert(std::is_integral<decltype(as_signed(value))>::value,
"Argument must be a signed or unsigned integer type.");
return static_cast<decltype(as_signed(value))>(value);
}
// as_unsigned<> returns the supplied integral value (or integral castable
// Numeric template) cast as an unsigned integral of equivalent precision.
// I.e. it's mostly an alias for: static_cast<std::make_unsigned<T>::type>(t)
template <typename Src>
constexpr typename std::make_unsigned<
typename base::internal::UnderlyingType<Src>::type>::type
as_unsigned(const Src value) {
static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
"Argument must be a signed or unsigned integer type.");
return static_cast<decltype(as_unsigned(value))>(value);
}
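// Illustrative checks (a sketch added for exposition, not in the original
// header; assumes two's complement representation): the casts change only the
// signedness of the type, never the bit pattern.
static_assert(as_unsigned(int8_t{-1}) == uint8_t{255}, "");
static_assert(as_signed(uint8_t{255}) == int8_t{-1}, "");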
template <typename L, typename R>
constexpr bool IsLessImpl(const L lhs,
const R rhs,
const RangeCheck l_range,
const RangeCheck r_range) {
return l_range.IsUnderflow() || r_range.IsOverflow() ||
(l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <
static_cast<decltype(lhs + rhs)>(rhs));
}
template <typename L, typename R>
struct IsLess {
static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
DstRangeRelationToSrcRange<L>(rhs));
}
};
template <typename L, typename R>
constexpr bool IsLessOrEqualImpl(const L lhs,
const R rhs,
const RangeCheck l_range,
const RangeCheck r_range) {
return l_range.IsUnderflow() || r_range.IsOverflow() ||
(l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) <=
static_cast<decltype(lhs + rhs)>(rhs));
}
template <typename L, typename R>
struct IsLessOrEqual {
static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
DstRangeRelationToSrcRange<L>(rhs));
}
};
template <typename L, typename R>
constexpr bool IsGreaterImpl(const L lhs,
const R rhs,
const RangeCheck l_range,
const RangeCheck r_range) {
return l_range.IsOverflow() || r_range.IsUnderflow() ||
(l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >
static_cast<decltype(lhs + rhs)>(rhs));
}
template <typename L, typename R>
struct IsGreater {
static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
DstRangeRelationToSrcRange<L>(rhs));
}
};
template <typename L, typename R>
constexpr bool IsGreaterOrEqualImpl(const L lhs,
const R rhs,
const RangeCheck l_range,
const RangeCheck r_range) {
return l_range.IsOverflow() || r_range.IsUnderflow() ||
(l_range == r_range && static_cast<decltype(lhs + rhs)>(lhs) >=
static_cast<decltype(lhs + rhs)>(rhs));
}
template <typename L, typename R>
struct IsGreaterOrEqual {
static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
DstRangeRelationToSrcRange<L>(rhs));
}
};
template <typename L, typename R>
struct IsEqual {
static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return DstRangeRelationToSrcRange<R>(lhs) ==
DstRangeRelationToSrcRange<L>(rhs) &&
static_cast<decltype(lhs + rhs)>(lhs) ==
static_cast<decltype(lhs + rhs)>(rhs);
}
};
template <typename L, typename R>
struct IsNotEqual {
static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return DstRangeRelationToSrcRange<R>(lhs) !=
DstRangeRelationToSrcRange<L>(rhs) ||
static_cast<decltype(lhs + rhs)>(lhs) !=
static_cast<decltype(lhs + rhs)>(rhs);
}
};
// These perform the actual math operations on the CheckedNumerics.
// Binary arithmetic operations.
template <template <typename, typename> class C, typename L, typename R>
constexpr bool SafeCompare(const L lhs, const R rhs) {
static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
"Types must be numeric.");
using Promotion = BigEnoughPromotion<L, R>;
using BigType = typename Promotion::type;
return Promotion::is_contained
// Force to a larger type for speed if both are contained.
? C<BigType, BigType>::Test(
static_cast<BigType>(static_cast<L>(lhs)),
static_cast<BigType>(static_cast<R>(rhs)))
// Let the template functions figure it out for mixed types.
: C<L, R>::Test(lhs, rhs);
}
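// Illustrative check (a sketch added for exposition, not in the original
// header): SafeCompare gives the mathematically correct answer for mixed-sign
// comparisons, whereas the builtin (-1 < 1u) is false because -1 is first
// converted to UINT_MAX.
static_assert(SafeCompare<IsLess>(-1, 1u), "");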
template <typename Dst, typename Src>
constexpr bool IsMaxInRangeForNumericType() {
return IsGreaterOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::max(),
std::numeric_limits<Src>::max());
}
template <typename Dst, typename Src>
constexpr bool IsMinInRangeForNumericType() {
return IsLessOrEqual<Dst, Src>::Test(std::numeric_limits<Dst>::lowest(),
std::numeric_limits<Src>::lowest());
}
template <typename Dst, typename Src>
constexpr Dst CommonMax() {
return !IsMaxInRangeForNumericType<Dst, Src>()
? Dst(std::numeric_limits<Dst>::max())
: Dst(std::numeric_limits<Src>::max());
}
template <typename Dst, typename Src>
constexpr Dst CommonMin() {
return !IsMinInRangeForNumericType<Dst, Src>()
? Dst(std::numeric_limits<Dst>::lowest())
: Dst(std::numeric_limits<Src>::lowest());
}
// This is a wrapper that returns the max or min for a supplied type.
// If the argument is false, the returned value is the maximum. If true the
// returned value is the minimum.
template <typename Dst, typename Src = Dst>
constexpr Dst CommonMaxOrMin(bool is_min) {
return is_min ? CommonMin<Dst, Src>() : CommonMax<Dst, Src>();
}
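// Illustrative checks (a sketch added for exposition, not in the original
// header): the "common" bound is the one representable in both types, e.g.
// uint8_t and int share a maximum of 255 and a minimum of 0.
static_assert(CommonMax<uint8_t, int>() == uint8_t{255}, "");
static_assert(CommonMin<uint8_t, int>() == uint8_t{0}, "");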
} // namespace partition_alloc::internal::base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_

View File

@ -1,12 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/checked_math.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_H_

View File

@ -1,123 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
#include <cassert>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
namespace partition_alloc::internal::base::internal {
template <typename T, typename U>
struct CheckedMulFastAsmOp {
static const bool is_supported =
kEnableAsmCode && FastIntegerArithmeticPromotion<T, U>::is_contained;
// The following is not an assembler routine and is thus constexpr safe; it
// just emits much more efficient code than the Clang and GCC builtins for
// performing overflow-checked multiplication when a twice wider type is
// available. The below compiles down to 2-3 instructions, depending on the
// width of the types in use.
// As an example, an int32_t multiply compiles to:
// smull r0, r1, r0, r1
// cmp r1, r1, asr #31
// And an int16_t multiply compiles to:
// smulbb r1, r1, r0
// asr r2, r1, #16
// cmp r2, r1, asr #15
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
Promotion presult;
presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
if (!IsValueInRangeForNumericType<V>(presult))
return false;
*result = static_cast<V>(presult);
return true;
}
};
template <typename T, typename U>
struct ClampedAddFastAsmOp {
static const bool is_supported =
kEnableAsmCode && BigEnoughPromotion<T, U>::is_contained &&
IsTypeInRangeForNumericType<
int32_t,
typename BigEnoughPromotion<T, U>::type>::value;
template <typename V>
__attribute__((always_inline)) static V Do(T x, U y) {
// This will get promoted to an int, so let the compiler do whatever is
// clever and rely on the saturated cast to bounds check.
if (IsIntegerArithmeticSafe<int, T, U>::value)
return saturated_cast<V>(x + y);
int32_t result;
int32_t x_i32 = checked_cast<int32_t>(x);
int32_t y_i32 = checked_cast<int32_t>(y);
asm("qadd %[result], %[first], %[second]"
: [result] "=r"(result)
: [first] "r"(x_i32), [second] "r"(y_i32));
return saturated_cast<V>(result);
}
};
template <typename T, typename U>
struct ClampedSubFastAsmOp {
static const bool is_supported =
kEnableAsmCode && BigEnoughPromotion<T, U>::is_contained &&
IsTypeInRangeForNumericType<
int32_t,
typename BigEnoughPromotion<T, U>::type>::value;
template <typename V>
__attribute__((always_inline)) static V Do(T x, U y) {
// This will get promoted to an int, so let the compiler do whatever is
// clever and rely on the saturated cast to bounds check.
if (IsIntegerArithmeticSafe<int, T, U>::value)
return saturated_cast<V>(x - y);
int32_t result;
int32_t x_i32 = checked_cast<int32_t>(x);
int32_t y_i32 = checked_cast<int32_t>(y);
asm("qsub %[result], %[first], %[second]"
: [result] "=r"(result)
: [first] "r"(x_i32), [second] "r"(y_i32));
return saturated_cast<V>(result);
}
};
template <typename T, typename U>
struct ClampedMulFastAsmOp {
static const bool is_supported =
kEnableAsmCode && CheckedMulFastAsmOp<T, U>::is_supported;
template <typename V>
__attribute__((always_inline)) static V Do(T x, U y) {
// Use the CheckedMulFastAsmOp for full-width 32-bit values, because
// it's fewer instructions than promoting and then saturating.
if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
!IsIntegerArithmeticSafe<uint32_t, T, U>::value) {
V result;
return CheckedMulFastAsmOp<T, U>::Do(x, y, &result)
? result
: CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
}
assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
return saturated_cast<V>(static_cast<Promotion>(x) *
static_cast<Promotion>(y));
}
};
} // namespace partition_alloc::internal::base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_

View File

@ -1,155 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_
#include <cassert>
#include <limits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#if !defined(__native_client__) && (defined(__ARMEL__) || defined(__arch64__))
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_arm_impl.h"
#define PA_BASE_HAS_ASSEMBLER_SAFE_MATH (1)
#else
#define PA_BASE_HAS_ASSEMBLER_SAFE_MATH (0)
#endif
namespace partition_alloc::internal::base::internal {
// These are the non-functioning boilerplate implementations of the optimized
// safe math routines.
#if !PA_BASE_HAS_ASSEMBLER_SAFE_MATH
template <typename T, typename U>
struct CheckedMulFastAsmOp {
static const bool is_supported = false;
template <typename V>
static constexpr bool Do(T, U, V*) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<bool>();
}
};
template <typename T, typename U>
struct ClampedAddFastAsmOp {
static const bool is_supported = false;
template <typename V>
static constexpr V Do(T, U) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<V>();
}
};
template <typename T, typename U>
struct ClampedSubFastAsmOp {
static const bool is_supported = false;
template <typename V>
static constexpr V Do(T, U) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<V>();
}
};
template <typename T, typename U>
struct ClampedMulFastAsmOp {
static const bool is_supported = false;
template <typename V>
static constexpr V Do(T, U) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<V>();
}
};
#endif // PA_BASE_HAS_ASSEMBLER_SAFE_MATH
#undef PA_BASE_HAS_ASSEMBLER_SAFE_MATH
template <typename T, typename U>
struct CheckedAddFastOp {
static const bool is_supported = true;
template <typename V>
__attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
return !__builtin_add_overflow(x, y, result);
}
};
template <typename T, typename U>
struct CheckedSubFastOp {
static const bool is_supported = true;
template <typename V>
__attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
return !__builtin_sub_overflow(x, y, result);
}
};
template <typename T, typename U>
struct CheckedMulFastOp {
#if defined(__clang__)
// TODO(jschuh): Get the Clang runtime library issues sorted out so we can
// support full-width, mixed-sign multiply builtins.
// https://crbug.com/613003
// We can support intptr_t, uintptr_t, or a smaller common type.
static const bool is_supported =
(IsTypeInRangeForNumericType<intptr_t, T>::value &&
IsTypeInRangeForNumericType<intptr_t, U>::value) ||
(IsTypeInRangeForNumericType<uintptr_t, T>::value &&
IsTypeInRangeForNumericType<uintptr_t, U>::value);
#else
static const bool is_supported = true;
#endif
template <typename V>
__attribute__((always_inline)) static constexpr bool Do(T x, U y, V* result) {
return CheckedMulFastAsmOp<T, U>::is_supported
? CheckedMulFastAsmOp<T, U>::Do(x, y, result)
: !__builtin_mul_overflow(x, y, result);
}
};
template <typename T, typename U>
struct ClampedAddFastOp {
static const bool is_supported = ClampedAddFastAsmOp<T, U>::is_supported;
template <typename V>
__attribute__((always_inline)) static V Do(T x, U y) {
return ClampedAddFastAsmOp<T, U>::template Do<V>(x, y);
}
};
template <typename T, typename U>
struct ClampedSubFastOp {
static const bool is_supported = ClampedSubFastAsmOp<T, U>::is_supported;
template <typename V>
__attribute__((always_inline)) static V Do(T x, U y) {
return ClampedSubFastAsmOp<T, U>::template Do<V>(x, y);
}
};
template <typename T, typename U>
struct ClampedMulFastOp {
static const bool is_supported = ClampedMulFastAsmOp<T, U>::is_supported;
template <typename V>
__attribute__((always_inline)) static V Do(T x, U y) {
return ClampedMulFastAsmOp<T, U>::template Do<V>(x, y);
}
};
template <typename T>
struct ClampedNegFastOp {
static const bool is_supported = std::is_signed<T>::value;
__attribute__((always_inline)) static T Do(T value) {
// Use this when there is no assembler path available.
if (!ClampedSubFastAsmOp<T, T>::is_supported) {
T result;
return !__builtin_sub_overflow(T(0), value, &result)
? result
: std::numeric_limits<T>::max();
}
// Fallback to the normal subtraction path.
return ClampedSubFastOp<T, T>::template Do<T>(T(0), value);
}
};
} // namespace partition_alloc::internal::base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_CLANG_GCC_IMPL_H_

View File

@ -1,215 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_
#include <stddef.h>
#include <stdint.h>
#include <cassert>
#include <climits>
#include <cmath>
#include <cstdlib>
#include <limits>
#include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_conversions.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_ASMJS)
// Optimized safe math instructions are incompatible with asmjs.
#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (0)
// Where available use builtin math overflow support on Clang and GCC.
#elif !defined(__native_client__) && \
((defined(__clang__) && \
((__clang_major__ > 3) || \
(__clang_major__ == 3 && __clang_minor__ >= 4))) || \
(defined(__GNUC__) && __GNUC__ >= 5))
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/safe_math_clang_gcc_impl.h"
#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (1)
#else
#define PA_BASE_HAS_OPTIMIZED_SAFE_MATH (0)
#endif
namespace partition_alloc::internal::base::internal {
// These are the non-functioning boilerplate implementations of the optimized
// safe math routines.
#if !PA_BASE_HAS_OPTIMIZED_SAFE_MATH
template <typename T, typename U>
struct CheckedAddFastOp {
static const bool is_supported = false;
template <typename V>
static constexpr bool Do(T, U, V*) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<bool>();
}
};
template <typename T, typename U>
struct CheckedSubFastOp {
static const bool is_supported = false;
template <typename V>
static constexpr bool Do(T, U, V*) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<bool>();
}
};
template <typename T, typename U>
struct CheckedMulFastOp {
static const bool is_supported = false;
template <typename V>
static constexpr bool Do(T, U, V*) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<bool>();
}
};
template <typename T, typename U>
struct ClampedAddFastOp {
static const bool is_supported = false;
template <typename V>
static constexpr V Do(T, U) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<V>();
}
};
template <typename T, typename U>
struct ClampedSubFastOp {
static const bool is_supported = false;
template <typename V>
static constexpr V Do(T, U) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<V>();
}
};
template <typename T, typename U>
struct ClampedMulFastOp {
static const bool is_supported = false;
template <typename V>
static constexpr V Do(T, U) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<V>();
}
};
template <typename T>
struct ClampedNegFastOp {
static const bool is_supported = false;
static constexpr T Do(T) {
// Force a compile failure if instantiated.
return CheckOnFailure::template HandleFailure<T>();
}
};
#endif // PA_BASE_HAS_OPTIMIZED_SAFE_MATH
#undef PA_BASE_HAS_OPTIMIZED_SAFE_MATH
// This is used for UnsignedAbs, where we need to support floating-point
// template instantiations even though we don't actually support the operations.
// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
// so the float versions will not compile.
template <typename Numeric,
bool IsInteger = std::is_integral<Numeric>::value,
bool IsFloat = std::is_floating_point<Numeric>::value>
struct UnsignedOrFloatForSize;
template <typename Numeric>
struct UnsignedOrFloatForSize<Numeric, true, false> {
using type = typename std::make_unsigned<Numeric>::type;
};
template <typename Numeric>
struct UnsignedOrFloatForSize<Numeric, false, true> {
using type = Numeric;
};
// Wrap the unary operations to allow SFINAE when instantiating integrals versus
// floating points. These don't perform any overflow checking. Rather, they
// exhibit well-defined overflow semantics and rely on the caller to detect
// if an overflow occurred.
template <typename T,
typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr T NegateWrapper(T value) {
using UnsignedT = typename std::make_unsigned<T>::type;
// This will compile to a NEG on Intel, and is normal negation on ARM.
return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
}
template <
typename T,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
constexpr T NegateWrapper(T value) {
return -value;
}
template <typename T,
typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
return ~value;
}
template <typename T,
typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
constexpr T AbsWrapper(T value) {
return static_cast<T>(SafeUnsignedAbs(value));
}
template <
typename T,
typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
constexpr T AbsWrapper(T value) {
return value < 0 ? -value : value;
}
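// Illustrative checks (a sketch added for exposition, not in the original
// header; assumes two's complement int): the wrappers wrap rather than trap,
// so negating INT_MIN yields INT_MIN again; callers detect such overflow
// separately.
static_assert(NegateWrapper(std::numeric_limits<int>::min()) ==
                  std::numeric_limits<int>::min(),
              "");
static_assert(AbsWrapper(-5) == 5, "");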
template <template <typename, typename, typename> class M,
typename L,
typename R>
struct MathWrapper {
using math = M<typename UnderlyingType<L>::type,
typename UnderlyingType<R>::type,
void>;
using type = typename math::result_type;
};
// The following macros are just boilerplate for the standard arithmetic
// operator overloads and variadic function templates. A macro isn't the nicest
// solution, but it beats rewriting these over and over again.
#define PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME) \
template <typename L, typename R, typename... Args> \
constexpr auto CL_ABBR##OP_NAME(const L lhs, const R rhs, \
const Args... args) { \
return CL_ABBR##MathOp<CLASS##OP_NAME##Op, L, R, Args...>(lhs, rhs, \
args...); \
}
#define PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(CLASS, CL_ABBR, OP_NAME, OP, \
CMP_OP) \
/* Binary arithmetic operator for all CLASS##Numeric operations. */ \
template <typename L, typename R, \
typename std::enable_if<Is##CLASS##Op<L, R>::value>::type* = \
nullptr> \
constexpr CLASS##Numeric< \
typename MathWrapper<CLASS##OP_NAME##Op, L, R>::type> \
operator OP(const L lhs, const R rhs) { \
return decltype(lhs OP rhs)::template MathOp<CLASS##OP_NAME##Op>(lhs, \
rhs); \
} \
/* Assignment arithmetic operator implementation from CLASS##Numeric. */ \
template <typename L> \
template <typename R> \
constexpr CLASS##Numeric<L>& CLASS##Numeric<L>::operator CMP_OP( \
const R rhs) { \
return MathOp<CLASS##OP_NAME##Op>(rhs); \
} \
/* Variadic arithmetic functions that return CLASS##Numeric. */ \
PA_BASE_NUMERIC_ARITHMETIC_VARIADIC(CLASS, CL_ABBR, OP_NAME)
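// Illustrative expansion (exposition only, not in the original header):
// PA_BASE_NUMERIC_ARITHMETIC_OPERATORS(Checked, Check, Add, +, +=) produces an
// operator+ overload returning CheckedNumeric, the matching
// CheckedNumeric<L>::operator+= assignment form, and a variadic
// CheckAdd(lhs, rhs, args...) helper that forwards to CheckMathOp<CheckedAddOp>.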
} // namespace partition_alloc::internal::base::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_NUMERICS_SAFE_MATH_SHARED_IMPL_H_

View File

@ -1,58 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This provides a wrapper around system calls which may be interrupted by a
// signal and return EINTR. See man 7 signal.
// To prevent long-lasting loops (which would likely be a bug, such as a signal
// that should be masked) from going unnoticed, there is a limit after which
// the caller will nonetheless see an EINTR in Debug builds.
//
// On Windows and Fuchsia, this wrapper macro does nothing because there are no
// signals.
//
// Don't wrap close calls in HANDLE_EINTR. Use IGNORE_EINTR if the return
// value of close is significant. See http://crbug.com/269623.
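// Example usage (exposition only, not part of the original header):
//   ssize_t bytes_read = PA_HANDLE_EINTR(read(fd, buffer, sizeof(buffer)));
// The macro re-issues the call while it fails with errno == EINTR, so the
// caller only ever sees a real result or a non-EINTR error.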
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_
#include "build/build_config.h"
#if BUILDFLAG(IS_POSIX)
#include <errno.h>
#if defined(NDEBUG)
#define PA_HANDLE_EINTR(x) \
({ \
decltype(x) eintr_wrapper_result; \
do { \
eintr_wrapper_result = (x); \
} while (eintr_wrapper_result == -1 && errno == EINTR); \
eintr_wrapper_result; \
})
#else
#define PA_HANDLE_EINTR(x) \
({ \
int eintr_wrapper_counter = 0; \
decltype(x) eintr_wrapper_result; \
do { \
eintr_wrapper_result = (x); \
} while (eintr_wrapper_result == -1 && errno == EINTR && \
eintr_wrapper_counter++ < 100); \
eintr_wrapper_result; \
})
#endif // NDEBUG
#else // !BUILDFLAG(IS_POSIX)
#define PA_HANDLE_EINTR(x) (x)
#endif // !BUILDFLAG(IS_POSIX)
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_EINTR_WRAPPER_H_

View File

@ -1,113 +0,0 @@
// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/posix/safe_strerror.h"
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include "build/build_config.h"
namespace partition_alloc::internal::base {
#if defined(__GLIBC__) || BUILDFLAG(IS_NACL)
#define USE_HISTORICAL_STRERROR_R 1
// Post-L versions of bionic define the GNU-specific strerror_r if _GNU_SOURCE
// is defined, but the symbol is renamed to __gnu_strerror_r which only exists
// on those later versions. For parity, add the same condition as bionic.
#elif defined(__BIONIC__) && defined(_GNU_SOURCE) && __ANDROID_API__ >= 23
#define USE_HISTORICAL_STRERROR_R 1
#else
#define USE_HISTORICAL_STRERROR_R 0
#endif
#if USE_HISTORICAL_STRERROR_R
// glibc has two strerror_r functions: a historical GNU-specific one that
// returns type char *, and a POSIX.1-2001 compliant one available since 2.3.4
// that returns int. This wraps the GNU-specific one.
[[maybe_unused]] static void wrap_posix_strerror_r(
char* (*strerror_r_ptr)(int, char*, size_t),
int err,
char* buf,
size_t len) {
// GNU version.
char* rc = (*strerror_r_ptr)(err, buf, len);
if (rc != buf) {
// glibc did not use buf and returned a static string instead. Copy it
// into buf.
buf[0] = '\0';
strncat(buf, rc, len - 1);
}
// The GNU version never fails. Unknown errors get an "unknown error" message.
// The result is always null terminated.
}
#endif // USE_HISTORICAL_STRERROR_R
// Wrapper for strerror_r functions that implement the POSIX interface. POSIX
// does not define the behaviour for some of the edge cases, so we wrap it to
// guarantee that they are handled. This is compiled on all POSIX platforms, but
// it will only be used on Linux if the POSIX strerror_r implementation is
// being used (see below).
[[maybe_unused]] static void wrap_posix_strerror_r(
int (*strerror_r_ptr)(int, char*, size_t),
int err,
char* buf,
size_t len) {
int old_errno = errno;
// Have to cast since otherwise we get an error if this is the GNU version
// (but in such a scenario this function is never called). Sadly we can't use
// C++-style casts because the appropriate one is reinterpret_cast but it's
// considered illegal to reinterpret_cast a type to itself, so we get an
// error in the opposite case.
int result = (*strerror_r_ptr)(err, buf, len);
if (result == 0) {
// POSIX is vague about whether the string will be terminated, although
// it indirectly implies that typically ERANGE will be returned, instead
// of truncating the string. We play it safe by always terminating the
// string explicitly.
buf[len - 1] = '\0';
} else {
// Error. POSIX is vague about whether the return value is itself a system
// error code or something else. On Linux currently it is -1 and errno is
// set. On BSD-derived systems it is a system error and errno is unchanged.
// We try and detect which case it is so as to put as much useful info as
// we can into our message.
int strerror_error; // The error encountered in strerror
int new_errno = errno;
if (new_errno != old_errno) {
// errno was changed, so probably the return value is just -1 or something
// else that doesn't provide any info, and errno is the error.
strerror_error = new_errno;
} else {
// Either the error from strerror_r was the same as the previous value, or
// errno wasn't used. Assume the latter.
strerror_error = result;
}
// snprintf truncates and always null-terminates.
snprintf(buf, len, "Error %d while retrieving error %d", strerror_error,
err);
}
errno = old_errno;
}
void safe_strerror_r(int err, char* buf, size_t len) {
if (buf == nullptr || len <= 0) {
return;
}
// If using glibc (i.e., Linux), the compiler will automatically select the
// appropriate overloaded function based on the function type of strerror_r.
// The other one will be elided from the translation unit since both are
// static.
wrap_posix_strerror_r(&strerror_r, err, buf, len);
}
std::string safe_strerror(int err) {
const int buffer_size = 256;
char buf[buffer_size];
safe_strerror_r(err, buf, sizeof(buf));
return std::string(buf);
}
} // namespace partition_alloc::internal::base

View File

@ -1,45 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_
#include <stddef.h>
#include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/base_export.h"
namespace partition_alloc::internal::base {
// BEFORE using anything from this file, first look at PLOG and friends in
// logging.h and use them instead if applicable.
//
// This file declares safe, portable alternatives to the POSIX strerror()
// function. strerror() is inherently unsafe in multi-threaded apps and should
// never be used. Doing so can cause crashes. Additionally, the thread-safe
// alternative strerror_r varies in semantics across platforms. Use these
// functions instead.
// Thread-safe strerror function with dependable semantics that never fails.
// It will write the string form of error "err" to buffer buf of length len.
// If there is an error calling the OS's strerror_r() function then a message to
// that effect will be printed into buf, truncating if necessary. The final
// result is always null-terminated. The value of errno is never changed.
//
// Use this instead of strerror_r().
BASE_EXPORT void safe_strerror_r(int err, char* buf, size_t len);
// Calls safe_strerror_r with a buffer of suitable size and returns the result
// in a C++ string.
//
// Use this instead of strerror(). Note though that safe_strerror_r will be
// more robust in the case of heap corruption errors, since it doesn't need to
// allocate a string.
BASE_EXPORT std::string safe_strerror(int err);
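// Example usage (exposition only, not part of the original header):
//   char buf[256];
//   safe_strerror_r(ENOENT, buf, sizeof(buf));    // e.g. "No such file or directory"
//   std::string message = safe_strerror(EACCES);  // e.g. "Permission denied"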
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_POSIX_SAFE_STRERROR_H_

View File

@ -1,74 +0,0 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
#include <limits.h>
#include <math.h>
#include <stdint.h>
#include <limits>
#include "base/check_op.h"
namespace partition_alloc::internal::base {
uint64_t RandUint64() {
uint64_t number;
RandBytes(&number, sizeof(number));
return number;
}
uint64_t RandGenerator(uint64_t range) {
DCHECK_GT(range, 0u);
// We must discard random results above this number, as they would
// make the random generator non-uniform (consider e.g. if
// MAX_UINT64 was 7 and |range| was 5, then a result of 1 would be twice
// as likely as a result of 3 or 4).
uint64_t max_acceptable_value =
(std::numeric_limits<uint64_t>::max() / range) * range - 1;
uint64_t value;
do {
value = base::RandUint64();
} while (value > max_acceptable_value);
return value % range;
}
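// Worked example (exposition only, by analogy with a 3-bit generator): if the
// generator produced values 0..7 and |range| were 3, max_acceptable_value
// would be (7 / 3) * 3 - 1 = 5, so 6 and 7 are rejected and each residue
// 0, 1, 2 keeps probability exactly 1/3.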
InsecureRandomGenerator::InsecureRandomGenerator() {
a_ = base::RandUint64();
b_ = base::RandUint64();
}
void InsecureRandomGenerator::ReseedForTesting(uint64_t seed) {
a_ = seed;
b_ = seed;
}
uint64_t InsecureRandomGenerator::RandUint64() {
// Using XorShift128+, which is simple and widely used. See
// https://en.wikipedia.org/wiki/Xorshift#xorshift+ for details.
uint64_t t = a_;
const uint64_t s = b_;
a_ = s;
t ^= t << 23;
t ^= t >> 17;
t ^= s ^ (s >> 26);
b_ = t;
return t + s;
}
uint32_t InsecureRandomGenerator::RandUint32() {
// The generator returns a uint64_t; truncate it to 32 bits.
//
// It is noted in this paper (https://arxiv.org/abs/1810.05313) that the
// lowest 32 bits fail some statistical tests from the Big Crush
// suite. Use the higher ones instead.
return this->RandUint64() >> 32;
}
} // namespace partition_alloc::internal::base

View File

@ -1,95 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_RAND_UTIL_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_RAND_UTIL_H_
#include <stddef.h>
#include <stdint.h>
#include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/base_export.h"
#include "build/build_config.h"
namespace partition_alloc {
class RandomGenerator;
} // namespace partition_alloc
namespace partition_alloc::internal::base {
// Returns a random number in range [0, UINT64_MAX]. Thread-safe.
BASE_EXPORT uint64_t RandUint64();
// Returns a random number in range [0, range). Thread-safe.
BASE_EXPORT uint64_t RandGenerator(uint64_t range);
// Fills |output_length| bytes of |output| with random data. Thread-safe.
//
// Although implementations are required to use a cryptographically secure
// random number source, code outside of base/ that relies on this should use
// crypto::RandBytes instead to ensure the requirement is easily discoverable.
BASE_EXPORT void RandBytes(void* output, size_t output_length);
// Fast, insecure pseudo-random number generator.
//
// WARNING: This is not the generator you are looking for. This has significant
// caveats:
// - It is non-cryptographic, so it is easy to misuse.
// - It is neither fork() nor clone()-safe.
// - Synchronization is up to the client.
//
// Always prefer base::Rand*() above, unless you have a use case where its
// overhead is too high, or system calls are disallowed.
//
// Performance: As of 2021, rough overhead on Linux on a desktop machine of
// base::RandUint64() is ~800ns per call (it performs a system call). On Windows
// it is lower. On the same machine, this generator's cost is ~2ns per call,
// regardless of platform.
//
// This is different from |Rand*()| above as it is guaranteed to never make a
// system call to generate a new number, except to seed it. This should *never*
// be used for cryptographic applications, and is not thread-safe.
//
// It is seeded using base::RandUint64() in the constructor, meaning that it
// doesn't need to be seeded. It can be re-seeded though, with
// ReseedForTesting(). Its period is long enough that it should not need to be
// re-seeded during use.
//
// Uses the XorShift128+ generator under the hood.
class BASE_EXPORT InsecureRandomGenerator {
public:
// Never use outside testing, not enough entropy.
void ReseedForTesting(uint64_t seed);
uint32_t RandUint32();
uint64_t RandUint64();
private:
InsecureRandomGenerator();
// State.
uint64_t a_ = 0, b_ = 0;
// Before adding a new friend class, make sure that the overhead of
// base::Rand*() is too high, using something more representative than a
// microbenchmark.
//
// PartitionAlloc allocations should not take more than 40-50ns per
// malloc()/free() pair; otherwise high-level benchmarks regress.
// PartitionAlloc also does not need a secure PRNG, as this generator is used
// for ASLR and for zeroing some allocations at free() time.
friend class ::partition_alloc::RandomGenerator;
PA_FRIEND_TEST_ALL_PREFIXES(
PartitionAllocBaseRandUtilTest,
InsecureRandomGeneratorProducesBothValuesOfAllBits);
PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocBaseRandUtilTest,
InsecureRandomGeneratorChiSquared);
PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocBaseRandUtilTest,
InsecureRandomGeneratorRandDouble);
};
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_RAND_UTIL_H_

View File

@ -1,15 +0,0 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
#include <zircon/syscalls.h>
namespace partition_alloc::internal::base {
void RandBytes(void* output, size_t output_length) {
zx_cprng_draw(output, output_length);
}
} // namespace partition_alloc::internal::base

View File

@ -1,104 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <unistd.h>
#include <sstream>
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL)
#include "third_party/lss/linux_syscall_support.h"
#elif BUILDFLAG(IS_MAC)
// TODO(crbug.com/995996): Waiting for this header to appear in the iOS SDK.
// (See below.)
#include <sys/random.h>
#endif
namespace {
#if BUILDFLAG(IS_AIX)
// AIX has no 64-bit support for O_CLOEXEC.
static constexpr int kOpenFlags = O_RDONLY;
#else
static constexpr int kOpenFlags = O_RDONLY | O_CLOEXEC;
#endif
// We keep the file descriptor for /dev/urandom around so we don't need to
// reopen it (which is expensive), and since we may not even be able to reopen
// it if we are later put in a sandbox. This class wraps the file descriptor so
// we can use a static-local variable to handle opening it on the first access.
class URandomFd {
public:
URandomFd() : fd_(PA_HANDLE_EINTR(open("/dev/urandom", kOpenFlags))) {
CHECK(fd_ >= 0) << "Cannot open /dev/urandom";
}
~URandomFd() { close(fd_); }
int fd() const { return fd_; }
private:
const int fd_;
};
int GetUrandomFD() {
static partition_alloc::internal::base::NoDestructor<URandomFd> urandom_fd;
return urandom_fd->fd();
}
} // namespace
namespace partition_alloc::internal::base {
// NOTE: In an ideal future, all implementations of this function will just
// wrap BoringSSL's `RAND_bytes`. TODO(crbug.com/995996): Figure out the
// build/test/performance issues with dcheng's CL
// (https://chromium-review.googlesource.com/c/chromium/src/+/1545096) and land
// it or some form of it.
void RandBytes(void* output, size_t output_length) {
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && !BUILDFLAG(IS_NACL)
// We have to call `getrandom` via Linux Syscall Support, rather than through
// the libc wrapper, because we might not have an up-to-date libc (e.g. on
// some bots).
const ssize_t r = PA_HANDLE_EINTR(sys_getrandom(output, output_length, 0));
// Return success only on total success. In case errno == ENOSYS (or any other
// error), we'll fall through to reading from urandom below.
if (output_length == static_cast<size_t>(r)) {
MSAN_UNPOISON(output, output_length);
return;
}
#elif BUILDFLAG(IS_MAC)
// TODO(crbug.com/995996): Enable this on iOS too, when sys/random.h arrives
// in its SDK.
if (__builtin_available(macOS 10.12, *)) {
if (getentropy(output, output_length) == 0) {
return;
}
}
#endif
// If the OS-specific mechanisms didn't work, fall through to reading from
// urandom.
//
// TODO(crbug.com/995996): When we no longer need to support old Linux
// kernels, we can get rid of this /dev/urandom branch altogether.
const int urandom_fd = GetUrandomFD();
const bool success =
ReadFromFD(urandom_fd, static_cast<char*>(output), output_length);
CHECK(success);
}
} // namespace partition_alloc::internal::base
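A simplified, self-contained sketch of the same fallback strategy (an assumption, not the code under review): it calls the libc getrandom() wrapper, which the real implementation deliberately avoids because the build may run against an old libc, and it reopens /dev/urandom instead of caching the descriptor in a NoDestructor-wrapped object.

#include <fcntl.h>
#include <sys/random.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>

void RandBytesSketch(void* output, size_t output_length) {
  // Fast path: getrandom() is available on Linux 3.17+ / glibc 2.25+.
  ssize_t r = getrandom(output, output_length, 0);
  if (r >= 0 && static_cast<size_t>(r) == output_length)
    return;

  // Fallback: read the requested number of bytes from /dev/urandom.
  int fd = open("/dev/urandom", O_RDONLY | O_CLOEXEC);
  assert(fd >= 0);
  size_t total = 0;
  while (total < output_length) {
    ssize_t n = read(fd, static_cast<char*>(output) + total,
                     output_length - total);
    assert(n > 0);
    total += static_cast<size_t>(n);
  }
  close(fd);
}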

View File

@ -1,38 +0,0 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
#include <stddef.h>
#include <stdint.h>
#include <windows.h>
// #define needed to link in RtlGenRandom(), a.k.a. SystemFunction036. See the
// "Community Additions" comment on MSDN here:
// http://msdn.microsoft.com/en-us/library/windows/desktop/aa387694.aspx
#define SystemFunction036 NTAPI SystemFunction036
#include <NTSecAPI.h>
#undef SystemFunction036
#include <algorithm>
#include <limits>
#include "base/check.h"
namespace partition_alloc::internal::base {
void RandBytes(void* output, size_t output_length) {
char* output_ptr = static_cast<char*>(output);
while (output_length > 0) {
const ULONG output_bytes_this_pass = static_cast<ULONG>(std::min(
output_length, static_cast<size_t>(std::numeric_limits<ULONG>::max())));
const bool success =
RtlGenRandom(output_ptr, output_bytes_this_pass) != FALSE;
CHECK(success);
output_length -= output_bytes_this_pass;
output_ptr += output_bytes_this_pass;
}
}
} // namespace partition_alloc::internal::base

View File

@ -1,56 +0,0 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
#include <errno.h>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/base_export.h"
#include "build/build_config.h"
namespace partition_alloc::internal::base {
// ScopedClearLastError stores and resets the value of thread local error codes
// (errno, GetLastError()), and restores them in the destructor. This is useful
// to avoid side effects on these values in instrumentation functions that
// interact with the OS.
// Common implementation of ScopedClearLastError for all platforms. Use
// ScopedClearLastError instead.
class BASE_EXPORT ScopedClearLastErrorBase {
public:
ScopedClearLastErrorBase() : last_errno_(errno) { errno = 0; }
ScopedClearLastErrorBase(const ScopedClearLastErrorBase&) = delete;
ScopedClearLastErrorBase& operator=(const ScopedClearLastErrorBase&) = delete;
~ScopedClearLastErrorBase() { errno = last_errno_; }
private:
const int last_errno_;
};
#if BUILDFLAG(IS_WIN)
// Windows specific implementation of ScopedClearLastError.
class BASE_EXPORT ScopedClearLastError : public ScopedClearLastErrorBase {
public:
ScopedClearLastError();
ScopedClearLastError(const ScopedClearLastError&) = delete;
ScopedClearLastError& operator=(const ScopedClearLastError&) = delete;
~ScopedClearLastError();
private:
const unsigned long last_system_error_;
};
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
using ScopedClearLastError = ScopedClearLastErrorBase;
#endif // BUILDFLAG(IS_WIN)
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_SCOPED_CLEAR_LAST_ERROR_H_
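A usage sketch (an assumption, not from the diff): instrumentation code wraps its own OS calls in a ScopedClearLastError so the errno (and, on Windows, GetLastError()) value observed by the code being instrumented is left untouched.

#include <errno.h>
#include <stdio.h>

void HookThatMustNotClobberErrno(const char* message) {
  partition_alloc::internal::base::ScopedClearLastError scoped_error;
  // errno is now 0; whatever the call below sets is discarded when
  // `scoped_error` is destroyed and the caller's original errno is restored.
  fprintf(stderr, "%s\n", message);
}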

View File

@ -1,20 +0,0 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
#include <windows.h>
namespace partition_alloc::internal::base {
ScopedClearLastError::ScopedClearLastError()
: ScopedClearLastErrorBase(), last_system_error_(GetLastError()) {
SetLastError(0);
}
ScopedClearLastError::~ScopedClearLastError() {
SetLastError(last_system_error_);
}
} // namespace partition_alloc::internal::base

View File

@ -9,9 +9,8 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/check.h" #include "base/check.h"
#include "base/dcheck_is_on.h" #include "base/debug/alias.h"
#include "base/immediate_crash.h" #include "base/immediate_crash.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -39,7 +38,7 @@
// could operate with inconsistent allocator state. // could operate with inconsistent allocator state.
#define PA_CHECK(condition) \ #define PA_CHECK(condition) \
UNLIKELY(!(condition)) \ UNLIKELY(!(condition)) \
? ::logging::RawCheck( \ ? logging::RawCheck( \
__FILE__ "(" PA_STRINGIFY(__LINE__) ") Check failed: " #condition) \ __FILE__ "(" PA_STRINGIFY(__LINE__) ") Check failed: " #condition) \
: EAT_CHECK_STREAM_PARAMS() : EAT_CHECK_STREAM_PARAMS()
#endif // defined(OFFICIAL_BUILD) && defined(NDEBUG) #endif // defined(OFFICIAL_BUILD) && defined(NDEBUG)
@ -53,7 +52,7 @@
#define PA_PCHECK(condition) \ #define PA_PCHECK(condition) \
if (!(condition)) { \ if (!(condition)) { \
int error = errno; \ int error = errno; \
::partition_alloc::internal::base::debug::Alias(&error); \ base::debug::Alias(&error); \
IMMEDIATE_CRASH(); \ IMMEDIATE_CRASH(); \
} }
@ -158,6 +157,6 @@ struct PA_DEBUGKV_ALIGN DebugKv {
// to see the data. With lldb, "x <STACK_POINTER> <FRAME_POINTER>" can be used. // to see the data. With lldb, "x <STACK_POINTER> <FRAME_POINTER>" can be used.
#define PA_DEBUG_DATA_ON_STACK(name, value) \ #define PA_DEBUG_DATA_ON_STACK(name, value) \
::partition_alloc::internal::DebugKv PA_DEBUG_UNIQUE_NAME{name, value}; \ ::partition_alloc::internal::DebugKv PA_DEBUG_UNIQUE_NAME{name, value}; \
::partition_alloc::internal::base::debug::Alias(&PA_DEBUG_UNIQUE_NAME); ::base::debug::Alias(&PA_DEBUG_UNIQUE_NAME);
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
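An illustrative call site (assumed, not part of the diff): PA_DEBUG_DATA_ON_STACK stores a short key/value pair in an aliased stack-allocated struct, so the values survive into a crash dump taken further down the call chain.

void FreeSlowPathSketch(size_t slot_size, size_t bucket_index) {
  // Hypothetical example values; the key is copied into a small fixed-size
  // buffer inside DebugKv.
  PA_DEBUG_DATA_ON_STACK("slotsize", slot_size);
  PA_DEBUG_DATA_ON_STACK("bucket", bucket_index);
  // ... slow-path work that might crash; the pairs above remain readable at
  // the recorded stack/frame pointers in the dump ...
}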

View File

@ -23,7 +23,7 @@
namespace partition_alloc { namespace partition_alloc {
// Bit flag constants used as `flag` argument of PartitionRoot::AllocWithFlags, // Bit flag constants used at `flag` argument of PartitionRoot::AllocWithFlags,
// AlignedAllocWithFlags, etc. // AlignedAllocWithFlags, etc.
struct AllocFlags { struct AllocFlags {
// In order to support bit operations like `flag_a | flag_b`, the old- // In order to support bit operations like `flag_a | flag_b`, the old-
@ -31,16 +31,12 @@ struct AllocFlags {
enum : int { enum : int {
kReturnNull = 1 << 0, kReturnNull = 1 << 0,
kZeroFill = 1 << 1, kZeroFill = 1 << 1,
// Don't allow allocation override hooks. Override hooks are expected to kNoHooks = 1 << 2, // Internal only.
// check for the presence of this flag and return false if it is active.
kNoOverrideHooks = 1 << 2,
// Don't allow any hooks (override or observers).
kNoHooks = 1 << 3, // Internal only.
// If the allocation requires a "slow path" (such as allocating/committing a // If the allocation requires a "slow path" (such as allocating/committing a
// new slot span), return nullptr instead. Note this makes all large // new slot span), return nullptr instead. Note this makes all large
// allocations return nullptr, such as direct-mapped ones, and even for // allocations return nullptr, such as direct-mapped ones, and even for
// smaller ones, a nullptr value is common. // smaller ones, a nullptr value is common.
kFastPathOrReturnNull = 1 << 4, // Internal only. kFastPathOrReturnNull = 1 << 3, // Internal only.
kLastFlag = kFastPathOrReturnNull kLastFlag = kFastPathOrReturnNull
}; };
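A hedged caller sketch (the exact AllocWithFlags overload and the already-initialized `root` pointer are assumptions, not taken from this diff): because the flags are plain enum ints, callers combine them with bitwise OR; the two flags used here exist in both revisions being compared.

// Ask for a zero-filled allocation and a nullptr result instead of a crash
// if the partition cannot satisfy the request.
void* maybe_object = root->AllocWithFlags(
    partition_alloc::AllocFlags::kReturnNull |
        partition_alloc::AllocFlags::kZeroFill,
    /*requested_size=*/4096, /*type_name=*/"ExampleType");
if (!maybe_object) {
  // Handle allocation failure without OOM-crashing.
}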
@ -429,16 +425,65 @@ constexpr size_t kInvalidBucketSize = 1;
} // namespace internal } // namespace internal
// These constants are used outside PartitionAlloc itself, so we provide
// non-internal aliases here.
using ::partition_alloc::internal::kInvalidBucketSize;
using ::partition_alloc::internal::kMaxSuperPagesInPool;
using ::partition_alloc::internal::kMaxSupportedAlignment;
using ::partition_alloc::internal::kNumBuckets;
using ::partition_alloc::internal::kSuperPageSize;
using ::partition_alloc::internal::MaxDirectMapped;
using ::partition_alloc::internal::PartitionPageSize;
} // namespace partition_alloc } // namespace partition_alloc
namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::partition_alloc::AllocFlags;
using ::partition_alloc::internal::DirectMapAllocationGranularity;
using ::partition_alloc::internal::DirectMapAllocationGranularityOffsetMask;
using ::partition_alloc::internal::DirectMapAllocationGranularityShift;
#if defined(PA_HAS_MEMORY_TAGGING)
using ::partition_alloc::internal::HasOverflowTag;
#endif // defined(PA_HAS_MEMORY_TAGGING)
using ::partition_alloc::internal::kBitsPerSizeT;
using ::partition_alloc::internal::kBRPPoolHandle;
using ::partition_alloc::internal::kConfigurablePoolHandle;
using ::partition_alloc::internal::kDefaultEmptySlotSpanRingSize;
using ::partition_alloc::internal::kEmptyCacheIndexBits;
using ::partition_alloc::internal::kFreedByte;
using ::partition_alloc::internal::kGiB;
using ::partition_alloc::internal::kInvalidBucketSize;
using ::partition_alloc::internal::kMaxBucketed;
using ::partition_alloc::internal::kMaxBucketedOrder;
using ::partition_alloc::internal::kMaxBucketSpacing;
using ::partition_alloc::internal::kMaxFreeableSpans;
using ::partition_alloc::internal::kMaxMemoryTaggingSize;
using ::partition_alloc::internal::kMaxPartitionPagesPerRegularSlotSpan;
using ::partition_alloc::internal::kMaxSuperPagesInPool;
using ::partition_alloc::internal::kMaxSupportedAlignment;
using ::partition_alloc::internal::kMinBucketedOrder;
using ::partition_alloc::internal::kMinDirectMappedDownsize;
using ::partition_alloc::internal::kNumBucketedOrders;
using ::partition_alloc::internal::kNumBuckets;
using ::partition_alloc::internal::kNumBucketsPerOrder;
using ::partition_alloc::internal::kNumBucketsPerOrderBits;
using ::partition_alloc::internal::kNumPools;
using ::partition_alloc::internal::kPartitionCachelineSize;
using ::partition_alloc::internal::kPoolMaxSize;
using ::partition_alloc::internal::kQuarantinedByte;
using ::partition_alloc::internal::kReasonableSizeOfUnusedPages;
using ::partition_alloc::internal::kRegularPoolHandle;
using ::partition_alloc::internal::kSmallestBucket;
using ::partition_alloc::internal::kSuperPageAlignment;
using ::partition_alloc::internal::kSuperPageBaseMask;
using ::partition_alloc::internal::kSuperPageOffsetMask;
using ::partition_alloc::internal::kSuperPageShift;
using ::partition_alloc::internal::kSuperPageSize;
using ::partition_alloc::internal::kUninitializedByte;
using ::partition_alloc::internal::MaxDirectMapped;
using ::partition_alloc::internal::MaxRegularSlotSpanSize;
using ::partition_alloc::internal::MaxSuperPagesInPool;
using ::partition_alloc::internal::MaxSystemPagesPerRegularSlotSpan;
using ::partition_alloc::internal::NumPartitionPagesPerSuperPage;
using ::partition_alloc::internal::NumSystemPagesPerPartitionPage;
using ::partition_alloc::internal::PartitionPageBaseMask;
using ::partition_alloc::internal::PartitionPageOffsetMask;
using ::partition_alloc::internal::PartitionPageShift;
using ::partition_alloc::internal::PartitionPageSize;
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_

View File

@ -46,7 +46,7 @@ struct SlotSpanMetadata;
#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \ #if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
BUILDFLAG(USE_BACKUP_REF_PTR) BUILDFLAG(USE_BACKUP_REF_PTR)
BASE_EXPORT void CheckThatSlotOffsetIsZero(uintptr_t address); void CheckThatSlotOffsetIsZero(uintptr_t address);
#endif #endif
} // namespace internal } // namespace internal
@ -65,6 +65,21 @@ namespace base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once // TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done. // the migration to the new namespaces gets done.
using ::partition_alloc::PartitionRoot; using ::partition_alloc::PartitionRoot;
using ::partition_alloc::PartitionStatsDumper;
using ::partition_alloc::ThreadSafePartitionRoot;
using ::partition_alloc::internal::kAlignment;
namespace internal {
using ::partition_alloc::internal::SlotSpanMetadata;
using ::partition_alloc::internal::ThreadSafe;
#if (DCHECK_IS_ON() || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)) && \
BUILDFLAG(USE_BACKUP_REF_PTR)
using ::partition_alloc::internal::CheckThatSlotOffsetIsZero;
#endif
} // namespace internal
} // namespace base } // namespace base

View File

@ -17,7 +17,7 @@
#if BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED) #if BUILDFLAG(ENABLE_LOG_ERROR_NOT_REACHED)
#define PA_NOTREACHED() \ #define PA_NOTREACHED() \
true ? ::logging::RawError(__FILE__ \ true ? logging::RawError(__FILE__ \
"(" PA_STRINGIFY(__LINE__) ") NOTREACHED() hit.") \ "(" PA_STRINGIFY(__LINE__) ") NOTREACHED() hit.") \
: EAT_CHECK_STREAM_PARAMS() : EAT_CHECK_STREAM_PARAMS()
@ -39,7 +39,7 @@
// So define PA_NOTREACHED() by using async-signal-safe RawCheck(). // So define PA_NOTREACHED() by using async-signal-safe RawCheck().
#define PA_NOTREACHED() \ #define PA_NOTREACHED() \
UNLIKELY(true) \ UNLIKELY(true) \
? ::logging::RawCheck(__FILE__ \ ? logging::RawCheck(__FILE__ \
"(" PA_STRINGIFY(__LINE__) ") NOTREACHED() hit.") \ "(" PA_STRINGIFY(__LINE__) ") NOTREACHED() hit.") \
: EAT_CHECK_STREAM_PARAMS() : EAT_CHECK_STREAM_PARAMS()

View File

@ -8,13 +8,12 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h" #include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/oom.h" #include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc.h" #include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -27,6 +26,7 @@
#include "base/allocator/partition_allocator/starscan/state_bitmap.h" #include "base/allocator/partition_allocator/starscan/state_bitmap.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "base/check.h" #include "base/check.h"
#include "base/debug/alias.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -37,7 +37,7 @@ template <bool thread_safe>
[[noreturn]] NOINLINE void PartitionOutOfMemoryMappingFailure( [[noreturn]] NOINLINE void PartitionOutOfMemoryMappingFailure(
PartitionRoot<thread_safe>* root, PartitionRoot<thread_safe>* root,
size_t size) LOCKS_EXCLUDED(root->lock_) { size_t size) LOCKS_EXCLUDED(root->lock_) {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
root->OutOfMemory(size); root->OutOfMemory(size);
IMMEDIATE_CRASH(); // Not required, kept as documentation. IMMEDIATE_CRASH(); // Not required, kept as documentation.
} }
@ -46,7 +46,7 @@ template <bool thread_safe>
[[noreturn]] NOINLINE void PartitionOutOfMemoryCommitFailure( [[noreturn]] NOINLINE void PartitionOutOfMemoryCommitFailure(
PartitionRoot<thread_safe>* root, PartitionRoot<thread_safe>* root,
size_t size) LOCKS_EXCLUDED(root->lock_) { size_t size) LOCKS_EXCLUDED(root->lock_) {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
root->OutOfMemory(size); root->OutOfMemory(size);
IMMEDIATE_CRASH(); // Not required, kept as documentation. IMMEDIATE_CRASH(); // Not required, kept as documentation.
} }

View File

@ -7,7 +7,7 @@
#include <cstdint> #include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" #include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"

View File

@ -8,7 +8,6 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {

View File

@ -9,9 +9,9 @@
#include <cstdint> #include <cstdint>
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/base/sys_byteorder.h"
#include "base/allocator/partition_allocator/partition_alloc-inl.h" #include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/sys_byteorder.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"

View File

@ -11,7 +11,6 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/spinning_mutex.h" #include "base/allocator/partition_allocator/spinning_mutex.h"
#include "base/dcheck_is_on.h"
#include "base/thread_annotations.h" #include "base/thread_annotations.h"
#include "base/threading/platform_thread.h" #include "base/threading/platform_thread.h"
#include "build/build_config.h" #include "build/build_config.h"

View File

@ -5,8 +5,8 @@
#include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/oom.h" #include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -14,20 +14,20 @@ namespace partition_alloc::internal {
OomFunction g_oom_handling_function = nullptr; OomFunction g_oom_handling_function = nullptr;
NOINLINE void NOT_TAIL_CALLED PartitionExcessiveAllocationSize(size_t size) { NOINLINE void NOT_TAIL_CALLED PartitionExcessiveAllocationSize(size_t size) {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
OOM_CRASH(size); OOM_CRASH(size);
} }
#if !defined(ARCH_CPU_64_BITS) #if !defined(ARCH_CPU_64_BITS)
NOINLINE void NOT_TAIL_CALLED NOINLINE void NOT_TAIL_CALLED
PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) { PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
OOM_CRASH(size); OOM_CRASH(size);
} }
[[noreturn]] NOINLINE void NOT_TAIL_CALLED [[noreturn]] NOINLINE void NOT_TAIL_CALLED
PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) { PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
OOM_CRASH(virtual_size); OOM_CRASH(virtual_size);
} }

View File

@ -9,10 +9,10 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h" #include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"

View File

@ -13,8 +13,8 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager.h" #include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h" #include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
@ -117,9 +117,8 @@ using AllocationStateMap =
// booted out of the active list. If there are no suitable active slot spans // booted out of the active list. If there are no suitable active slot spans
// found, an empty or decommitted slot spans (if one exists) will be pulled // found, an empty or decommitted slot spans (if one exists) will be pulled
// from the empty/decommitted list on to the active list. // from the empty/decommitted list on to the active list.
#pragma pack(push, 1)
template <bool thread_safe> template <bool thread_safe>
struct SlotSpanMetadata { struct __attribute__((packed)) SlotSpanMetadata {
private: private:
PartitionFreelistEntry* freelist_head = nullptr; PartitionFreelistEntry* freelist_head = nullptr;
@ -303,7 +302,6 @@ struct SlotSpanMetadata {
empty_cache_index_(0), empty_cache_index_(0),
unused2_(0) {} unused2_(0) {}
}; };
#pragma pack(pop)
static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize, static_assert(sizeof(SlotSpanMetadata<ThreadSafe>) <= kPageMetadataSize,
"SlotSpanMetadata must fit into a Page Metadata slot."); "SlotSpanMetadata must fit into a Page Metadata slot.");
@ -326,12 +324,11 @@ struct SubsequentPageMetadata {
// first page of a slot span, describes that slot span. If a slot span spans // first page of a slot span, describes that slot span. If a slot span spans
// more than 1 page, the page metadata may contain rudimentary additional // more than 1 page, the page metadata may contain rudimentary additional
// information. // information.
template <bool thread_safe>
struct __attribute__((packed)) PartitionPage {
// "Pack" the union so that common page metadata still fits within // "Pack" the union so that common page metadata still fits within
// kPageMetadataSize. (SlotSpanMetadata is also "packed".) // kPageMetadataSize. (SlotSpanMetadata is also "packed".)
#pragma pack(push, 1) union __attribute__((packed)) {
template <bool thread_safe>
struct PartitionPage {
union {
SlotSpanMetadata<thread_safe> slot_span_metadata; SlotSpanMetadata<thread_safe> slot_span_metadata;
SubsequentPageMetadata subsequent_page_metadata; SubsequentPageMetadata subsequent_page_metadata;
@ -369,7 +366,7 @@ struct PartitionPage {
ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address); ALWAYS_INLINE static PartitionPage* FromAddr(uintptr_t address);
}; };
#pragma pack(pop)
static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize, static_assert(sizeof(PartitionPage<ThreadSafe>) == kPageMetadataSize,
"PartitionPage must be able to fit in a metadata slot"); "PartitionPage must be able to fit in a metadata slot");

View File

@ -30,7 +30,7 @@ namespace partition_alloc::internal {
namespace { namespace {
[[noreturn]] NOINLINE NOT_TAIL_CALLED void DoubleFreeOrCorruptionDetected() { [[noreturn]] NOINLINE NOT_TAIL_CALLED void DoubleFreeOrCorruptionDetected() {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
IMMEDIATE_CRASH(); IMMEDIATE_CRASH();
} }

View File

@ -8,10 +8,10 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h" #include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/oom.h" #include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -475,11 +475,9 @@ static void PartitionDumpSlotSpanStats(
if (slot_span->CanStoreRawSize()) { if (slot_span->CanStoreRawSize()) {
stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize()); stats_out->active_bytes += static_cast<uint32_t>(slot_span->GetRawSize());
stats_out->active_count += 1;
} else { } else {
stats_out->active_bytes += stats_out->active_bytes +=
(slot_span->num_allocated_slots * stats_out->bucket_slot_size); (slot_span->num_allocated_slots * stats_out->bucket_slot_size);
stats_out->active_count += slot_span->num_allocated_slots;
} }
size_t slot_span_bytes_resident = RoundUpToSystemPage( size_t slot_span_bytes_resident = RoundUpToSystemPage(
@ -523,7 +521,6 @@ static void PartitionDumpBucketStats(
size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots; size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
stats_out->allocated_slot_span_size = bucket->get_bytes_per_span(); stats_out->allocated_slot_span_size = bucket->get_bytes_per_span();
stats_out->active_bytes = bucket->num_full_slot_spans * bucket_useful_storage; stats_out->active_bytes = bucket->num_full_slot_spans * bucket_useful_storage;
stats_out->active_count = bucket->num_full_slot_spans * bucket_num_slots;
stats_out->resident_bytes = stats_out->resident_bytes =
bucket->num_full_slot_spans * stats_out->allocated_slot_span_size; bucket->num_full_slot_spans * stats_out->allocated_slot_span_size;
@ -1182,7 +1179,6 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
if (bucket_stats[i].is_valid) { if (bucket_stats[i].is_valid) {
stats.total_resident_bytes += bucket_stats[i].resident_bytes; stats.total_resident_bytes += bucket_stats[i].resident_bytes;
stats.total_active_bytes += bucket_stats[i].active_bytes; stats.total_active_bytes += bucket_stats[i].active_bytes;
stats.total_active_count += bucket_stats[i].active_count;
stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes; stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
stats.total_discardable_bytes += bucket_stats[i].discardable_bytes; stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
} }
@ -1202,7 +1198,6 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
stats.total_resident_bytes += direct_mapped_allocations_total_size; stats.total_resident_bytes += direct_mapped_allocations_total_size;
stats.total_active_bytes += direct_mapped_allocations_total_size; stats.total_active_bytes += direct_mapped_allocations_total_size;
stats.total_active_count += num_direct_mapped_allocations;
stats.has_thread_cache = with_thread_cache; stats.has_thread_cache = with_thread_cache;
if (stats.has_thread_cache) { if (stats.has_thread_cache) {
@ -1230,7 +1225,6 @@ void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
mapped_stats.allocated_slot_span_size = size; mapped_stats.allocated_slot_span_size = size;
mapped_stats.bucket_slot_size = size; mapped_stats.bucket_slot_size = size;
mapped_stats.active_bytes = size; mapped_stats.active_bytes = size;
mapped_stats.active_count = 1;
mapped_stats.resident_bytes = size; mapped_stats.resident_bytes = size;
dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats); dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
} }

View File

@ -38,11 +38,11 @@
#include "base/allocator/buildflags.h" #include "base/allocator/buildflags.h"
#include "base/allocator/partition_allocator/address_pool_manager_types.h" #include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/allocation_guard.h" #include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc-inl.h" #include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -65,7 +65,6 @@
#include "base/allocator/partition_allocator/thread_cache.h" #include "base/allocator/partition_allocator/thread_cache.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "base/time/time.h" #include "base/time/time.h"
#include "build/build_config.h" #include "build/build_config.h"
#include "build/chromecast_buildflags.h" #include "build/chromecast_buildflags.h"
@ -138,7 +137,7 @@ struct PurgeFlags {
// Options struct used to configure PartitionRoot and PartitionAllocator. // Options struct used to configure PartitionRoot and PartitionAllocator.
struct PartitionOptions { struct PartitionOptions {
enum class AlignedAlloc : uint8_t { enum class AlignedAlloc : uint8_t {
// By default all allocations will be aligned to `kAlignment`, // By default all allocations will be aligned to `base::kAlignment`,
// likely to be 8B or 16B depending on platforms and toolchains. // likely to be 8B or 16B depending on platforms and toolchains.
// AlignedAlloc() allows to enforce higher alignment. // AlignedAlloc() allows to enforce higher alignment.
// This option determines whether it is supported for the partition. // This option determines whether it is supported for the partition.
@ -979,7 +978,7 @@ ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
// memset() can be really expensive. // memset() can be really expensive.
#if EXPENSIVE_DCHECKS_ARE_ON() #if EXPENSIVE_DCHECKS_ARE_ON()
DebugMemset(reinterpret_cast<void*>(slot_start), kFreedByte, memset(reinterpret_cast<void*>(slot_start), kFreedByte,
slot_span->GetUtilizedSlotSize() slot_span->GetUtilizedSlotSize()
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
- sizeof(PartitionRefCount) - sizeof(PartitionRefCount)
@ -1257,7 +1256,7 @@ ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooksImmediate(
// memset() can be really expensive. // memset() can be really expensive.
#if EXPENSIVE_DCHECKS_ARE_ON() #if EXPENSIVE_DCHECKS_ARE_ON()
internal::DebugMemset(SlotStartAddr2Ptr(slot_start), internal::kFreedByte, memset(SlotStartAddr2Ptr(slot_start), internal::kFreedByte,
slot_span->GetUtilizedSlotSize() slot_span->GetUtilizedSlotSize()
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT) #if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
- sizeof(internal::PartitionRefCount) - sizeof(internal::PartitionRefCount)
@ -1795,7 +1794,7 @@ ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocWithFlagsNoHooks(
if (LIKELY(!zero_fill)) { if (LIKELY(!zero_fill)) {
// memset() can be really expensive. // memset() can be really expensive.
#if EXPENSIVE_DCHECKS_ARE_ON() #if EXPENSIVE_DCHECKS_ARE_ON()
internal::DebugMemset(object, internal::kUninitializedByte, usable_size); memset(object, internal::kUninitializedByte, usable_size);
#endif #endif
} else if (!is_already_zeroed) { } else if (!is_already_zeroed) {
memset(object, 0, usable_size); memset(object, 0, usable_size);

View File

@ -51,7 +51,6 @@ struct PartitionMemoryStats {
size_t max_allocated_bytes; // Max size of allocations. size_t max_allocated_bytes; // Max size of allocations.
size_t total_resident_bytes; // Total bytes provisioned by the partition. size_t total_resident_bytes; // Total bytes provisioned by the partition.
size_t total_active_bytes; // Total active bytes in the partition. size_t total_active_bytes; // Total active bytes in the partition.
size_t total_active_count; // Total count of active objects in the partition.
size_t total_decommittable_bytes; // Total bytes that could be decommitted. size_t total_decommittable_bytes; // Total bytes that could be decommitted.
size_t total_discardable_bytes; // Total bytes that could be discarded. size_t total_discardable_bytes; // Total bytes that could be discarded.
#if BUILDFLAG(USE_BACKUP_REF_PTR) #if BUILDFLAG(USE_BACKUP_REF_PTR)
@ -81,7 +80,6 @@ struct PartitionBucketMemoryStats {
uint32_t allocated_slot_span_size; // Total size the slot span allocated uint32_t allocated_slot_span_size; // Total size the slot span allocated
// from the system (committed pages). // from the system (committed pages).
uint32_t active_bytes; // Total active bytes used in the bucket. uint32_t active_bytes; // Total active bytes used in the bucket.
uint32_t active_count; // Total active objects allocated in the bucket.
uint32_t resident_bytes; // Total bytes provisioned in the bucket. uint32_t resident_bytes; // Total bytes provisioned in the bucket.
uint32_t decommittable_bytes; // Total bytes that could be decommitted. uint32_t decommittable_bytes; // Total bytes that could be decommitted.
uint32_t discardable_bytes; // Total bytes that could be discarded. uint32_t discardable_bytes; // Total bytes that could be discarded.

View File

@ -18,7 +18,6 @@
#include "base/allocator/partition_allocator/partition_tag_bitmap.h" #include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h" #include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "base/dcheck_is_on.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc { namespace partition_alloc {

View File

@ -6,8 +6,8 @@
#include <type_traits> #include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
#include "base/allocator/partition_allocator/partition_lock.h" #include "base/allocator/partition_allocator/partition_lock.h"
#include "base/rand_util.h"
namespace partition_alloc { namespace partition_alloc {
@ -29,15 +29,15 @@ class RandomGenerator {
::partition_alloc::internal::Lock lock_ = {}; ::partition_alloc::internal::Lock lock_ = {};
bool initialized_ GUARDED_BY(lock_) = false; bool initialized_ GUARDED_BY(lock_) = false;
union { union {
internal::base::InsecureRandomGenerator instance_ GUARDED_BY(lock_); base::InsecureRandomGenerator instance_ GUARDED_BY(lock_);
uint8_t instance_buffer_[sizeof( uint8_t instance_buffer_[sizeof(base::InsecureRandomGenerator)] GUARDED_BY(
internal::base::InsecureRandomGenerator)] GUARDED_BY(lock_) = {}; lock_) = {};
}; };
internal::base::InsecureRandomGenerator* GetGenerator() base::InsecureRandomGenerator* GetGenerator()
EXCLUSIVE_LOCKS_REQUIRED(lock_) { EXCLUSIVE_LOCKS_REQUIRED(lock_) {
if (!initialized_) { if (!initialized_) {
new (instance_buffer_) internal::base::InsecureRandomGenerator(); new (instance_buffer_) base::InsecureRandomGenerator();
initialized_ = true; initialized_ = true;
} }
return &instance_; return &instance_;
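A generic sketch (an assumption; WidgetSketch is a hypothetical stand-in for InsecureRandomGenerator) of the construction pattern used by RandomGenerator above: the object lives in raw storage inside the class, so there is no static initializer or exit-time destructor, and it is placement-new'ed on first use. The real code additionally holds `lock_` while doing this.

#include <new>

struct WidgetSketch {
  int state = 0;
};

class LazyHolderSketch {
 public:
  WidgetSketch* Get() {
    if (!initialized_) {
      new (storage_) WidgetSketch();  // construct in place on first use
      initialized_ = true;
    }
    return std::launder(reinterpret_cast<WidgetSketch*>(storage_));
  }

 private:
  bool initialized_ = false;
  alignas(WidgetSketch) unsigned char storage_[sizeof(WidgetSketch)] = {};
};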

View File

@ -18,7 +18,6 @@
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "base/base_export.h" #include "base/base_export.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/dcheck_is_on.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {

View File

@ -6,7 +6,7 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_LOGGING_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_STARSCAN_LOGGING_H_
#include "base/allocator/partition_allocator/allocation_guard.h" #include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h" #include "base/logging.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -31,8 +31,7 @@ struct LoggerWithAllowedAllocations : ScopedAllowAllocations,
// are tricky to enforce and easy to mess up with. Since verbose *Scan logging // are tricky to enforce and easy to mess up with. Since verbose *Scan logging
// is essential for debugging, we choose to provide support for it inside *Scan. // is essential for debugging, we choose to provide support for it inside *Scan.
#define PA_PCSCAN_VLOG(verbose_level) \ #define PA_PCSCAN_VLOG(verbose_level) \
PA_LAZY_STREAM(PA_PCSCAN_VLOG_STREAM(verbose_level), \ LAZY_STREAM(PA_PCSCAN_VLOG_STREAM(verbose_level), VLOG_IS_ON(verbose_level))
PA_VLOG_IS_ON(verbose_level))
} // namespace partition_alloc::internal } // namespace partition_alloc::internal

View File

@ -6,7 +6,7 @@
#include <cstring> #include <cstring>
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h" #include "base/no_destructor.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -22,8 +22,7 @@ constexpr PartitionOptions kConfig{
} // namespace } // namespace
ThreadSafePartitionRoot& PCScanMetadataAllocator() { ThreadSafePartitionRoot& PCScanMetadataAllocator() {
static internal::base::NoDestructor<ThreadSafePartitionRoot> allocator( static base::NoDestructor<ThreadSafePartitionRoot> allocator(kConfig);
kConfig);
return *allocator; return *allocator;
} }
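A generic sketch (an assumption; MetadataRegistrySketch is hypothetical): the NoDestructor wrapper used above yields a lazily constructed, function-local singleton without registering an exit-time destructor, which matters inside the allocator because shutdown destruction order is unpredictable. The partition_alloc-internal NoDestructor in the left-hand column and the plain base::NoDestructor on the right behave the same way.

struct MetadataRegistrySketch {
  int entries = 0;
};

MetadataRegistrySketch& GetRegistrySketch() {
  // Constructed on first call; intentionally never destroyed.
  static partition_alloc::internal::base::NoDestructor<MetadataRegistrySketch>
      registry;
  return *registry;
}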

View File

@ -20,16 +20,11 @@
#include "base/allocator/partition_allocator/address_pool_manager.h" #include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/address_pool_manager_bitmap.h" #include "base/allocator/partition_allocator/address_pool_manager_bitmap.h"
#include "base/allocator/partition_allocator/allocation_guard.h" #include "base/allocator/partition_allocator/allocation_guard.h"
#include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h" #include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc.h" #include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/alias.h"
#include "base/allocator/partition_allocator/partition_alloc_base/memory/ref_counted.h"
#include "base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -46,7 +41,13 @@
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "base/allocator/partition_allocator/thread_cache.h" #include "base/allocator/partition_allocator/thread_cache.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/cpu.h"
#include "base/debug/alias.h"
#include "base/immediate_crash.h" #include "base/immediate_crash.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "base/no_destructor.h"
#include "base/threading/platform_thread.h" #include "base/threading/platform_thread.h"
#include "base/time/time.h" #include "base/time/time.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -63,8 +64,13 @@
namespace partition_alloc::internal { namespace partition_alloc::internal {
namespace base {
using ::base::MakeRefCounted;
using ::base::RefCountedThreadSafe;
} // namespace base
[[noreturn]] NOINLINE NOT_TAIL_CALLED void DoubleFreeAttempt() { [[noreturn]] NOINLINE NOT_TAIL_CALLED void DoubleFreeAttempt() {
PA_NO_CODE_FOLDING(); NO_CODE_FOLDING();
IMMEDIATE_CRASH(); IMMEDIATE_CRASH();
} }
@ -1177,7 +1183,7 @@ class PCScan::PCScanThread final {
static PCScanThread& Instance() { static PCScanThread& Instance() {
// Lazily instantiate the scanning thread. // Lazily instantiate the scanning thread.
static internal::base::NoDestructor<PCScanThread> instance; static base::NoDestructor<PCScanThread> instance;
return *instance; return *instance;
} }
@ -1203,7 +1209,7 @@ class PCScan::PCScanThread final {
} }
private: private:
friend class internal::base::NoDestructor<PCScanThread>; friend class base::NoDestructor<PCScanThread>;
PCScanThread() { PCScanThread() {
ScopedAllowAllocations allow_allocations_within_std_thread; ScopedAllowAllocations allow_allocations_within_std_thread;

View File

@ -13,12 +13,12 @@
#include <utility> #include <utility>
#include <vector> #include <vector>
#include "base/allocator/partition_allocator/partition_alloc_base/memory/scoped_refptr.h"
#include "base/allocator/partition_allocator/partition_alloc_base/no_destructor.h"
#include "base/allocator/partition_allocator/starscan/metadata_allocator.h" #include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
#include "base/allocator/partition_allocator/starscan/pcscan.h" #include "base/allocator/partition_allocator/starscan/pcscan.h"
#include "base/allocator/partition_allocator/starscan/starscan_fwd.h" #include "base/allocator/partition_allocator/starscan/starscan_fwd.h"
#include "base/allocator/partition_allocator/starscan/write_protector.h" #include "base/allocator/partition_allocator/starscan/write_protector.h"
#include "base/memory/scoped_refptr.h"
#include "base/no_destructor.h"
// TODO(crbug.com/1288247): Remove this when migration is complete. // TODO(crbug.com/1288247): Remove this when migration is complete.
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -46,7 +46,7 @@ class PCScanInternal final {
static PCScanInternal& Instance() { static PCScanInternal& Instance() {
// Since the data that PCScanInternal holds is cold, it's fine to have the // Since the data that PCScanInternal holds is cold, it's fine to have the
// runtime check for thread-safe local static initialization. // runtime check for thread-safe local static initialization.
static internal::base::NoDestructor<PCScanInternal> instance; static base::NoDestructor<PCScanInternal> instance;
return *instance; return *instance;
} }
@ -110,7 +110,7 @@ class PCScanInternal final {
partition_alloc::StatsReporter& GetReporter(); partition_alloc::StatsReporter& GetReporter();
private: private:
friend internal::base::NoDestructor<PCScanInternal>; friend base::NoDestructor<PCScanInternal>;
friend class partition_alloc::internal::StarScanSnapshot; friend class partition_alloc::internal::StarScanSnapshot;
using StackTops = std::unordered_map< using StackTops = std::unordered_map<

View File

@ -9,10 +9,10 @@
#include <atomic> #include <atomic>
#include <vector> #include <vector>
#include "base/allocator/partition_allocator/partition_alloc_base/rand_util.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/starscan/metadata_allocator.h" #include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"
#include "base/rand_util.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -38,8 +38,8 @@ class RacefulWorklist {
explicit RandomizedView(RacefulWorklist& worklist) explicit RandomizedView(RacefulWorklist& worklist)
: worklist_(worklist), offset_(0) { : worklist_(worklist), offset_(0) {
if (worklist.data_.size() > 0) if (worklist.data_.size() > 0)
offset_ = static_cast<size_t>( offset_ =
internal::base::RandGenerator(worklist.data_.size())); static_cast<size_t>(base::RandGenerator(worklist.data_.size()));
} }
RandomizedView(const RandomizedView&) = delete; RandomizedView(const RandomizedView&) = delete;

View File

@ -15,7 +15,7 @@
#include <tuple> #include <tuple>
#include <utility> #include <utility>
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" #include "base/allocator/partition_allocator/base/bits.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/compiler_specific.h" #include "base/compiler_specific.h"

View File

@ -64,8 +64,7 @@ void StatsCollector::ReportTracesAndHistsImpl(
continue; continue;
} }
reporter.ReportTraceEvent(static_cast<IdType<context>>(id), tid, reporter.ReportTraceEvent(static_cast<IdType<context>>(id), tid,
event.start_time.ToInternalValue(), event.start_time, event.end_time);
event.end_time.ToInternalValue());
accumulated_events[id] += (event.end_time - event.start_time); accumulated_events[id] += (event.end_time - event.start_time);
} }
} }
@ -76,7 +75,7 @@ void StatsCollector::ReportTracesAndHistsImpl(
if (accumulated_events[id].is_zero()) if (accumulated_events[id].is_zero())
continue; continue;
reporter.ReportStats(ToUMAString(static_cast<IdType<context>>(id)).c_str(), reporter.ReportStats(ToUMAString(static_cast<IdType<context>>(id)).c_str(),
accumulated_events[id].InMicroseconds()); accumulated_events[id]);
} }
} }

View File

@ -7,6 +7,7 @@
#include "base/allocator/partition_allocator/starscan/stats_collector.h" #include "base/allocator/partition_allocator/starscan/stats_collector.h"
#include "base/threading/platform_thread.h" #include "base/threading/platform_thread.h"
#include "base/time/time.h"
namespace partition_alloc { namespace partition_alloc {
@ -17,18 +18,18 @@ class StatsReporter {
public: public:
virtual void ReportTraceEvent(internal::StatsCollector::ScannerId id, virtual void ReportTraceEvent(internal::StatsCollector::ScannerId id,
const base::PlatformThreadId tid, const base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value, base::TimeTicks start_time,
int64_t end_time_ticks_internal_value) {} base::TimeTicks end_time) {}
virtual void ReportTraceEvent(internal::StatsCollector::MutatorId id, virtual void ReportTraceEvent(internal::StatsCollector::MutatorId id,
const base::PlatformThreadId tid, const base::PlatformThreadId tid,
int64_t start_time_ticks_internal_value, base::TimeTicks start_time,
int64_t end_time_ticks_internal_value) {} base::TimeTicks end_time) {}
virtual void ReportSurvivedQuarantineSize(size_t survived_size) {} virtual void ReportSurvivedQuarantineSize(size_t survived_size) {}
virtual void ReportSurvivedQuarantinePercent(double survivied_rate) {} virtual void ReportSurvivedQuarantinePercent(double survivied_rate) {}
virtual void ReportStats(const char* stats_name, int64_t sample_in_usec) {} virtual void ReportStats(const char* stats_name, base::TimeDelta sample) {}
}; };
} // namespace partition_alloc } // namespace partition_alloc
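An embedder-side sketch (an assumption, assuming the int64_t-based variant of the interface shown in the left-hand column): because StatsReporter's virtuals are no-ops by default, an embedder overrides only what it wants to forward.

#include <cstdint>
#include <cstdio>

class LoggingStatsReporter : public partition_alloc::StatsReporter {
 public:
  void ReportSurvivedQuarantineSize(size_t survived_size) override {
    std::printf("PCScan survived %zu bytes\n", survived_size);
  }
  void ReportStats(const char* stats_name, int64_t sample_in_usec) override {
    std::printf("%s: %lld us\n", stats_name,
                static_cast<long long>(sample_in_usec));
  }
};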

View File

@ -9,9 +9,9 @@
#include "base/allocator/partition_allocator/address_pool_manager.h" #include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/logging.h"
#include "base/allocator/partition_allocator/partition_alloc_base/posix/eintr_wrapper.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/platform_thread.h" #include "base/threading/platform_thread.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -43,12 +43,12 @@ void UserFaultFDThread(int uffd) {
while (true) { while (true) {
// Poll on the uffd descriptor for page fault events. // Poll on the uffd descriptor for page fault events.
pollfd pollfd{.fd = uffd, .events = POLLIN}; pollfd pollfd{.fd = uffd, .events = POLLIN};
const int nready = PA_HANDLE_EINTR(poll(&pollfd, 1, -1)); const int nready = HANDLE_EINTR(poll(&pollfd, 1, -1));
PA_CHECK(-1 != nready); PA_CHECK(-1 != nready);
// Get page fault info. // Get page fault info.
uffd_msg msg; uffd_msg msg;
const int nread = PA_HANDLE_EINTR(read(uffd, &msg, sizeof(msg))); const int nread = HANDLE_EINTR(read(uffd, &msg, sizeof(msg)));
PA_CHECK(0 != nread); PA_CHECK(0 != nread);
// We only expect page faults. // We only expect page faults.
@ -66,7 +66,7 @@ void UserFaultFDThread(int uffd) {
UserFaultFDWriteProtector::UserFaultFDWriteProtector() UserFaultFDWriteProtector::UserFaultFDWriteProtector()
: uffd_(syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK)) { : uffd_(syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK)) {
if (uffd_ == -1) { if (uffd_ == -1) {
PA_LOG(WARNING) << "userfaultfd is not supported by the current kernel"; LOG(WARNING) << "userfaultfd is not supported by the current kernel";
return; return;
} }

View File

@ -4,9 +4,11 @@
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cpu.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/cpu.h"
#include "base/files/file_path.h"
#include "base/native_library.h"
#include "build/build_config.h" #include "build/build_config.h"
#if defined(PA_HAS_MEMORY_TAGGING) #if defined(PA_HAS_MEMORY_TAGGING)
@ -38,8 +40,7 @@
#endif #endif
#if BUILDFLAG(IS_ANDROID) #if BUILDFLAG(IS_ANDROID)
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h" #include "base/native_library.h"
#include "base/allocator/partition_allocator/partition_alloc_base/native_library.h"
#endif // BUILDFLAG(IS_ANDROID) #endif // BUILDFLAG(IS_ANDROID)
namespace partition_alloc { namespace partition_alloc {
@ -47,7 +48,7 @@ namespace partition_alloc {
#if defined(PA_HAS_MEMORY_TAGGING) #if defined(PA_HAS_MEMORY_TAGGING)
namespace { namespace {
void ChangeMemoryTaggingModeInternal(unsigned prctl_mask) { void ChangeMemoryTaggingModeInternal(unsigned prctl_mask) {
internal::base::CPU cpu; base::CPU cpu;
if (cpu.has_mte()) { if (cpu.has_mte()) {
int status = prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_mask, 0, 0, 0); int status = prctl(PR_SET_TAGGED_ADDR_CTRL, prctl_mask, 0, 0, 0);
PA_CHECK(status == 0); PA_CHECK(status == 0);
@ -85,8 +86,7 @@ void ChangeMemoryTaggingModeForAllThreadsPerProcess(
base::FilePath module_path; base::FilePath module_path;
base::NativeLibraryLoadError load_error; base::NativeLibraryLoadError load_error;
base::FilePath library_path = module_path.Append("libc.so"); base::FilePath library_path = module_path.Append("libc.so");
base::NativeLibrary library = base::NativeLibrary library = LoadNativeLibrary(library_path, &load_error);
base::LoadNativeLibrary(library_path, &load_error);
PA_CHECK(library); PA_CHECK(library);
void* func_ptr = void* func_ptr =
base::GetFunctionPointerFromNativeLibrary(library, "mallopt"); base::GetFunctionPointerFromNativeLibrary(library, "mallopt");
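A hedged illustration (assumptions: the PR_* constants come from a recent Linux <linux/prctl.h>, and the exact masks partition_alloc passes are defined elsewhere in this file): the functions above ultimately issue a prctl(PR_SET_TAGGED_ADDR_CTRL, ...) call, for example to request synchronous MTE tag checking.

#include <linux/prctl.h>
#include <sys/prctl.h>

// Request the tagged-address ABI with synchronous MTE checks; the 0xfffe
// mask excludes tag 0 from the set of tags IRG may generate.
bool EnableSyncMteSketch() {
  unsigned long mask = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                       (0xfffeUL << PR_MTE_TAG_SHIFT);
  return prctl(PR_SET_TAGGED_ADDR_CTRL, mask, 0, 0, 0) == 0;
}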

View File

@@ -10,7 +10,6 @@
 #include <atomic>
 #include <cstdint>
-#include "base/allocator/partition_allocator/partition_alloc_base/cxx17_backports.h"
 #include "base/allocator/partition_allocator/partition_alloc_check.h"
 #include "base/allocator/partition_allocator/partition_alloc_config.h"
 #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@@ -18,6 +17,7 @@
 #include "base/base_export.h"
 #include "base/callback.h"
 #include "base/compiler_specific.h"
+#include "base/cxx17_backports.h"
 #include "base/dcheck_is_on.h"
 #include "build/build_config.h"
@@ -294,7 +294,7 @@ void ThreadCacheRegistry::RunPeriodicPurge() {
   // of cached memory cannot change between calls (since we do not purge
   // background threads, but only ask them to purge their own cache at the next
   // allocation).
-  periodic_purge_next_interval_ = internal::base::clamp(
+  periodic_purge_next_interval_ = std::clamp(
       periodic_purge_next_interval_, kMinPurgeInterval, kMaxPurgeInterval);
   PurgeAll();
@@ -411,8 +411,8 @@ void ThreadCache::SetGlobalLimits(PartitionRoot<>* root, float multiplier) {
   constexpr size_t kMinLimit = 1;
   // |PutInBucket()| is called on a full bucket, which should not overflow.
   constexpr size_t kMaxLimit = std::numeric_limits<uint8_t>::max() - 1;
-  global_limits_[index] = static_cast<uint8_t>(
-      internal::base::clamp(value, kMinLimit, kMaxLimit));
+  global_limits_[index] =
+      static_cast<uint8_t>(base::clamp(value, kMinLimit, kMaxLimit));
   PA_DCHECK(global_limits_[index] >= kMinLimit);
   PA_DCHECK(global_limits_[index] <= kMaxLimit);
 }
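Both versions in the hunk above clamp the computed limit into [kMinLimit, kMaxLimit] before narrowing to uint8_t; only the spelling of clamp() differs. A small standalone illustration (not from this diff), with the test values chosen here purely as assumptions:

// Sketch only, not from this diff: the clamp-then-narrow pattern used by
// ThreadCache::SetGlobalLimits(), with std::clamp standing in for base::clamp.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>

int main() {
  constexpr size_t kMinLimit = 1;
  constexpr size_t kMaxLimit = std::numeric_limits<uint8_t>::max() - 1;  // 254

  const size_t values[] = {0, 100, 100000};
  for (size_t value : values) {
    // Pin the value into [kMinLimit, kMaxLimit], then narrow safely.
    auto limit = static_cast<uint8_t>(std::clamp(value, kMinLimit, kMaxLimit));
    assert(limit >= kMinLimit && limit <= kMaxLimit);
  }
  return 0;
}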

View File

@@ -10,7 +10,6 @@
 #include <limits>
 #include <memory>
-#include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h"
 #include "base/allocator/partition_allocator/partition_alloc_config.h"
 #include "base/allocator/partition_allocator/partition_alloc_forward.h"
 #include "base/allocator/partition_allocator/partition_bucket_lookup.h"
@@ -21,6 +20,7 @@
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/dcheck_is_on.h"
+#include "base/gtest_prod_util.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
@@ -418,29 +418,26 @@ class BASE_EXPORT ThreadCache {
   friend class ThreadCacheRegistry;
   friend class PartitionAllocThreadCacheTest;
   friend class tools::ThreadCacheInspector;
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, Simple);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              MultipleObjectsCachedPerBucket);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              LargeAllocationsAreNotCached);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              MultipleThreadCaches);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, RecordStats);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              ThreadCacheRegistry);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              MultipleThreadCachesAccounting);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicCountPerBucket);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicCountPerBucketClamping);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicCountPerBucketMultipleThreads);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicSizeThreshold);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
-                              DynamicSizeThresholdPurge);
-  PA_FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, ClearFromTail);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, Simple);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                           MultipleObjectsCachedPerBucket);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                           LargeAllocationsAreNotCached);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, MultipleThreadCaches);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, RecordStats);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, ThreadCacheRegistry);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                           MultipleThreadCachesAccounting);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                           DynamicCountPerBucket);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                           DynamicCountPerBucketClamping);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                           DynamicCountPerBucketMultipleThreads);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, DynamicSizeThreshold);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest,
+                           DynamicSizeThresholdPurge);
+  FRIEND_TEST_ALL_PREFIXES(PartitionAllocThreadCacheTest, ClearFromTail);
 };
 ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
@@ -509,13 +506,12 @@ ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
   }
   PA_DCHECK(bucket.count != 0);
-  internal::PartitionFreelistEntry* result = bucket.freelist_head;
+  auto* result = bucket.freelist_head;
   // Passes the bucket size to |GetNext()|, so that in case of freelist
   // corruption, we know the bucket size that lead to the crash, helping to
   // narrow down the search for culprit. |bucket| was touched just now, so this
   // does not introduce another cache miss.
-  internal::PartitionFreelistEntry* next =
-      result->GetNextForThreadCache<true>(bucket.slot_size);
+  auto* next = result->GetNextForThreadCache<true>(bucket.slot_size);
   PA_DCHECK(result != next);
   bucket.count--;
   PA_DCHECK(bucket.count != 0 || !next);
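The hunk above is the thread cache's freelist pop; the comment explains why the bucket's slot size is threaded into GetNextForThreadCache(). The standalone sketch below (not part of this diff) shows the same head/next/count bookkeeping on a plain singly linked list; the type and field names are illustrative assumptions, not PartitionAlloc's real types.

// Sketch only, not from this diff: the general shape of the freelist pop
// performed in GetFromCache(), with a plain singly linked list standing in
// for PartitionFreelistEntry.
#include <cassert>
#include <cstddef>

struct FreelistEntry {
  FreelistEntry* next = nullptr;
};

struct Bucket {
  FreelistEntry* freelist_head = nullptr;
  size_t count = 0;
};

// Pops the head entry, mirroring the result/next bookkeeping shown above.
FreelistEntry* Pop(Bucket& bucket) {
  assert(bucket.count != 0);
  FreelistEntry* result = bucket.freelist_head;
  FreelistEntry* next = result->next;
  assert(result != next);
  bucket.count--;
  assert(bucket.count != 0 || next == nullptr);
  bucket.freelist_head = next;
  return result;
}

int main() {
  FreelistEntry a, b;
  a.next = &b;
  Bucket bucket{&a, 2};
  FreelistEntry* popped = Pop(bucket);
  assert(popped == &a);
  assert(bucket.freelist_head == &b);
  return 0;
}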

Some files were not shown because too many files have changed in this diff.