Compare commits

...

No commits in common. "900f316fde0c9b0d6aa08750b955b64bc23da9bf" and "0efdd5d766d82fc319aed5028eb3fb0c149ea4a9" have entirely different histories.

1383 changed files with 25079 additions and 25841 deletions


@ -1 +1 @@
113.0.5672.62 112.0.5615.49


@ -21,7 +21,7 @@ The Naïve server here works as a forward proxy and a packet length padding laye
## Download NaïveProxy ## Download NaïveProxy
Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)). Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [SagerNet](https://github.com/SagerNet/SagerNet)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
Users should always use the latest version to keep signatures identical to Chrome. Users should always use the latest version to keep signatures identical to Chrome.


@ -10,7 +10,6 @@ Standard: Cpp11
# TODO(crbug.com/1392808): Remove when InsertBraces has been upstreamed into # TODO(crbug.com/1392808): Remove when InsertBraces has been upstreamed into
# the Chromium style (is implied by BasedOnStyle: Chromium). # the Chromium style (is implied by BasedOnStyle: Chromium).
InsertBraces: true InsertBraces: true
InsertNewlineAtEOF: true
# Make sure code like: # Make sure code like:
# IPC_BEGIN_MESSAGE_MAP() # IPC_BEGIN_MESSAGE_MAP()
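As a quick illustration of the `InsertBraces: true` option shown in this `.clang-format` hunk (hypothetical helper functions, not code from the diff): clang-format leaves an unbraced single-statement body alone when the option is off and rewrites it into the braced form when it is on, which is the same brace difference visible in the AddressPoolManager hunks near the end of this comparison.

```cpp
// Illustration only; ClampNonNegative/ClampNonNegativeBraced are made-up names.
int ClampNonNegative(int value) {
  if (value < 0)
    return 0;  // accepted without InsertBraces
  return value;
}

int ClampNonNegativeBraced(int value) {
  if (value < 0) {
    return 0;  // what clang-format emits with InsertBraces: true
  }
  return value;
}

int main() {
  return ClampNonNegative(-3) + ClampNonNegativeBraced(-3);
}
```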


@ -39,7 +39,6 @@ Aditya Bhargava <heuristicist@gmail.com>
Adrian Belgun <adrian.belgun@intel.com> Adrian Belgun <adrian.belgun@intel.com>
Adrian Ratiu <adrian.ratiu@collabora.corp-partner.google.com> Adrian Ratiu <adrian.ratiu@collabora.corp-partner.google.com>
Adrià Vilanova Martínez <me@avm99963.com> Adrià Vilanova Martínez <me@avm99963.com>
Ahmed Elwasefi <a.m.elwasefi@gmail.com>
Ahmet Emir Ercin <ahmetemiremir@gmail.com> Ahmet Emir Ercin <ahmetemiremir@gmail.com>
Ajay Berwal <a.berwal@samsung.com> Ajay Berwal <a.berwal@samsung.com>
Ajay Berwal <ajay.berwal@samsung.com> Ajay Berwal <ajay.berwal@samsung.com>
@ -139,7 +138,6 @@ Arnaud Mandy <arnaud.mandy@intel.com>
Arnaud Renevier <a.renevier@samsung.com> Arnaud Renevier <a.renevier@samsung.com>
Arpita Bahuguna <a.bah@samsung.com> Arpita Bahuguna <a.bah@samsung.com>
Arthur Lussos <developer0420@gmail.com> Arthur Lussos <developer0420@gmail.com>
Artin Lindqvist <artin.lindqvist.chromium@gmail.com>
Artur Akerberg <artur.aker@gmail.com> Artur Akerberg <artur.aker@gmail.com>
Arun Kulkarni <kulkarni.a@samsung.com> Arun Kulkarni <kulkarni.a@samsung.com>
Arun Kumar <arun87.kumar@samsung.com> Arun Kumar <arun87.kumar@samsung.com>
@ -237,7 +235,6 @@ Cheng Zhao <zcbenz@gmail.com>
Cheng Yu <yuzichengcode@gmail.com> Cheng Yu <yuzichengcode@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com> Choongwoo Han <cwhan.tunz@gmail.com>
Choudhury M. Shamsujjoha <choudhury.s@samsung.com> Choudhury M. Shamsujjoha <choudhury.s@samsung.com>
Chris Dalton <chris@rive.app>
Chris Greene <cwgreene@amazon.com> Chris Greene <cwgreene@amazon.com>
Chris Harrelson <chrishtr@gmail.com> Chris Harrelson <chrishtr@gmail.com>
Chris Nardi <hichris123@gmail.com> Chris Nardi <hichris123@gmail.com>
@ -282,7 +279,6 @@ Dániel Bátyai <dbatyai@inf.u-szeged.hu>
Dániel Vince <vinced@inf.u-szeged.hu> Dániel Vince <vinced@inf.u-szeged.hu>
Daniil Suvorov <severecloud@gmail.com> Daniil Suvorov <severecloud@gmail.com>
Danny Weiss <danny.weiss.fr@gmail.com> Danny Weiss <danny.weiss.fr@gmail.com>
Danylo Boiko <danielboyko02@gmail.com>
Daoming Qiu <daoming.qiu@intel.com> Daoming Qiu <daoming.qiu@intel.com>
Darik Harter <darik.harter@gmail.com> Darik Harter <darik.harter@gmail.com>
Darshan Sen <raisinten@gmail.com> Darshan Sen <raisinten@gmail.com>
@ -304,7 +300,6 @@ David Sanders <dsanders11@ucsbalum.com>
David Spellman <dspell@amazon.com> David Spellman <dspell@amazon.com>
David Valachovic <adenflorian@gmail.com> David Valachovic <adenflorian@gmail.com>
Dax Kelson <dkelson@gurulabs.com> Dax Kelson <dkelson@gurulabs.com>
Debadree Chatterjee <debadree333@gmail.com>
Debashish Samantaray <d.samantaray@samsung.com> Debashish Samantaray <d.samantaray@samsung.com>
Debug Wang <debugwang@tencent.com> Debug Wang <debugwang@tencent.com>
Deepak Dilip Borade <deepak.db@samsung.com> Deepak Dilip Borade <deepak.db@samsung.com>
@ -469,7 +464,6 @@ Horia Olaru <olaru@adobe.com>
Hosung You <hosung.you@samsung.com> Hosung You <hosung.you@samsung.com>
Huapeng Li <huapengl@amazon.com> Huapeng Li <huapengl@amazon.com>
Huayong Xu <huayong.xu@samsung.com> Huayong Xu <huayong.xu@samsung.com>
Hung Ngo <ngotienhung195@gmail.com>
Hugo Holgersson <hugo.holgersson@sonymobile.com> Hugo Holgersson <hugo.holgersson@sonymobile.com>
Hui Wang <wanghui07050707@gmail.com> Hui Wang <wanghui07050707@gmail.com>
Hui Wang <wanghui210@huawei.com> Hui Wang <wanghui210@huawei.com>
@ -509,7 +503,6 @@ Ivan Naydonov <samogot@gmail.com>
Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com> Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>
Ivan Sham <ivansham@amazon.com> Ivan Sham <ivansham@amazon.com>
Jack Bates <jack@nottheoilrig.com> Jack Bates <jack@nottheoilrig.com>
Jackson Loeffler <j@jloeffler.com>
Jacky Hu <flameddd@gmail.com> Jacky Hu <flameddd@gmail.com>
Jacob Clark <jacob.jh.clark@googlemail.com> Jacob Clark <jacob.jh.clark@googlemail.com>
Jacob Mandelson <jacob@mandelson.org> Jacob Mandelson <jacob@mandelson.org>
@ -577,7 +570,6 @@ Jiangzhen Hou <houjiangzhen@360.cn>
Jianjun Zhu <jianjun.zhu@intel.com> Jianjun Zhu <jianjun.zhu@intel.com>
Jianneng Zhong <muzuiget@gmail.com> Jianneng Zhong <muzuiget@gmail.com>
Jiawei Shao <jiawei.shao@intel.com> Jiawei Shao <jiawei.shao@intel.com>
Jiawei Chen <jiawei.chen@dolby.com>
Jiaxun Wei <leuisken@gmail.com> Jiaxun Wei <leuisken@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com> Jiaxun Yang <jiaxun.yang@flygoat.com>
Jidong Qin <qinjidong@qianxin.com> Jidong Qin <qinjidong@qianxin.com>
@ -610,7 +602,6 @@ Joe Thomas <mhx348@motorola.com>
Joel Stanley <joel@jms.id.au> Joel Stanley <joel@jms.id.au>
Joey Jiao <joeyjiao0810@gmail.com> Joey Jiao <joeyjiao0810@gmail.com>
Joey Mou <joeymou@amazon.com> Joey Mou <joeymou@amazon.com>
Johann <johann@duck.com>
Johannes Rudolph <johannes.rudolph@googlemail.com> Johannes Rudolph <johannes.rudolph@googlemail.com>
John Ingve Olsen <johningveolsen@gmail.com> John Ingve Olsen <johningveolsen@gmail.com>
John Kleinschmidt <kleinschmidtorama@gmail.com> John Kleinschmidt <kleinschmidtorama@gmail.com>
@ -752,8 +743,6 @@ Leon Han <leon.han@intel.com>
Leung Wing Chung <lwchkg@gmail.com> Leung Wing Chung <lwchkg@gmail.com>
Li Yanbo <liyanbo.monster@bytedance.com> Li Yanbo <liyanbo.monster@bytedance.com>
Li Yin <li.yin@intel.com> Li Yin <li.yin@intel.com>
Lian Ruilong <lianrl@dingdao.com>
Lian Ruilong <lianruilong1108@gmail.com>
Lidwine Genevet <lgenevet@cisco.com> Lidwine Genevet <lgenevet@cisco.com>
Lin Sun <lin.sun@intel.com> Lin Sun <lin.sun@intel.com>
Lin Peng <penglin220@gmail.com> Lin Peng <penglin220@gmail.com>
@ -775,7 +764,7 @@ Luka Dojcilovic <l.dojcilovic@gmail.com>
Lukas Lihotzki <lukas@lihotzki.de> Lukas Lihotzki <lukas@lihotzki.de>
Lukasz Krakowiak <lukasz.krakowiak@mobica.com> Lukasz Krakowiak <lukasz.krakowiak@mobica.com>
Luke Inman-Semerau <luke.semerau@gmail.com> Luke Inman-Semerau <luke.semerau@gmail.com>
Luke Gu <gulukesh@gmail.com> Luke Seunghoe Gu <gulukesh@gmail.com>
Luke Zarko <lukezarko@gmail.com> Luke Zarko <lukezarko@gmail.com>
Luoxi Pan <l.panpax@gmail.com> Luoxi Pan <l.panpax@gmail.com>
Lu Yahan <yahan@iscas.ac.cn> Lu Yahan <yahan@iscas.ac.cn>
@ -797,7 +786,6 @@ Manuel Lagana <manuel.lagana.dev@gmail.com>
Mao Yujie <maojie0924@gmail.com> Mao Yujie <maojie0924@gmail.com>
Mao Yujie <yujie.mao@intel.com> Mao Yujie <yujie.mao@intel.com>
Marc des Garets <marc.desgarets@googlemail.com> Marc des Garets <marc.desgarets@googlemail.com>
Marcio Caroso <msscaroso@gmail.com>
Marcin Wiacek <marcin@mwiacek.com> Marcin Wiacek <marcin@mwiacek.com>
Marco Rodrigues <gothicx@gmail.com> Marco Rodrigues <gothicx@gmail.com>
Marcos Caceres <marcos@marcosc.com> Marcos Caceres <marcos@marcosc.com>
@ -1302,7 +1290,6 @@ Vinoth Chandar <vinoth@uber.com>
Vipul Bhasin <vipul.bhasin@gmail.com> Vipul Bhasin <vipul.bhasin@gmail.com>
Visa Putkinen <v.putkinen@partner.samsung.com> Visa Putkinen <v.putkinen@partner.samsung.com>
Vishal Bhatnagar <vishal.b@samsung.com> Vishal Bhatnagar <vishal.b@samsung.com>
Vishal Lingam <vishal.reddy@samsung.com>
Vitaliy Kharin <kvserr@gmail.com> Vitaliy Kharin <kvserr@gmail.com>
Vivek Galatage <vivek.vg@samsung.com> Vivek Galatage <vivek.vg@samsung.com>
Volker Sorge <volker.sorge@gmail.com> Volker Sorge <volker.sorge@gmail.com>
@ -1317,7 +1304,6 @@ Wenxiang Qian <leonwxqian@gmail.com>
WenSheng He <wensheng.he@samsung.com> WenSheng He <wensheng.he@samsung.com>
Wesley Lancel <wesleylancel@gmail.com> Wesley Lancel <wesleylancel@gmail.com>
Wei Wang <wei4.wang@intel.com> Wei Wang <wei4.wang@intel.com>
Wei Wen <wenwei.wenwei@bytedance.com>
Wesley Wigham <wwigham@gmail.com> Wesley Wigham <wwigham@gmail.com>
Will Cohen <wwcohen@gmail.com> Will Cohen <wwcohen@gmail.com>
Will Hirsch <chromium@willhirsch.co.uk> Will Hirsch <chromium@willhirsch.co.uk>


@ -33,24 +33,37 @@ if (is_official_build) {
assert(!is_component_build) assert(!is_component_build)
} }
# The `gn_all` target is used to list all of the main targets in the build, so # This file defines the following two main targets:
# that we can figure out which BUILD.gn files to process, following the process
# described at the top of this file.
# #
# Because of the way GN works (again, as described above), there may be targets # "gn_all" is used to create explicit dependencies from the root BUILD.gn to
# built by `all` that aren't built by `gn_all`. We always want `all` to build, # each top-level component that we wish to include when building everything via
# so there's really never a reason you'd want to build `gn_all` instead of # "all". This is required since the set of targets built by "all" is determined
# `all`, and no tooling should depend directly on this target. Tools should # automatically based on reachability from the root BUILD.gn (for details, see
# should depend on either an explicit list of targets, or `all`. # crbug.com/503241). Builders should typically use "all", or list targets
# explicitly, rather than relying on "gn_all".
#
# "gn_visibility": targets that are normally not visible to top-level targets,
# but are built anyway by "all". Since we don't want any such targets, we have
# this placeholder to make sure hidden targets that aren't otherwise depended
# on yet are accounted for.
group("gn_all") { group("gn_all") {
testonly = true testonly = true
deps = [ deps = [
":gn_visibility",
"//net", "//net",
] ]
} }
group("gn_visibility") {
deps = [
"//build/config/sanitizers:options_sources",
# "//third_party/pdfium:pdfium_embeddertests", # TODO(GYP): visibility?
# "//third_party/pdfium:pdfium_unittests", # TODO(GYP): visibility?
]
}
if (is_android) { if (is_android) {
group("optimize_gn_gen") { group("optimize_gn_gen") {
deps = [ deps = [

src/DEPS (378 changes): file diff suppressed because it is too large.


@ -19,7 +19,6 @@
import("//base/allocator/allocator.gni") import("//base/allocator/allocator.gni")
import("//base/allocator/partition_allocator/partition_alloc.gni") import("//base/allocator/partition_allocator/partition_alloc.gni")
import("//base/debug/debug.gni")
import("//base/trace_event/tracing.gni") import("//base/trace_event/tracing.gni")
import("//build/buildflag_header.gni") import("//build/buildflag_header.gni")
import("//build/config/arm.gni") import("//build/config/arm.gni")
@ -121,21 +120,12 @@ if (is_fuchsia) {
# TODO(crbug.com/1304707): Drop toolchain_has_rust after we have support for all # TODO(crbug.com/1304707): Drop toolchain_has_rust after we have support for all
# our toolchains: Linux x86 is missing in order to build for Android. # our toolchains: Linux x86 is missing in order to build for Android.
# #
# Rust to C++ type conversions.
build_rust_base_conversions = toolchain_has_rust && enable_rust_base_conversions
# The Rust implementation of base::JSONReader. # The Rust implementation of base::JSONReader.
build_rust_json_reader = toolchain_has_rust && enable_rust_json build_rust_json_reader = toolchain_has_rust && enable_rust_json
assert(build_rust_base_conversions || !build_rust_json_reader, buildflag_header("parsing_buildflags") {
"Cannot enable Rust JSON decoder without also base conversions") header = "parsing_buildflags.h"
flags = [ "BUILD_RUST_JSON_READER=$build_rust_json_reader" ]
buildflag_header("rust_buildflags") {
header = "rust_buildflags.h"
flags = [
"BUILD_RUST_JSON_READER=$build_rust_json_reader",
"BUILD_RUST_BASE_CONVERSIONS=$build_rust_base_conversions",
]
} }
if (is_win) { if (is_win) {
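The hunk above swaps the `rust_buildflags` `buildflag_header()` target for `parsing_buildflags`; in both variants a generated header exposes `BUILD_RUST_JSON_READER` to C++. A minimal, simplified sketch of how such a flag is consumed (the `BUILDFLAG` macro here is a stand-in for Chromium's `build/buildflag.h`, and the `BUILDFLAG_INTERNAL_*` define mimics what the generated header would contain when `build_rust_json_reader` is true; both are assumptions, not copied from the tree):

```cpp
#include <cstdio>

// Stand-in for the generated buildflag header (flag enabled).
#define BUILDFLAG_INTERNAL_BUILD_RUST_JSON_READER() (1)
// Simplified stand-in for build/buildflag.h.
#define BUILDFLAG(flag) (BUILDFLAG_INTERNAL_##flag())

int main() {
#if BUILDFLAG(BUILD_RUST_JSON_READER)
  std::puts("Rust-backed JSON reader compiled in");
#else
  std::puts("C++ JSON parser only");
#endif
}
```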
@ -254,6 +244,7 @@ component("base") {
"big_endian.h", "big_endian.h",
"bit_cast.h", "bit_cast.h",
"bits.h", "bits.h",
"build_time.cc",
"build_time.h", "build_time.h",
"callback_list.cc", "callback_list.cc",
"callback_list.h", "callback_list.h",
@ -451,7 +442,6 @@ component("base") {
"memory/shared_memory_tracker.cc", "memory/shared_memory_tracker.cc",
"memory/shared_memory_tracker.h", "memory/shared_memory_tracker.h",
"memory/singleton.h", "memory/singleton.h",
"memory/stack_allocated.h",
"memory/unsafe_shared_memory_pool.cc", "memory/unsafe_shared_memory_pool.cc",
"memory/unsafe_shared_memory_pool.h", "memory/unsafe_shared_memory_pool.h",
"memory/unsafe_shared_memory_region.cc", "memory/unsafe_shared_memory_region.cc",
@ -480,8 +470,6 @@ component("base") {
"metrics/dummy_histogram.h", "metrics/dummy_histogram.h",
"metrics/field_trial.cc", "metrics/field_trial.cc",
"metrics/field_trial.h", "metrics/field_trial.h",
"metrics/field_trial_list_including_low_anonymity.cc",
"metrics/field_trial_list_including_low_anonymity.h",
"metrics/field_trial_param_associator.cc", "metrics/field_trial_param_associator.cc",
"metrics/field_trial_param_associator.h", "metrics/field_trial_param_associator.h",
"metrics/field_trial_params.cc", "metrics/field_trial_params.cc",
@ -942,7 +930,6 @@ component("base") {
"types/pass_key.h", "types/pass_key.h",
"types/strong_alias.h", "types/strong_alias.h",
"types/token_type.h", "types/token_type.h",
"types/variant_util.h",
"unguessable_token.cc", "unguessable_token.cc",
"unguessable_token.h", "unguessable_token.h",
"value_iterators.cc", "value_iterators.cc",
@ -1077,9 +1064,9 @@ component("base") {
":ios_cronet_buildflags", ":ios_cronet_buildflags",
":logging_buildflags", ":logging_buildflags",
":orderfile_buildflags", ":orderfile_buildflags",
":parsing_buildflags",
":power_monitor_buildflags", ":power_monitor_buildflags",
":profiler_buildflags", ":profiler_buildflags",
":rust_buildflags",
":sanitizer_buildflags", ":sanitizer_buildflags",
":synchronization_buildflags", ":synchronization_buildflags",
":tracing_buildflags", ":tracing_buildflags",
@ -1087,10 +1074,11 @@ component("base") {
"//base/numerics:base_numerics", "//base/numerics:base_numerics",
"//build:chromecast_buildflags", "//build:chromecast_buildflags",
"//build:chromeos_buildflags", "//build:chromeos_buildflags",
"//build/rust:rust_buildflags",
"//third_party/abseil-cpp:absl", "//third_party/abseil-cpp:absl",
] ]
if (build_rust_base_conversions) { if (toolchain_has_rust) {
# Base provides conversions between CXX types and base types (e.g. # Base provides conversions between CXX types and base types (e.g.
# StringPiece). # StringPiece).
public_deps += [ "//build/rust:cxx_cppdeps" ] public_deps += [ "//build/rust:cxx_cppdeps" ]
@ -1216,8 +1204,6 @@ component("base") {
"os_compat_android.cc", "os_compat_android.cc",
"os_compat_android.h", "os_compat_android.h",
"process/process_android.cc", "process/process_android.cc",
"profiler/native_unwinder_android_map_delegate.h",
"profiler/native_unwinder_android_memory_regions_map.h",
"profiler/stack_sampler_android.cc", "profiler/stack_sampler_android.cc",
"system/sys_info_android.cc", "system/sys_info_android.cc",
"threading/platform_thread_android_stub.cc", "threading/platform_thread_android_stub.cc",
@ -1278,13 +1264,6 @@ component("base") {
libs += [ "android/library_loader/anchor_functions.lds" ] libs += [ "android/library_loader/anchor_functions.lds" ]
} # is_android } # is_android
if (build_allocation_stack_trace_recorder) {
sources += [
"debug/allocation_trace.cc",
"debug/allocation_trace.h",
]
}
if (is_robolectric) { if (is_robolectric) {
# Make jni.h available. # Make jni.h available.
configs += [ "//third_party/jdk" ] configs += [ "//third_party/jdk" ]
@ -1356,12 +1335,10 @@ component("base") {
"files/memory_mapped_file_posix.cc", "files/memory_mapped_file_posix.cc",
"fuchsia/default_job.cc", "fuchsia/default_job.cc",
"fuchsia/default_job.h", "fuchsia/default_job.h",
"fuchsia/fidl_event_handler.h",
"fuchsia/file_utils.cc", "fuchsia/file_utils.cc",
"fuchsia/file_utils.h", "fuchsia/file_utils.h",
"fuchsia/filtered_service_directory.cc", "fuchsia/filtered_service_directory.cc",
"fuchsia/filtered_service_directory.h", "fuchsia/filtered_service_directory.h",
"fuchsia/fuchsia_component_connect.h",
"fuchsia/fuchsia_logging.cc", "fuchsia/fuchsia_logging.cc",
"fuchsia/fuchsia_logging.h", "fuchsia/fuchsia_logging.h",
"fuchsia/intl_profile_watcher.cc", "fuchsia/intl_profile_watcher.cc",
@ -1430,16 +1407,13 @@ component("base") {
public_deps += [ public_deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.component.runner:fuchsia.component.runner_hlcpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.component.runner:fuchsia.component.runner_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.intl:fuchsia.intl_hlcpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.intl:fuchsia.intl_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_cpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_hlcpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_cpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.mem:fuchsia.mem_hlcpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.mem:fuchsia.mem_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.process.lifecycle:fuchsia.process.lifecycle_cpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.process.lifecycle:fuchsia.process.lifecycle_hlcpp",
"//third_party/fuchsia-sdk/sdk/pkg/async", "//third_party/fuchsia-sdk/sdk/pkg/async",
"//third_party/fuchsia-sdk/sdk/pkg/component_incoming_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/fdio", "//third_party/fuchsia-sdk/sdk/pkg/fdio",
"//third_party/fuchsia-sdk/sdk/pkg/fidl_cpp", "//third_party/fuchsia-sdk/sdk/pkg/fidl_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/fidl_cpp_wire",
"//third_party/fuchsia-sdk/sdk/pkg/sync", "//third_party/fuchsia-sdk/sdk/pkg/sync",
"//third_party/fuchsia-sdk/sdk/pkg/sys_cpp", "//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/syslog_structured_backend", "//third_party/fuchsia-sdk/sdk/pkg/syslog_structured_backend",
@ -1448,10 +1422,9 @@ component("base") {
] ]
deps += [ deps += [
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo:fuchsia.buildinfo_cpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo:fuchsia.buildinfo_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.hwinfo:fuchsia.hwinfo_cpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.hwinfo:fuchsia.hwinfo_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_cpp_hlcpp_conversion", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media:fuchsia.media_hlcpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media:fuchsia.media_cpp",
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sys:fuchsia.sys_hlcpp", "//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sys:fuchsia.sys_hlcpp",
"//third_party/fuchsia-sdk/sdk/pkg/async-default", "//third_party/fuchsia-sdk/sdk/pkg/async-default",
"//third_party/fuchsia-sdk/sdk/pkg/async-loop-cpp", "//third_party/fuchsia-sdk/sdk/pkg/async-loop-cpp",
@ -1844,7 +1817,7 @@ component("base") {
"win/sphelper.h", "win/sphelper.h",
"win/startup_information.cc", "win/startup_information.cc",
"win/startup_information.h", "win/startup_information.h",
"win/variant_conversions.h", "win/variant_util.h",
"win/variant_vector.cc", "win/variant_vector.cc",
"win/variant_vector.h", "win/variant_vector.h",
"win/vector.cc", "win/vector.cc",
@ -1927,6 +1900,8 @@ component("base") {
"mac/authorization_util.h", "mac/authorization_util.h",
"mac/authorization_util.mm", "mac/authorization_util.mm",
"mac/close_nocancel.cc", "mac/close_nocancel.cc",
"mac/dispatch_source_mach.cc",
"mac/dispatch_source_mach.h",
"mac/launch_application.h", "mac/launch_application.h",
"mac/launch_application.mm", "mac/launch_application.mm",
"mac/launchd.cc", "mac/launchd.cc",
@ -1991,6 +1966,7 @@ component("base") {
"CoreFoundation.framework", "CoreFoundation.framework",
"IOKit.framework", "IOKit.framework",
"OpenDirectory.framework", "OpenDirectory.framework",
"Security.framework",
] ]
} }
@ -2007,8 +1983,6 @@ component("base") {
"mac/call_with_eh_frame.cc", "mac/call_with_eh_frame.cc",
"mac/call_with_eh_frame.h", "mac/call_with_eh_frame.h",
"mac/call_with_eh_frame_asm.S", "mac/call_with_eh_frame_asm.S",
"mac/dispatch_source_mach.cc",
"mac/dispatch_source_mach.h",
"mac/foundation_util.h", "mac/foundation_util.h",
"mac/foundation_util.mm", "mac/foundation_util.mm",
"mac/mac_logging.h", "mac/mac_logging.h",
@ -2037,7 +2011,6 @@ component("base") {
"threading/platform_thread_mac.mm", "threading/platform_thread_mac.mm",
"time/time_mac.mm", "time/time_mac.mm",
] ]
frameworks += [ "Security.framework" ]
} }
# Linux. # Linux.
@ -2095,7 +2068,9 @@ component("base") {
"mac/bridging.h", "mac/bridging.h",
"native_library_ios.mm", "native_library_ios.mm",
"power_monitor/power_monitor_device_source_ios.mm", "power_monitor/power_monitor_device_source_ios.mm",
"process/launch_ios.cc",
"process/process_metrics_ios.cc", "process/process_metrics_ios.cc",
"process/process_stubs.cc",
"profiler/stack_sampler_ios.cc", "profiler/stack_sampler_ios.cc",
"system/sys_info_ios.mm", "system/sys_info_ios.mm",
] ]
@ -2105,27 +2080,13 @@ component("base") {
"files/file_path_watcher_kqueue.cc", "files/file_path_watcher_kqueue.cc",
"files/file_path_watcher_kqueue.h", "files/file_path_watcher_kqueue.h",
"files/file_path_watcher_mac.cc", "files/file_path_watcher_mac.cc",
"ios/sim_header_shims.h",
"mac/mach_port_rendezvous.cc",
"mac/mach_port_rendezvous.h",
"process/launch_mac.cc",
"process/memory_mac.mm", "process/memory_mac.mm",
"process/port_provider_mac.cc",
"process/port_provider_mac.h",
"process/process_handle_mac.cc",
"process/process_iterator_ios.mm", "process/process_iterator_ios.mm",
"process/process_mac.cc",
"process/process_posix.cc",
"sync_socket_posix.cc", "sync_socket_posix.cc",
"synchronization/waitable_event_watcher_mac.cc", "synchronization/waitable_event_watcher_mac.cc",
] ]
libs += [ "bsm" ]
} else { } else {
sources += [ sources += [ "process/memory_stubs.cc" ]
"process/launch_ios.cc",
"process/memory_stubs.cc",
"process/process_stubs.cc",
]
} }
if (is_cronet_build) { if (is_cronet_build) {
@ -2157,8 +2118,6 @@ component("base") {
] ]
} else { } else {
sources -= [ sources -= [
"allocator/dispatcher/dispatcher.cc",
"allocator/dispatcher/dispatcher.h",
"sampling_heap_profiler/poisson_allocation_sampler.cc", "sampling_heap_profiler/poisson_allocation_sampler.cc",
"sampling_heap_profiler/poisson_allocation_sampler.h", "sampling_heap_profiler/poisson_allocation_sampler.h",
"sampling_heap_profiler/sampling_heap_profiler.cc", "sampling_heap_profiler/sampling_heap_profiler.cc",
@ -2417,10 +2376,6 @@ buildflag_header("debugging_buildflags") {
enable_lldbinit_warning = enable_lldbinit_warning =
is_debug && strip_absolute_paths_from_debug_symbols && is_mac is_debug && strip_absolute_paths_from_debug_symbols && is_mac
# TODO(crbug.com/1420774): Try and enable these checks on Android too.
enable_commandline_sequence_checks =
(is_debug || dcheck_always_on) && !is_android
flags = [ flags = [
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable", "DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
"ENABLE_LOCATION_SOURCE=$enable_location_source", "ENABLE_LOCATION_SOURCE=$enable_location_source",
@ -2433,8 +2388,6 @@ buildflag_header("debugging_buildflags") {
"ENABLE_LLDBINIT_WARNING=$enable_lldbinit_warning", "ENABLE_LLDBINIT_WARNING=$enable_lldbinit_warning",
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks", "EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
"ENABLE_STACK_TRACE_LINE_NUMBERS=$enable_stack_trace_line_numbers", "ENABLE_STACK_TRACE_LINE_NUMBERS=$enable_stack_trace_line_numbers",
"ENABLE_COMMANDLINE_SEQUENCE_CHECKS=$enable_commandline_sequence_checks",
"ENABLE_ALLOCATION_STACK_TRACE_RECORDER=$build_allocation_stack_trace_recorder",
] ]
} }
@ -2589,7 +2542,7 @@ if (use_custom_libcxx && enable_safe_libcxx && !is_debug) {
} }
action("build_date") { action("build_date") {
script = "write_build_date_header.py" script = "//build/write_build_date_header.py"
outputs = [ "$target_gen_dir/generated_build_date.h" ] outputs = [ "$target_gen_dir/generated_build_date.h" ]


@ -18,8 +18,6 @@ buildflag_header("buildflags") {
"USE_ALLOCATOR_SHIM=$use_allocator_shim", "USE_ALLOCATOR_SHIM=$use_allocator_shim",
"USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$enable_backup_ref_ptr_support", "USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$enable_backup_ref_ptr_support",
"USE_ALLOCATION_EVENT_DISPATCHER=$use_allocation_event_dispatcher",
] ]
} }


@ -12,11 +12,6 @@ if (is_ios) {
declare_args() { declare_args() {
# Causes all the allocations to be routed via allocator_shim.cc. # Causes all the allocations to be routed via allocator_shim.cc.
use_allocator_shim = use_allocator_shim_default use_allocator_shim = use_allocator_shim_default
# Use the new allocation event dispatcher to distribute events to event observers.
# If set to false, PoissonAllocationSampler will hook into PartitionAllocator and
# AllocatorShims directly.
use_allocation_event_dispatcher = false
} }
assert( assert(
@ -34,12 +29,14 @@ if (is_win && use_allocator_shim) {
# Chromium-specific asserts. External embedders _may_ elect to use these # Chromium-specific asserts. External embedders _may_ elect to use these
# features even without PA-E. # features even without PA-E.
if (!use_partition_alloc_as_malloc) { if (!use_partition_alloc_as_malloc) {
# In theory, BackupRefPtr will work just fine without # In theory, BackupRefPtr/MTECheckedPtr will work just fine without
# PartitionAlloc-Everywhere, but its scope would be limited to partitions # PartitionAlloc-Everywhere, but their scope would be limited to partitions
# that are invoked explicitly (not via malloc). These are only Blink # that are invoked explicitly (not via malloc). These are only Blink
# partition, where we currently don't even use raw_ptr<T>. # partition, where we currently don't even use raw_ptr<T>.
assert(!enable_backup_ref_ptr_support, assert(!enable_backup_ref_ptr_support,
"Chromium does not use BRP without PA-E") "Chromium does not use BRP without PA-E")
assert(!enable_mte_checked_ptr_support,
"Chromium does not use MTECheckedPtr without PA-E")
# Pointer compression works only if all pointers are guaranteed to be # Pointer compression works only if all pointers are guaranteed to be
# allocated by PA (in one of its core pools, to be precise). In theory, # allocated by PA (in one of its core pools, to be precise). In theory,


@ -18,7 +18,6 @@
#include <atomic> #include <atomic>
#endif #endif
#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if BUILDFLAG(USE_ALLOCATOR_SHIM)
namespace base::allocator::dispatcher::allocator_shim_details { namespace base::allocator::dispatcher::allocator_shim_details {
namespace { namespace {
@ -224,11 +223,9 @@ void PartitionFreeHook(void* address) {
} // namespace } // namespace
} // namespace base::allocator::dispatcher::partition_allocator_details } // namespace base::allocator::dispatcher::partition_allocator_details
#endif // BUILDFLAG(USE_PARTITION_ALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC)
#endif // !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
void InstallStandardAllocatorHooks() { void InstallStandardAllocatorHooks() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM) #if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator_shim::InsertAllocatorDispatch( allocator_shim::InsertAllocatorDispatch(
@ -245,7 +242,10 @@ void InstallStandardAllocatorHooks() {
&partition_allocator_details::PartitionFreeHook); &partition_allocator_details::PartitionFreeHook);
#endif // BUILDFLAG(USE_PARTITION_ALLOC) #endif // BUILDFLAG(USE_PARTITION_ALLOC)
} }
#endif // !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
} // namespace base::allocator::dispatcher
namespace base::allocator::dispatcher {
// The private implementation of Dispatcher. // The private implementation of Dispatcher.
struct Dispatcher::Impl { struct Dispatcher::Impl {


@ -5,7 +5,6 @@
#ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_ #ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_ #define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
#include "base/allocator/buildflags.h"
#include "base/allocator/dispatcher/internal/dispatcher_internal.h" #include "base/allocator/dispatcher/internal/dispatcher_internal.h"
#include "base/base_export.h" #include "base/base_export.h"
@ -13,9 +12,7 @@
namespace base::allocator::dispatcher { namespace base::allocator::dispatcher {
#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
void BASE_EXPORT InstallStandardAllocatorHooks(); void BASE_EXPORT InstallStandardAllocatorHooks();
#endif
namespace internal { namespace internal {
struct DispatchData; struct DispatchData;


@ -274,7 +274,7 @@ struct DispatcherImpl {
static AllocatorDispatch allocator_dispatch_; static AllocatorDispatch allocator_dispatch_;
#endif #endif
ALWAYS_INLINE static void DoNotifyAllocation( static ALWAYS_INLINE void DoNotifyAllocation(
void* address, void* address,
size_t size, size_t size,
AllocationSubsystem subSystem, AllocationSubsystem subSystem,
@ -283,7 +283,7 @@ struct DispatcherImpl {
subSystem, type_name); subSystem, type_name);
} }
ALWAYS_INLINE static void DoNotifyFree(void* address) { static ALWAYS_INLINE void DoNotifyFree(void* address) {
PerformFreeNotification(s_observers, AllObservers{}, address); PerformFreeNotification(s_observers, AllObservers{}, address);
} }
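The `DispatcherImpl` changes above only move the `ALWAYS_INLINE` macro relative to `static`. A minimal sketch showing that both orderings are legal declaration-specifier sequences, assuming `ALWAYS_INLINE` expands roughly as in Chromium's `base/compiler_specific.h` on GCC/Clang (elsewhere it degrades to plain `inline` here):

```cpp
#if defined(__clang__) || defined(__GNUC__)
#define ALWAYS_INLINE inline __attribute__((__always_inline__))
#else
#define ALWAYS_INLINE inline
#endif

struct NotifierSketch {
  ALWAYS_INLINE static void NotifyAllocation() {}  // one ordering in the hunk
  static ALWAYS_INLINE void NotifyFree() {}        // the other ordering
};

int main() {
  NotifierSketch::NotifyAllocation();
  NotifierSketch::NotifyFree();
}
```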


@ -32,7 +32,7 @@ const base::FeatureParam<UnretainedDanglingPtrMode>
BASE_FEATURE(kPartitionAllocDanglingPtr, BASE_FEATURE(kPartitionAllocDanglingPtr,
"PartitionAllocDanglingPtr", "PartitionAllocDanglingPtr",
#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG) #if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS)
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
FEATURE_DISABLED_BY_DEFAULT FEATURE_DISABLED_BY_DEFAULT
@ -104,8 +104,8 @@ BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,
BASE_FEATURE(kPartitionAllocBackupRefPtr, BASE_FEATURE(kPartitionAllocBackupRefPtr,
"PartitionAllocBackupRefPtr", "PartitionAllocBackupRefPtr",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \ #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG) || \ BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX)) (BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
FEATURE_ENABLED_BY_DEFAULT FEATURE_ENABLED_BY_DEFAULT
#else #else
@ -124,8 +124,7 @@ constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
const base::FeatureParam<BackupRefPtrEnabledProcesses> const base::FeatureParam<BackupRefPtrEnabledProcesses>
kBackupRefPtrEnabledProcessesParam { kBackupRefPtrEnabledProcessesParam {
&kPartitionAllocBackupRefPtr, "enabled-processes", &kPartitionAllocBackupRefPtr, "enabled-processes",
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \ #if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG) || \
(BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX)) (BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
BackupRefPtrEnabledProcesses::kNonRenderer, BackupRefPtrEnabledProcesses::kNonRenderer,
#else #else
@ -138,12 +137,8 @@ constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
{BackupRefPtrMode::kDisabled, "disabled"}, {BackupRefPtrMode::kDisabled, "disabled"},
{BackupRefPtrMode::kEnabled, "enabled"}, {BackupRefPtrMode::kEnabled, "enabled"},
{BackupRefPtrMode::kEnabledWithoutZapping, "enabled-without-zapping"}, {BackupRefPtrMode::kEnabledWithoutZapping, "enabled-without-zapping"},
{BackupRefPtrMode::kEnabledWithMemoryReclaimer,
"enabled-with-memory-reclaimer"},
{BackupRefPtrMode::kDisabledButSplitPartitions2Way, {BackupRefPtrMode::kDisabledButSplitPartitions2Way,
"disabled-but-2-way-split"}, "disabled-but-2-way-split"},
{BackupRefPtrMode::kDisabledButSplitPartitions2WayWithMemoryReclaimer,
"disabled-but-2-way-split-with-memory-reclaimer"},
{BackupRefPtrMode::kDisabledButSplitPartitions3Way, {BackupRefPtrMode::kDisabledButSplitPartitions3Way,
"disabled-but-3-way-split"}, "disabled-but-3-way-split"},
{BackupRefPtrMode::kDisabledButAddDummyRefCount, {BackupRefPtrMode::kDisabledButAddDummyRefCount,


@ -90,17 +90,10 @@ enum class BackupRefPtrMode {
// Same as kEnabled but without zapping quarantined objects. // Same as kEnabled but without zapping quarantined objects.
kEnabledWithoutZapping, kEnabledWithoutZapping,
// Same as kEnabled but registers the main partition to memory reclaimer.
kEnabledWithMemoryReclaimer,
// BRP is disabled, but the main partition is split out, as if BRP was enabled // BRP is disabled, but the main partition is split out, as if BRP was enabled
// in the "previous slot" mode. // in the "previous slot" mode.
kDisabledButSplitPartitions2Way, kDisabledButSplitPartitions2Way,
// Same as kDisabledButSplitPartitions2Way but registers the main partition to
// memory reclaimer.
kDisabledButSplitPartitions2WayWithMemoryReclaimer,
// BRP is disabled, but the main partition *and* aligned partition are split // BRP is disabled, but the main partition *and* aligned partition are split
// out, as if BRP was enabled in the "before allocation" mode. // out, as if BRP was enabled in the "before allocation" mode.
kDisabledButSplitPartitions3Way, kDisabledButSplitPartitions3Way,


@ -336,22 +336,11 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
brp_group_name = "EnabledPrevSlotWithoutZapping"; brp_group_name = "EnabledPrevSlotWithoutZapping";
#else #else
brp_group_name = "EnabledBeforeAllocWithoutZapping"; brp_group_name = "EnabledBeforeAllocWithoutZapping";
#endif
break;
case features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
brp_group_name = "EnabledPrevSlotWithMemoryReclaimer";
#else
brp_group_name = "EnabledBeforeAllocWithMemoryReclaimer";
#endif #endif
break; break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way: case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
brp_group_name = "DisabledBut2WaySplit"; brp_group_name = "DisabledBut2WaySplit";
break; break;
case features::BackupRefPtrMode::
kDisabledButSplitPartitions2WayWithMemoryReclaimer:
brp_group_name = "DisabledBut2WaySplitWithMemoryReclaimer";
break;
case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way: case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
brp_group_name = "DisabledBut3WaySplit"; brp_group_name = "DisabledBut3WaySplit";
break; break;
@ -631,19 +620,18 @@ void DanglingRawPtrReleased(uintptr_t id) {
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n" << StringPrintf("0x%016" PRIxPTR, id) << ":\n"
<< dangling_signature << "\n\n" << dangling_signature << "\n\n"
<< "The memory was freed at:\n" << "The memory was freed at:\n"
<< free_info->stack_trace << "\n" << free_info->stack_trace << free_info->task_trace << "\n"
<< free_info->task_trace << "\n"
<< "The dangling raw_ptr was released at:\n" << "The dangling raw_ptr was released at:\n"
<< stack_trace_release << "\n" << stack_trace_release << task_trace_release
<< task_trace_release << dangling_ptr_footer; << dangling_ptr_footer;
} else { } else {
LOG(ERROR) << "Detected dangling raw_ptr with id=" LOG(ERROR) << "Detected dangling raw_ptr with id="
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n" << StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
<< dangling_signature << "\n\n" << dangling_signature << "\n\n"
<< "It was not recorded where the memory was freed.\n\n" << "It was not recorded where the memory was freed.\n\n"
<< "The dangling raw_ptr was released at:\n" << "The dangling raw_ptr was released at:\n"
<< stack_trace_release << "\n" << stack_trace_release << task_trace_release
<< task_trace_release << dangling_ptr_footer; << dangling_ptr_footer;
} }
if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) { if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
@ -840,132 +828,10 @@ void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
PartitionAllocSupport::PartitionAllocSupport() = default; PartitionAllocSupport::PartitionAllocSupport() = default;
void PartitionAllocSupport::ReconfigureForTests() {
ReconfigureEarlyish("");
base::AutoLock scoped_lock(lock_);
called_for_tests_ = true;
}
// static
PartitionAllocSupport::BrpConfiguration
PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
CHECK(base::FeatureList::GetInstance());
bool enable_brp = false;
bool enable_brp_zapping = false;
bool split_main_partition = false;
bool use_dedicated_aligned_partition = false;
bool add_dummy_ref_count = false;
bool process_affected_by_brp_flag = false;
bool enable_memory_reclaimer = false;
#if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr)) {
// No specified process type means this is the Browser process.
switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
process_affected_by_brp_flag = process_type.empty();
break;
case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
process_affected_by_brp_flag =
process_type.empty() ||
(process_type == switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
process_affected_by_brp_flag =
(process_type != switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
process_affected_by_brp_flag = true;
break;
}
}
#endif // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
// BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (process_affected_by_brp_flag) {
switch (base::features::kBackupRefPtrModeParam.Get()) {
case base::features::BackupRefPtrMode::kDisabled:
// Do nothing. Equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
break;
case base::features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
enable_memory_reclaimer = true;
ABSL_FALLTHROUGH_INTENDED;
case base::features::BackupRefPtrMode::kEnabled:
enable_brp_zapping = true;
ABSL_FALLTHROUGH_INTENDED;
case base::features::BackupRefPtrMode::kEnabledWithoutZapping:
enable_brp = true;
split_main_partition = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// AlignedAlloc relies on natural alignment offered by the allocator
// (see the comment inside PartitionRoot::AlignedAllocFlags). Any extras
// in front of the allocation will mess up that alignment. Such extras
// are used when BackupRefPtr is on, in which case, we need a separate
// partition, dedicated to handle only aligned allocations, where those
// extras are disabled. However, if the "previous slot" variant is used,
// no dedicated partition is needed, as the extras won't interfere with
// the alignment requirements.
use_dedicated_aligned_partition = true;
#endif
break;
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
split_main_partition = true;
break;
case base::features::BackupRefPtrMode::
kDisabledButSplitPartitions2WayWithMemoryReclaimer:
split_main_partition = true;
enable_memory_reclaimer = true;
break;
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
split_main_partition = true;
use_dedicated_aligned_partition = true;
break;
case base::features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
split_main_partition = true;
add_dummy_ref_count = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
use_dedicated_aligned_partition = true;
#endif
break;
}
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
return {enable_brp,
enable_brp_zapping,
enable_memory_reclaimer,
split_main_partition,
use_dedicated_aligned_partition,
add_dummy_ref_count,
process_affected_by_brp_flag};
}
void PartitionAllocSupport::ReconfigureEarlyish( void PartitionAllocSupport::ReconfigureEarlyish(
const std::string& process_type) { const std::string& process_type) {
{ {
base::AutoLock scoped_lock(lock_); base::AutoLock scoped_lock(lock_);
// In tests, ReconfigureEarlyish() is called by ReconfigureForTest(), which
// is earlier than ContentMain().
if (called_for_tests_) {
DCHECK(called_earlyish_);
return;
}
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues. // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
CHECK(!called_earlyish_) CHECK(!called_earlyish_)
<< "ReconfigureEarlyish was already called for process '" << "ReconfigureEarlyish was already called for process '"
@ -1016,11 +882,8 @@ void PartitionAllocSupport::ReconfigureAfterZygoteFork(
} }
void PartitionAllocSupport::ReconfigureAfterFeatureListInit( void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
const std::string& process_type, const std::string& process_type) {
bool configure_dangling_pointer_detector) { base::allocator::InstallDanglingRawPtrChecks();
if (configure_dangling_pointer_detector) {
base::allocator::InstallDanglingRawPtrChecks();
}
base::allocator::InstallUnretainedDanglingRawPtrChecks(); base::allocator::InstallUnretainedDanglingRawPtrChecks();
{ {
base::AutoLock scoped_lock(lock_); base::AutoLock scoped_lock(lock_);
@ -1051,11 +914,46 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
} }
DCHECK_NE(process_type, switches::kZygoteProcess); DCHECK_NE(process_type, switches::kZygoteProcess);
[[maybe_unused]] BrpConfiguration brp_config = // TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
GetBrpConfiguration(process_type); CHECK(base::FeatureList::GetInstance());
[[maybe_unused]] bool enable_brp = false;
[[maybe_unused]] bool enable_brp_zapping = false;
[[maybe_unused]] bool split_main_partition = false;
[[maybe_unused]] bool use_dedicated_aligned_partition = false;
[[maybe_unused]] bool add_dummy_ref_count = false;
[[maybe_unused]] bool process_affected_by_brp_flag = false;
#if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (base::FeatureList::IsEnabled(
base::features::kPartitionAllocBackupRefPtr)) {
// No specified process type means this is the Browser process.
switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
process_affected_by_brp_flag = process_type.empty();
break;
case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
process_affected_by_brp_flag =
process_type.empty() ||
(process_type == switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
process_affected_by_brp_flag =
(process_type != switches::kRendererProcess);
break;
case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
process_affected_by_brp_flag = true;
break;
}
}
#endif // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
// BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) #if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
if (brp_config.process_affected_by_brp_flag) { if (process_affected_by_brp_flag) {
base::RawPtrAsanService::GetInstance().Configure( base::RawPtrAsanService::GetInstance().Configure(
base::EnableDereferenceCheck( base::EnableDereferenceCheck(
base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()), base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
@ -1071,16 +969,62 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
} }
#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) #endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
if (process_affected_by_brp_flag) {
switch (base::features::kBackupRefPtrModeParam.Get()) {
case base::features::BackupRefPtrMode::kDisabled:
// Do nothing. Equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
break;
case base::features::BackupRefPtrMode::kEnabled:
enable_brp_zapping = true;
ABSL_FALLTHROUGH_INTENDED;
case base::features::BackupRefPtrMode::kEnabledWithoutZapping:
enable_brp = true;
split_main_partition = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// AlignedAlloc relies on natural alignment offered by the allocator
// (see the comment inside PartitionRoot::AlignedAllocFlags). Any extras
// in front of the allocation will mess up that alignment. Such extras
// are used when BackupRefPtr is on, in which case, we need a separate
// partition, dedicated to handle only aligned allocations, where those
// extras are disabled. However, if the "previous slot" variant is used,
// no dedicated partition is needed, as the extras won't interfere with
// the alignment requirements.
use_dedicated_aligned_partition = true;
#endif
break;
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
split_main_partition = true;
break;
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
split_main_partition = true;
use_dedicated_aligned_partition = true;
break;
case base::features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
split_main_partition = true;
add_dummy_ref_count = true;
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
use_dedicated_aligned_partition = true;
#endif
break;
}
}
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) #if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
allocator_shim::ConfigurePartitions( allocator_shim::ConfigurePartitions(
allocator_shim::EnableBrp(brp_config.enable_brp), allocator_shim::EnableBrp(enable_brp),
allocator_shim::EnableBrpZapping(brp_config.enable_brp_zapping), allocator_shim::EnableBrpZapping(enable_brp_zapping),
allocator_shim::EnableBrpPartitionMemoryReclaimer( allocator_shim::SplitMainPartition(split_main_partition),
brp_config.enable_brp_partition_memory_reclaimer),
allocator_shim::SplitMainPartition(brp_config.split_main_partition),
allocator_shim::UseDedicatedAlignedPartition( allocator_shim::UseDedicatedAlignedPartition(
brp_config.use_dedicated_aligned_partition), use_dedicated_aligned_partition),
allocator_shim::AddDummyRefCount(brp_config.add_dummy_ref_count), allocator_shim::AddDummyRefCount(add_dummy_ref_count),
allocator_shim::AlternateBucketDistribution( allocator_shim::AlternateBucketDistribution(
base::features::kPartitionAllocAlternateBucketDistributionParam base::features::kPartitionAllocAlternateBucketDistributionParam
.Get())); .Get()));
@ -1089,7 +1033,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
// If BRP is not enabled, check if any of PCScan flags is enabled. // If BRP is not enabled, check if any of PCScan flags is enabled.
[[maybe_unused]] bool scan_enabled = false; [[maybe_unused]] bool scan_enabled = false;
#if BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_STARSCAN)
if (!brp_config.enable_brp) { if (!enable_brp) {
scan_enabled = EnablePCScanForMallocPartitionsIfNeeded(); scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
// No specified process type means this is the Browser process. // No specified process type means this is the Browser process.
if (process_type.empty()) { if (process_type.empty()) {
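The long comment in the hunk above explains why BackupRefPtr needs a dedicated aligned partition unless the ref count lives in the previous slot: extras placed in front of an allocation break the allocator's natural alignment. A toy arithmetic sketch of that reasoning (the 8-byte extra and the addresses are made up, not PartitionAlloc's real layout):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const std::uintptr_t slot_start = 0x1000;  // a naturally 32-byte-aligned slot
  const std::uintptr_t extras = 8;           // hypothetical in-front ref count

  const std::uintptr_t ptr_without_extras = slot_start;
  const std::uintptr_t ptr_with_extras = slot_start + extras;

  std::printf("without extras: %#lx, 32-byte aligned: %d\n",
              static_cast<unsigned long>(ptr_without_extras),
              ptr_without_extras % 32 == 0);
  std::printf("with extras:    %#lx, 32-byte aligned: %d\n",
              static_cast<unsigned long>(ptr_with_extras),
              ptr_with_extras % 32 == 0);
  // Hence the dedicated aligned partition (with extras disabled) when the ref
  // count is not stored in the previous slot.
  return 0;
}
```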

View File

@ -43,15 +43,6 @@ BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
// Allows to re-configure PartitionAlloc at run-time. // Allows to re-configure PartitionAlloc at run-time.
class BASE_EXPORT PartitionAllocSupport { class BASE_EXPORT PartitionAllocSupport {
public: public:
struct BrpConfiguration {
bool enable_brp = false;
bool enable_brp_zapping = false;
bool enable_brp_partition_memory_reclaimer = false;
bool split_main_partition = false;
bool use_dedicated_aligned_partition = false;
bool add_dummy_ref_count = false;
bool process_affected_by_brp_flag = false;
};
// Reconfigure* functions re-configure PartitionAlloc. It is impossible to // Reconfigure* functions re-configure PartitionAlloc. It is impossible to
// configure PartitionAlloc before/at its initialization using information not // configure PartitionAlloc before/at its initialization using information not
// known at compile-time (e.g. process type, Finch), because by the time this // known at compile-time (e.g. process type, Finch), because by the time this
@ -75,12 +66,9 @@ class BASE_EXPORT PartitionAllocSupport {
// re-configuration steps exactly once. // re-configuration steps exactly once.
// //
// *AfterTaskRunnerInit() may be called more than once. // *AfterTaskRunnerInit() may be called more than once.
void ReconfigureForTests();
void ReconfigureEarlyish(const std::string& process_type); void ReconfigureEarlyish(const std::string& process_type);
void ReconfigureAfterZygoteFork(const std::string& process_type); void ReconfigureAfterZygoteFork(const std::string& process_type);
void ReconfigureAfterFeatureListInit( void ReconfigureAfterFeatureListInit(const std::string& process_type);
const std::string& process_type,
bool configure_dangling_pointer_detector = true);
void ReconfigureAfterTaskRunnerInit(const std::string& process_type); void ReconfigureAfterTaskRunnerInit(const std::string& process_type);
// |has_main_frame| tells us if the renderer contains a main frame. // |has_main_frame| tells us if the renderer contains a main frame.
@ -97,13 +85,10 @@ class BASE_EXPORT PartitionAllocSupport {
return singleton; return singleton;
} }
static BrpConfiguration GetBrpConfiguration(const std::string& process_type);
private: private:
PartitionAllocSupport(); PartitionAllocSupport();
base::Lock lock_; base::Lock lock_;
bool called_for_tests_ GUARDED_BY(lock_) = false;
bool called_earlyish_ GUARDED_BY(lock_) = false; bool called_earlyish_ GUARDED_BY(lock_) = false;
bool called_after_zygote_fork_ GUARDED_BY(lock_) = false; bool called_after_zygote_fork_ GUARDED_BY(lock_) = false;
bool called_after_feature_list_init_ GUARDED_BY(lock_) = false; bool called_after_feature_list_init_ GUARDED_BY(lock_) = false;


@ -92,7 +92,6 @@ component("partition_alloc") {
"partition_alloc_base/cpu.cc", "partition_alloc_base/cpu.cc",
"partition_alloc_base/cpu.h", "partition_alloc_base/cpu.h",
"partition_alloc_base/cxx17_backports.h", "partition_alloc_base/cxx17_backports.h",
"partition_alloc_base/cxx20_is_constant_evaluated.h",
"partition_alloc_base/debug/alias.cc", "partition_alloc_base/debug/alias.cc",
"partition_alloc_base/debug/alias.h", "partition_alloc_base/debug/alias.h",
"partition_alloc_base/gtest_prod_util.h", "partition_alloc_base/gtest_prod_util.h",
@ -103,6 +102,7 @@ component("partition_alloc") {
"partition_alloc_base/memory/ref_counted.h", "partition_alloc_base/memory/ref_counted.h",
"partition_alloc_base/memory/scoped_policy.h", "partition_alloc_base/memory/scoped_policy.h",
"partition_alloc_base/memory/scoped_refptr.h", "partition_alloc_base/memory/scoped_refptr.h",
"partition_alloc_base/migration_adapter.h",
"partition_alloc_base/no_destructor.h", "partition_alloc_base/no_destructor.h",
"partition_alloc_base/numerics/checked_math.h", "partition_alloc_base/numerics/checked_math.h",
"partition_alloc_base/numerics/checked_math_impl.h", "partition_alloc_base/numerics/checked_math_impl.h",
@ -160,6 +160,9 @@ component("partition_alloc") {
"partition_root.h", "partition_root.h",
"partition_stats.cc", "partition_stats.cc",
"partition_stats.h", "partition_stats.h",
"partition_tag.h",
"partition_tag_bitmap.h",
"partition_tag_types.h",
"partition_tls.h", "partition_tls.h",
"pkey.cc", "pkey.cc",
"pkey.h", "pkey.h",
@ -426,11 +429,9 @@ buildflag_header("partition_alloc_buildflags") {
"ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support", "ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
"ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks", "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
"ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
"ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks", "ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag", "ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS=$enable_dangling_raw_ptr_feature_flags_for_bots",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment", "ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr", "BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot", "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr", "USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
@ -440,6 +441,10 @@ buildflag_header("partition_alloc_buildflags") {
"FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion", "FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",
# Not to be used directly - instead use
# PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
"ENABLE_MTE_CHECKED_PTR_SUPPORT=$enable_mte_checked_ptr_support",
"RECORD_ALLOC_INFO=$_record_alloc_info", "RECORD_ALLOC_INFO=$_record_alloc_info",
"USE_FREESLOT_BITMAP=$use_freeslot_bitmap", "USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
"GLUE_CORE_POOLS=$glue_core_pools", "GLUE_CORE_POOLS=$glue_core_pools",


@ -139,9 +139,6 @@ include_rules = [
specific_include_rules = { specific_include_rules = {
".*_(perf|unit)test\.cc$": [ ".*_(perf|unit)test\.cc$": [
"+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h", "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
"+base/allocator/dispatcher/dispatcher.h",
"+base/debug/allocation_trace.h",
"+base/debug/debugging_buildflags.h",
"+base/debug/proc_maps_linux.h", "+base/debug/proc_maps_linux.h",
"+base/system/sys_info.h", "+base/system/sys_info.h",
"+base/test/gtest_util.h", "+base/test/gtest_util.h",


@ -62,18 +62,16 @@ void AddressPoolManager::GetPoolUsedSuperPages(
pool_handle handle, pool_handle handle,
std::bitset<kMaxSuperPagesInPool>& used) { std::bitset<kMaxSuperPagesInPool>& used) {
Pool* pool = GetPool(handle); Pool* pool = GetPool(handle);
if (!pool) { if (!pool)
return; return;
}
pool->GetUsedSuperPages(used); pool->GetUsedSuperPages(used);
} }
uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) { uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
Pool* pool = GetPool(handle); Pool* pool = GetPool(handle);
if (!pool) { if (!pool)
return 0; return 0;
}
return pool->GetBaseAddress(); return pool->GetBaseAddress();
} }
@ -94,13 +92,11 @@ uintptr_t AddressPoolManager::Reserve(pool_handle handle,
uintptr_t requested_address, uintptr_t requested_address,
size_t length) { size_t length) {
Pool* pool = GetPool(handle); Pool* pool = GetPool(handle);
if (!requested_address) { if (!requested_address)
return pool->FindChunk(length); return pool->FindChunk(length);
}
const bool is_available = pool->TryReserveChunk(requested_address, length); const bool is_available = pool->TryReserveChunk(requested_address, length);
if (is_available) { if (is_available)
return requested_address; return requested_address;
}
return pool->FindChunk(length); return pool->FindChunk(length);
} }
@ -167,9 +163,8 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
// |end_bit| points 1 past the last bit that needs to be 0. If it goes past // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
// |total_bits_|, return |nullptr| to signal no free chunk was found. // |total_bits_|, return |nullptr| to signal no free chunk was found.
size_t end_bit = beg_bit + need_bits; size_t end_bit = beg_bit + need_bits;
if (end_bit > total_bits_) { if (end_bit > total_bits_)
return 0; return 0;
}
bool found = true; bool found = true;
for (; curr_bit < end_bit; ++curr_bit) { for (; curr_bit < end_bit; ++curr_bit) {
@ -181,9 +176,8 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
// next outer loop pass from checking the same bits. // next outer loop pass from checking the same bits.
beg_bit = curr_bit + 1; beg_bit = curr_bit + 1;
found = false; found = false;
if (bit_hint_ == curr_bit) { if (bit_hint_ == curr_bit)
++bit_hint_; ++bit_hint_;
}
} }
} }
@ -218,14 +212,12 @@ bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
const size_t need_bits = requested_size / kSuperPageSize; const size_t need_bits = requested_size / kSuperPageSize;
const size_t end_bit = begin_bit + need_bits; const size_t end_bit = begin_bit + need_bits;
// Check that requested address is not too high. // Check that requested address is not too high.
if (end_bit > total_bits_) { if (end_bit > total_bits_)
return false; return false;
}
// Check if any bit of the requested region is set already. // Check if any bit of the requested region is set already.
for (size_t i = begin_bit; i < end_bit; ++i) { for (size_t i = begin_bit; i < end_bit; ++i) {
if (alloc_bitset_.test(i)) { if (alloc_bitset_.test(i))
return false; return false;
}
} }
// Otherwise, set the bits. // Otherwise, set the bits.
for (size_t i = begin_bit; i < end_bit; ++i) { for (size_t i = begin_bit; i < end_bit; ++i) {
@ -528,9 +520,8 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
// Get blocklist size. // Get blocklist size.
for (const auto& blocked : for (const auto& blocked :
AddressPoolManagerBitmap::brp_forbidden_super_page_map_) { AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
if (blocked.load(std::memory_order_relaxed)) { if (blocked.load(std::memory_order_relaxed))
stats->blocklist_size += 1; stats->blocklist_size += 1;
}
} }
// Count failures in finding non-blocklisted addresses. // Count failures in finding non-blocklisted addresses.


@ -33,12 +33,10 @@ uintptr_t GetRandomPageBase() {
// randomization isn't buying anything. In that case we just skip it. // randomization isn't buying anything. In that case we just skip it.
// TODO(palmer): Just dump the randomization when HE-ASLR is present. // TODO(palmer): Just dump the randomization when HE-ASLR is present.
static BOOL is_wow64 = -1; static BOOL is_wow64 = -1;
if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) { if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
is_wow64 = FALSE; is_wow64 = FALSE;
} if (!is_wow64)
if (!is_wow64) {
return 0; return 0;
}
#endif // BUILDFLAG(IS_WIN) #endif // BUILDFLAG(IS_WIN)
random &= internal::ASLRMask(); random &= internal::ASLRMask();
random += internal::ASLROffset(); random += internal::ASLROffset();


@ -20,11 +20,11 @@ PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();
namespace internal { namespace internal {
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
AslrAddress(uintptr_t mask) { AslrAddress(uintptr_t mask) {
return mask & PageAllocationGranularityBaseMask(); return mask & PageAllocationGranularityBaseMask();
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
AslrMask(uintptr_t bits) { AslrMask(uintptr_t bits) {
return AslrAddress((1ULL << bits) - 1ULL); return AslrAddress((1ULL << bits) - 1ULL);
} }
@ -45,11 +45,11 @@ AslrMask(uintptr_t bits) {
// hard-coded in those tools, bad things happen. This address range is // hard-coded in those tools, bad things happen. This address range is
// copied from TSAN source but works with all tools. See // copied from TSAN source but works with all tools. See
// https://crbug.com/539863. // https://crbug.com/539863.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() { ASLRMask() {
return AslrAddress(0x007fffffffffULL); return AslrAddress(0x007fffffffffULL);
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() { ASLROffset() {
return AslrAddress(0x7e8000000000ULL); return AslrAddress(0x7e8000000000ULL);
} }
@ -59,11 +59,11 @@ AslrMask(uintptr_t bits) {
// Windows 8.10 and newer support the full 48 bit address range. Since // Windows 8.10 and newer support the full 48 bit address range. Since
// ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See // ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See
// http://www.alex-ionescu.com/?p=246 // http://www.alex-ionescu.com/?p=246
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(47); return AslrMask(47);
} }
// Try not to map pages into the range where Windows loads DLLs by default. // Try not to map pages into the range where Windows loads DLLs by default.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return 0x80000000ULL; return 0x80000000ULL;
} }
@ -82,11 +82,11 @@ AslrMask(uintptr_t bits) {
// //
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior // TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
// changes. // changes.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() { ASLRMask() {
return AslrMask(38); return AslrMask(38);
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() { ASLROffset() {
// Be careful, there is a zone where macOS will not map memory, at least // Be careful, there is a zone where macOS will not map memory, at least
// on ARM64. From an ARM64 machine running 12.3, the range seems to be // on ARM64. From an ARM64 machine running 12.3, the range seems to be
@ -104,10 +104,10 @@ AslrMask(uintptr_t bits) {
// Linux (and macOS) support the full 47-bit user space of x64 processors. // Linux (and macOS) support the full 47-bit user space of x64 processors.
// Use only 46 to allow the kernel a chance to fulfill the request. // Use only 46 to allow the kernel a chance to fulfill the request.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(46); return AslrMask(46);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0); return AslrAddress(0);
} }
@ -117,10 +117,10 @@ AslrMask(uintptr_t bits) {
// Restrict the address range on Android to avoid a large performance // Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640. // regression in single-process WebViews. See https://crbug.com/837640.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30); return AslrMask(30);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL); return AslrAddress(0x20000000ULL);
} }
@ -130,11 +130,11 @@ AslrMask(uintptr_t bits) {
// page size and number of levels of translation pages used. We use // page size and number of levels of translation pages used. We use
// 39-bit as base as all setups should support this, lowered to 38-bit // 39-bit as base as all setups should support this, lowered to 38-bit
// as ASLROffset() could cause a carry. // as ASLROffset() could cause a carry.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLRMask() { ASLRMask() {
return AslrMask(38); return AslrMask(38);
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
ASLROffset() { ASLROffset() {
return AslrAddress(0x1000000000ULL); return AslrAddress(0x1000000000ULL);
} }
@ -143,10 +143,10 @@ AslrMask(uintptr_t bits) {
// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset() // ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
// could cause a carry. // could cause a carry.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(38); return AslrMask(38);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x1000000000ULL); return AslrAddress(0x1000000000ULL);
} }
@ -159,30 +159,30 @@ AslrMask(uintptr_t bits) {
// AIX has 64 bits of virtual addressing, but we limit the address range // AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use // to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
// extra address space to isolate the mmap regions. // extra address space to isolate the mmap regions.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30); return AslrMask(30);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x400000000000ULL); return AslrAddress(0x400000000000ULL);
} }
#elif defined(ARCH_CPU_BIG_ENDIAN) #elif defined(ARCH_CPU_BIG_ENDIAN)
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42. // Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(42); return AslrMask(42);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0); return AslrAddress(0);
} }
#else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN) #else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46. // Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(46); return AslrMask(46);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0); return AslrAddress(0);
} }
@ -193,10 +193,10 @@ AslrMask(uintptr_t bits) {
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to // Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
// chance to fulfill the request. // chance to fulfill the request.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(40); return AslrMask(40);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0); return AslrAddress(0);
} }
@ -204,10 +204,10 @@ AslrMask(uintptr_t bits) {
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel // 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request. // a chance to fulfill the request.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(29); return AslrMask(29);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0); return AslrAddress(0);
} }
@ -215,7 +215,7 @@ AslrMask(uintptr_t bits) {
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390) // !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
// For all other POSIX variants, use 30 bits. // For all other POSIX variants, use 30 bits.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30); return AslrMask(30);
} }
@ -231,7 +231,7 @@ AslrMask(uintptr_t bits) {
// fails allocate as if there were no hint at all. The high hint // fails allocate as if there were no hint at all. The high hint
// prevents the break from getting hemmed in at low values, ceding half // prevents the break from getting hemmed in at low values, ceding half
// of the address space to the system heap. // of the address space to the system heap.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x80000000ULL); return AslrAddress(0x80000000ULL);
} }
@ -239,7 +239,7 @@ AslrMask(uintptr_t bits) {
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the // The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range. // upper range.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x90000000ULL); return AslrAddress(0x90000000ULL);
} }
@ -248,7 +248,7 @@ AslrMask(uintptr_t bits) {
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7. // 10.6 and 10.7.
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL); return AslrAddress(0x20000000ULL);
} }
@ -264,10 +264,10 @@ AslrMask(uintptr_t bits) {
// This is a good range on 32-bit Windows and Android (the only platforms on // This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There // which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
// is no issue with carries here. // is no issue with carries here.
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() { constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
return AslrMask(30); return AslrMask(30);
} }
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() { constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL); return AslrAddress(0x20000000ULL);
} }
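
The hunks above only swap the order of the PA_ALWAYS_INLINE and constexpr-style qualifiers; the scheme itself is unchanged: GetRandomPageBase() (shown further up) draws a random value, masks it down to the platform's usable address bits with ASLRMask(), then shifts it into the preferred range with ASLROffset(). A minimal sketch of that combination, using an illustrative 46-bit mask, zero offset, and 64 KiB granularity rather than the real per-platform constants:

#include <cstdint>
#include <random>

// Illustrative stand-ins for the per-platform ASLRMask()/ASLROffset() and the
// page allocation granularity; the real values differ per OS/architecture.
constexpr uintptr_t kAslrMask = (uintptr_t{1} << 46) - 1;       // usable address bits
constexpr uintptr_t kAslrOffset = 0;                            // start of hint range
constexpr uintptr_t kGranularityBaseMask = ~uintptr_t{0xFFFF};  // 64 KiB granularity

uintptr_t GetRandomPageBaseSketch() {
  static std::mt19937_64 rng{std::random_device{}()};
  uintptr_t random = static_cast<uintptr_t>(rng());
  random &= kAslrMask;                   // keep only bits the platform can map
  random += kAslrOffset;                 // move into the preferred hint range
  return random & kGranularityBaseMask;  // align down to allocation granularity
}
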


@ -15,9 +15,8 @@ thread_local bool g_disallow_allocations;
} // namespace } // namespace
ScopedDisallowAllocations::ScopedDisallowAllocations() { ScopedDisallowAllocations::ScopedDisallowAllocations() {
if (g_disallow_allocations) { if (g_disallow_allocations)
PA_IMMEDIATE_CRASH(); PA_IMMEDIATE_CRASH();
}
g_disallow_allocations = true; g_disallow_allocations = true;
} }
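
The allocation guard above is a small RAII class backed by a thread_local flag: constructing a second ScopedDisallowAllocations on the same thread trips PA_IMMEDIATE_CRASH(). A minimal sketch of the same pattern with generic names (std::abort() standing in for the crash macro):

#include <cstdlib>

namespace {
thread_local bool g_disallow = false;
}  // namespace

// RAII guard: the guarded operation is forbidden while an instance is alive on
// this thread; a nested guard is treated as a bug and crashes immediately.
class ScopedDisallowSketch {
 public:
  ScopedDisallowSketch() {
    if (g_disallow) {
      std::abort();  // stands in for PA_IMMEDIATE_CRASH()
    }
    g_disallow = true;
  }
  ~ScopedDisallowSketch() { g_disallow = false; }
};
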


@ -32,8 +32,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations {
#else #else
struct [[maybe_unused]] ScopedDisallowAllocations {}; struct [[maybe_unused]] ScopedDisallowAllocations{};
struct [[maybe_unused]] ScopedAllowAllocations {}; struct [[maybe_unused]] ScopedAllowAllocations{};
#endif // PA_CONFIG(HAS_ALLOCATION_GUARD) #endif // PA_CONFIG(HAS_ALLOCATION_GUARD)


@ -13,6 +13,7 @@ import("//build_overrides/build.gni")
use_partition_alloc_as_malloc_default = false use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false enable_backup_ref_ptr_support_default = false
enable_mte_checked_ptr_support_default = false
put_ref_count_in_previous_slot_default = true put_ref_count_in_previous_slot_default = true
enable_backup_ref_ptr_slow_checks_default = false enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false enable_dangling_raw_ptr_checks_default = false


@ -227,7 +227,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
internal::CompressedPointerBaseGlobal::kBitsToShift + internal::CompressedPointerBaseGlobal::kBitsToShift +
kBitsForSignExtension; kBitsForSignExtension;
PA_ALWAYS_INLINE static UnderlyingType Compress(T* ptr) { static PA_ALWAYS_INLINE UnderlyingType Compress(T* ptr) {
static constexpr size_t kMinimalRequiredAlignment = 8; static constexpr size_t kMinimalRequiredAlignment = 8;
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment); static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
@ -252,14 +252,13 @@ class PA_TRIVIAL_ABI CompressedPointer final {
// frequent operation, we let more work here in favor of faster // frequent operation, we let more work here in favor of faster
// decompression. // decompression.
// TODO(1376980): Avoid this by overreserving the heap. // TODO(1376980): Avoid this by overreserving the heap.
if (compressed) { if (compressed)
compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1)); compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1));
}
return compressed; return compressed;
} }
PA_ALWAYS_INLINE static T* Decompress(UnderlyingType ptr) { static PA_ALWAYS_INLINE T* Decompress(UnderlyingType ptr) {
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet()); PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
const uintptr_t base = internal::CompressedPointerBaseGlobal::Get(); const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
// Treat compressed pointer as signed and cast it to uint64_t, which will // Treat compressed pointer as signed and cast it to uint64_t, which will
@ -461,13 +460,13 @@ class PA_TRIVIAL_ABI UncompressedPointer final {
template <typename U, template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr> std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr explicit UncompressedPointer( PA_ALWAYS_INLINE constexpr UncompressedPointer(
const UncompressedPointer<U>& other) const UncompressedPointer<U>& other)
: ptr_(other.ptr_) {} : ptr_(other.ptr_) {}
template <typename U, template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr> std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr explicit UncompressedPointer( PA_ALWAYS_INLINE constexpr UncompressedPointer(
UncompressedPointer<U>&& other) noexcept UncompressedPointer<U>&& other) noexcept
: ptr_(std::move(other.ptr_)) {} : ptr_(std::move(other.ptr_)) {}
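
For context, CompressedPointer stores a 64-bit pointer that lives inside a single reserved pool as a 32-bit value; the hunk above shows the non-null marker bit being set during compression and sign extension being relied on during decompression. The sketch below illustrates only the basic shift-by-alignment, base-relative idea under assumed pool parameters, not PA's exact encoding:

#include <cstdint>

// Assumed illustrative pool parameters; the real code additionally sets the
// top bit of non-null compressed values and decompresses via sign extension.
constexpr uintptr_t kAssumedPoolBase = uintptr_t{0x4000} << 32;  // aligned pool base
constexpr unsigned kAssumedShift = 3;  // 8-byte minimum object alignment

uint32_t CompressSketch(const void* ptr) {
  if (ptr == nullptr) {
    return 0;  // null compresses to 0 in this sketch
  }
  uintptr_t offset = reinterpret_cast<uintptr_t>(ptr) - kAssumedPoolBase;
  return static_cast<uint32_t>(offset >> kAssumedShift);  // fits for pools <= 32 GiB
}

void* DecompressSketch(uint32_t compressed) {
  if (compressed == 0) {
    return nullptr;
  }
  return reinterpret_cast<void*>(kAssumedPoolBase +
                                 (uintptr_t{compressed} << kAssumedShift));
}
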


@ -18,9 +18,8 @@ namespace {
void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) { void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
// Some platforms don't have a thread cache, or it could already have been // Some platforms don't have a thread cache, or it could already have been
// disabled. // disabled.
if (!root || !root->flags.with_thread_cache) { if (!root || !root->flags.with_thread_cache)
return; return;
}
ThreadCacheRegistry::Instance().PurgeAll(); ThreadCacheRegistry::Instance().PurgeAll();
root->flags.with_thread_cache = false; root->flags.with_thread_cache = false;
@ -31,9 +30,8 @@ void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
void EnablePartitionAllocThreadCacheForRootIfDisabled( void EnablePartitionAllocThreadCacheForRootIfDisabled(
ThreadSafePartitionRoot* root) { ThreadSafePartitionRoot* root) {
if (!root) { if (!root)
return; return;
}
root->flags.with_thread_cache = true; root->flags.with_thread_cache = true;
} }
@ -44,9 +42,8 @@ void DisablePartitionAllocThreadCacheForProcess() {
auto* aligned_allocator = auto* aligned_allocator =
allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator(); allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
DisableThreadCacheForRootIfEnabled(regular_allocator); DisableThreadCacheForRootIfEnabled(regular_allocator);
if (aligned_allocator != regular_allocator) { if (aligned_allocator != regular_allocator)
DisableThreadCacheForRootIfEnabled(aligned_allocator); DisableThreadCacheForRootIfEnabled(aligned_allocator);
}
DisableThreadCacheForRootIfEnabled( DisableThreadCacheForRootIfEnabled(
allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator()); allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
} }
@ -56,79 +53,45 @@ void DisablePartitionAllocThreadCacheForProcess() {
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED) #endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
DisablePartitionAllocThreadCacheForProcess();
#else
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
ThreadCache::SwapForTesting(root);
EnablePartitionAllocThreadCacheForRootIfDisabled(root);
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
}
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
// First, disable the test thread cache we have.
DisableThreadCacheForRootIfEnabled(root);
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
ThreadCache::SwapForTesting(regular_allocator);
#else
ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
}
ThreadAllocStats GetAllocStatsForCurrentThread() { ThreadAllocStats GetAllocStatsForCurrentThread() {
ThreadCache* thread_cache = ThreadCache::Get(); ThreadCache* thread_cache = ThreadCache::Get();
if (ThreadCache::IsValid(thread_cache)) { if (ThreadCache::IsValid(thread_cache))
return thread_cache->thread_alloc_stats(); return thread_cache->thread_alloc_stats();
}
return {}; return {};
} }
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
ThreadSafePartitionRoot* root)
: root_(root) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
regular_was_enabled_ =
regular_allocator && regular_allocator->flags.with_thread_cache;
if (root_ != regular_allocator) {
// Another |root| is ThreadCache's PartitionRoot. Need to disable
// thread cache for the process.
DisablePartitionAllocThreadCacheForProcess();
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
// Replace ThreadCache's PartitionRoot.
ThreadCache::SwapForTesting(root_);
} else {
if (!regular_was_enabled_) {
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_);
}
}
#else
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
PA_CHECK(ThreadCache::Get());
}
ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
bool regular_enabled =
regular_allocator && regular_allocator->flags.with_thread_cache;
if (regular_was_enabled_) {
if (!regular_enabled) {
// Need to re-enable ThreadCache for the process.
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
// In the case, |regular_allocator| must be ThreadCache's root.
ThreadCache::SwapForTesting(regular_allocator);
} else {
// ThreadCache is enabled for the process, but we need to be
// careful about ThreadCache's PartitionRoot. If it is different from
// |regular_allocator|, we need to invoke SwapForTesting().
if (regular_allocator != root_) {
ThreadCache::SwapForTesting(regular_allocator);
}
}
} else {
// ThreadCache for all processes was disabled.
DisableThreadCacheForRootIfEnabled(regular_allocator);
ThreadCache::SwapForTesting(nullptr);
}
#else
// First, disable the test thread cache we have.
DisableThreadCacheForRootIfEnabled(root_);
ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
} // namespace partition_alloc::internal } // namespace partition_alloc::internal


@ -5,38 +5,25 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_ #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_root.h" #include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h" #include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/thread_cache.h" #include "base/allocator/partition_allocator/thread_cache.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
// These two functions are unsafe to run if there are multiple threads running
// in the process.
//
// Disables the thread cache for the entire process, and replaces it with a
// thread cache for |root|.
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// Disables the current thread cache, and replaces it with the default for the
// process.
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// Get allocation stats for the thread cache partition on the current // Get allocation stats for the thread cache partition on the current
// thread. See the documentation of ThreadAllocStats for details. // thread. See the documentation of ThreadAllocStats for details.
ThreadAllocStats GetAllocStatsForCurrentThread(); ThreadAllocStats GetAllocStatsForCurrentThread();
// Creates a scope for testing which:
// - if the given |root| is a default malloc root for the entire process,
// enables the thread cache for the entire process.
// (This may happen if UsePartitionAllocAsMalloc is enabled.)
// - otherwise, disables the thread cache for the entire process, and
// replaces it with a thread cache for |root|.
// This class is unsafe to run if there are multiple threads running
// in the process.
class ThreadCacheProcessScopeForTesting {
public:
explicit ThreadCacheProcessScopeForTesting(ThreadSafePartitionRoot* root);
~ThreadCacheProcessScopeForTesting();
ThreadCacheProcessScopeForTesting() = delete;
private:
ThreadSafePartitionRoot* root_ = nullptr;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool regular_was_enabled_ = false;
#endif
};
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
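
This header replaces the SwapOutProcessThreadCacheForTesting()/SwapInProcessThreadCacheForTesting() pair with an RAII scope. A hypothetical use in a test might look like the sketch below; the root pointer and the Alloc/Free calls are illustrative and assume the caller already owns a suitable partition:

#include "base/allocator/partition_allocator/extended_api.h"

// |test_root| is assumed to be a partition the test owns. The scope swaps the
// thread cache over to |test_root| on entry and restores the previous
// process-wide configuration when it is destroyed.
void ExerciseThreadCacheForTesting(
    partition_alloc::ThreadSafePartitionRoot* test_root) {
  partition_alloc::internal::ThreadCacheProcessScopeForTesting scope(test_root);
  void* object = test_root->Alloc(64, "pa-thread-cache-test");
  test_root->Free(object);
}  // Leaving the scope undoes the swap.
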


@ -12,6 +12,7 @@
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h" #include "base/allocator/partition_allocator/reservation_offset_table.h"
namespace partition_alloc::internal { namespace partition_alloc::internal {
@ -26,7 +27,7 @@ constexpr size_t kFreeSlotBitmapOffsetMask = kFreeSlotBitmapBitsPerCell - 1;
constexpr size_t kFreeSlotBitmapSize = constexpr size_t kFreeSlotBitmapSize =
(kSuperPageSize / kSmallestBucket) / CHAR_BIT; (kSuperPageSize / kSmallestBucket) / CHAR_BIT;
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedFreeSlotBitmapSize() { ReservedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP) #if BUILDFLAG(USE_FREESLOT_BITMAP)
return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize()); return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize());
@ -35,7 +36,7 @@ ReservedFreeSlotBitmapSize() {
#endif #endif
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
CommittedFreeSlotBitmapSize() { CommittedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP) #if BUILDFLAG(USE_FREESLOT_BITMAP)
return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize()); return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize());
@ -44,7 +45,7 @@ CommittedFreeSlotBitmapSize() {
#endif #endif
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumPartitionPagesPerFreeSlotBitmap() { NumPartitionPagesPerFreeSlotBitmap() {
return ReservedFreeSlotBitmapSize() / PartitionPageSize(); return ReservedFreeSlotBitmapSize() / PartitionPageSize();
} }
@ -52,10 +53,11 @@ NumPartitionPagesPerFreeSlotBitmap() {
#if BUILDFLAG(USE_FREESLOT_BITMAP) #if BUILDFLAG(USE_FREESLOT_BITMAP)
PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) { PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment)); PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page + PartitionPageSize(); return super_page + PartitionPageSize() +
(IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0);
} }
#endif #endif
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_
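
The free-slot bitmap reserves one bit per smallest possible slot in a super page. As a worked example with values PartitionAlloc commonly uses (assumed here: 2 MiB super pages, 16-byte smallest bucket, 4 KiB system pages, 16 KiB partition pages), the raw bitmap is (2 MiB / 16) / 8 = 16 KiB, and both the reserved and committed sizes round up to that same 16 KiB:

#include <climits>
#include <cstddef>

// Assumed constants for the worked example; the real values are
// platform-dependent and defined in partition_alloc_constants.h.
constexpr size_t kSuperPageSize = 2 * 1024 * 1024;  // 2 MiB
constexpr size_t kSmallestBucket = 16;              // smallest slot size
constexpr size_t kSystemPageSize = 4 * 1024;
constexpr size_t kPartitionPageSize = 16 * 1024;

constexpr size_t kFreeSlotBitmapSize =
    (kSuperPageSize / kSmallestBucket) / CHAR_BIT;  // one bit per slot
static_assert(kFreeSlotBitmapSize == 16 * 1024, "16 KiB of bitmap");

constexpr size_t AlignUp(size_t value, size_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}
// Reserved (partition-page granularity) and committed (system-page
// granularity) sizes both stay at 16 KiB for these inputs.
static_assert(AlignUp(kFreeSlotBitmapSize, kPartitionPageSize) == 16 * 1024, "");
static_assert(AlignUp(kFreeSlotBitmapSize, kSystemPageSize) == 16 * 1024, "");
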


@ -65,9 +65,8 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan( super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan(
root.get(), super_page_count, 0); root.get(), super_page_count, 0);
if (!super_page_span_start) { if (!super_page_span_start)
return nullptr; return nullptr;
}
#if defined(ARCH_CPU_64_BITS) #if defined(ARCH_CPU_64_BITS)
// Mapping the GWP-ASan region in to the lower 32-bits of address space // Mapping the GWP-ASan region in to the lower 32-bits of address space


@ -70,7 +70,7 @@ namespace internal {
// The crash is generated in a PA_NOINLINE function so that we can classify the // The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as // crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack. // PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void OnNoMemory(size_t size) { [[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED OnNoMemory(size_t size) {
RunPartitionAllocOomCallback(); RunPartitionAllocOomCallback();
TerminateBecauseOutOfMemory(size); TerminateBecauseOutOfMemory(size);
PA_IMMEDIATE_CRASH(); PA_IMMEDIATE_CRASH();


@ -49,8 +49,8 @@ namespace internal {
// The crash is generated in a PA_NOINLINE function so that we can classify the // The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as // crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack. // PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_NOT_TAIL_CALLED PA_COMPONENT_EXPORT( [[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) void PA_NOT_TAIL_CALLED
PARTITION_ALLOC) void OnNoMemory(size_t size); OnNoMemory(size_t size);
// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom // OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert. // exception on Windows to signal this is OOM and not a normal assert.


@ -19,9 +19,8 @@ void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
namespace internal { namespace internal {
void RunPartitionAllocOomCallback() { void RunPartitionAllocOomCallback() {
if (g_oom_callback) { if (g_oom_callback)
g_oom_callback(); g_oom_callback();
}
} }
} // namespace internal } // namespace internal


@ -113,11 +113,10 @@ uintptr_t NextAlignedWithOffset(uintptr_t address,
uintptr_t actual_offset = address & (alignment - 1); uintptr_t actual_offset = address & (alignment - 1);
uintptr_t new_address; uintptr_t new_address;
if (actual_offset <= requested_offset) { if (actual_offset <= requested_offset)
new_address = address + requested_offset - actual_offset; new_address = address + requested_offset - actual_offset;
} else { else
new_address = address + alignment + requested_offset - actual_offset; new_address = address + alignment + requested_offset - actual_offset;
}
PA_DCHECK(new_address >= address); PA_DCHECK(new_address >= address);
PA_DCHECK(new_address - address < alignment); PA_DCHECK(new_address - address < alignment);
PA_DCHECK(new_address % alignment == requested_offset); PA_DCHECK(new_address % alignment == requested_offset);
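
NextAlignedWithOffset() returns the smallest address at or above |address| whose remainder modulo |alignment| equals |requested_offset|. A self-contained mirror of the branch above, with two worked cases:

#include <cassert>
#include <cstdint>

// Mirror of the logic above, kept only for the worked example.
uintptr_t NextAlignedWithOffsetSketch(uintptr_t address,
                                      uintptr_t alignment,
                                      uintptr_t requested_offset) {
  uintptr_t actual_offset = address & (alignment - 1);
  if (actual_offset <= requested_offset) {
    return address + requested_offset - actual_offset;
  }
  return address + alignment + requested_offset - actual_offset;
}

int main() {
  // 0x5008 is 0x8 past a 4 KiB boundary, so reaching offset 0x10 only needs a
  // small forward move within the same 4 KiB block.
  assert(NextAlignedWithOffsetSketch(0x5008, 0x1000, 0x10) == 0x5010);
  // Offset 0x4 is already behind the current position; jump to the next block.
  assert(NextAlignedWithOffsetSketch(0x5008, 0x1000, 0x4) == 0x6004);
  return 0;
}
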
@ -136,9 +135,8 @@ uintptr_t SystemAllocPages(uintptr_t hint,
PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask())); PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
uintptr_t ret = internal::SystemAllocPagesInternal( uintptr_t ret = internal::SystemAllocPagesInternal(
hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc); hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
if (ret) { if (ret)
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed); g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
}
return ret; return ret;
} }
@ -212,16 +210,14 @@ uintptr_t AllocPagesWithAlignOffset(
file_descriptor_for_shared_alloc); file_descriptor_for_shared_alloc);
if (ret) { if (ret) {
// If the alignment is to our liking, we're done. // If the alignment is to our liking, we're done.
if ((ret & align_offset_mask) == align_offset) { if ((ret & align_offset_mask) == align_offset)
return ret; return ret;
}
// Free the memory and try again. // Free the memory and try again.
FreePages(ret, length); FreePages(ret, length);
} else { } else {
// |ret| is null; if this try was unhinted, we're OOM. // |ret| is null; if this try was unhinted, we're OOM.
if (internal::kHintIsAdvisory || !address) { if (internal::kHintIsAdvisory || !address)
return 0; return 0;
}
} }
#if defined(ARCH_CPU_32_BITS) #if defined(ARCH_CPU_32_BITS)
@ -372,9 +368,8 @@ bool ReserveAddressSpace(size_t size) {
bool ReleaseReservation() { bool ReleaseReservation() {
// To avoid deadlock, call only FreePages. // To avoid deadlock, call only FreePages.
internal::ScopedGuard guard(GetReserveLock()); internal::ScopedGuard guard(GetReserveLock());
if (!s_reservation_address) { if (!s_reservation_address)
return false; return false;
}
FreePages(s_reservation_address, s_reservation_size); FreePages(s_reservation_address, s_reservation_size);
s_reservation_address = 0; s_reservation_address = 0;


@ -34,12 +34,12 @@ struct PageAccessibilityConfiguration {
}; };
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
constexpr explicit PageAccessibilityConfiguration(Permissions permissions) explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions), pkey(0) {} : permissions(permissions), pkey(0) {}
constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey) constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey)
: permissions(permissions), pkey(pkey) {} : permissions(permissions), pkey(pkey) {}
#else #else
constexpr explicit PageAccessibilityConfiguration(Permissions permissions) explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions) {} : permissions(permissions) {}
#endif // BUILDFLAG(ENABLE_PKEYS) #endif // BUILDFLAG(ENABLE_PKEYS)
@ -300,7 +300,7 @@ void DiscardSystemPages(void* address, size_t length);
// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns // Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0. // 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundUpToSystemPage(uintptr_t address) { RoundUpToSystemPage(uintptr_t address) {
return (address + internal::SystemPageOffsetMask()) & return (address + internal::SystemPageOffsetMask()) &
internal::SystemPageBaseMask(); internal::SystemPageBaseMask();
@ -308,14 +308,14 @@ RoundUpToSystemPage(uintptr_t address) {
// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns // Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0. // 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundDownToSystemPage(uintptr_t address) { RoundDownToSystemPage(uintptr_t address) {
return address & internal::SystemPageBaseMask(); return address & internal::SystemPageBaseMask();
} }
// Rounds up |address| to the next multiple of |PageAllocationGranularity()|. // Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0. // Returns 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) { RoundUpToPageAllocationGranularity(uintptr_t address) {
return (address + internal::PageAllocationGranularityOffsetMask()) & return (address + internal::PageAllocationGranularityOffsetMask()) &
internal::PageAllocationGranularityBaseMask(); internal::PageAllocationGranularityBaseMask();
@ -323,7 +323,7 @@ RoundUpToPageAllocationGranularity(uintptr_t address) {
// Rounds down |address| to the previous multiple of // Rounds down |address| to the previous multiple of
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0. // |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) { RoundDownToPageAllocationGranularity(uintptr_t address) {
return address & internal::PageAllocationGranularityBaseMask(); return address & internal::PageAllocationGranularityBaseMask();
} }


@ -7,8 +7,8 @@
#include <stddef.h> #include <stddef.h>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS) #if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
@ -66,10 +66,10 @@ extern PageCharacteristics page_characteristics;
namespace partition_alloc::internal { namespace partition_alloc::internal {
// Forward declaration, implementation below // Forward declaration, implementation below
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularity(); PageAllocationGranularity();
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityShift() { PageAllocationGranularityShift() {
#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64) #if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page // Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
@ -96,7 +96,7 @@ PageAllocationGranularityShift() {
#endif #endif
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularity() { PageAllocationGranularity() {
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS) #if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
// This is literally equivalent to |1 << PageAllocationGranularityShift()| // This is literally equivalent to |1 << PageAllocationGranularityShift()|
@ -116,17 +116,17 @@ PageAllocationGranularity() {
#endif #endif
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityOffsetMask() { PageAllocationGranularityOffsetMask() {
return PageAllocationGranularity() - 1; return PageAllocationGranularity() - 1;
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PageAllocationGranularityBaseMask() { PageAllocationGranularityBaseMask() {
return ~PageAllocationGranularityOffsetMask(); return ~PageAllocationGranularityOffsetMask();
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageShift() { SystemPageShift() {
// On Windows allocation granularity is higher than the page size. This comes // On Windows allocation granularity is higher than the page size. This comes
// into play when reserving address space range (allocation granularity), // into play when reserving address space range (allocation granularity),
@ -138,7 +138,7 @@ SystemPageShift() {
#endif #endif
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageSize() { SystemPageSize() {
#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \ #if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
(BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)) (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
@ -151,12 +151,12 @@ SystemPageSize() {
#endif #endif
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageOffsetMask() { SystemPageOffsetMask() {
return SystemPageSize() - 1; return SystemPageSize() - 1;
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SystemPageBaseMask() { SystemPageBaseMask() {
return ~SystemPageOffsetMask(); return ~SystemPageOffsetMask();
} }


@ -111,8 +111,9 @@ uintptr_t SystemAllocPagesInternal(
} }
uint64_t address; uint64_t address;
status = zx::vmar::root_self()->map(options, vmar_offset, vmo, status =
/*vmo_offset=*/0, length, &address); zx::vmar::root_self()->map(options, vmar_offset, vmo,
/*vmo_offset=*/0, length, &address);
if (status != ZX_OK) { if (status != ZX_OK) {
// map() is expected to fail if |hint| is set to an already-in-use location. // map() is expected to fail if |hint| is set to an already-in-use location.
if (!hint) { if (!hint) {


@ -130,9 +130,8 @@ bool UseMapJit() {
base::ScopedCFTypeRef<CFTypeRef> jit_entitlement( base::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
SecTaskCopyValueForEntitlement( SecTaskCopyValueForEntitlement(
task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr)); task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
if (!jit_entitlement) { if (!jit_entitlement)
return false; return false;
}
return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) == return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
kCFBooleanTrue; kCFBooleanTrue;
@ -249,9 +248,8 @@ void SetSystemPagesAccessInternal(
// //
// In this case, we are almost certainly bumping into the sandbox limit, mark // In this case, we are almost certainly bumping into the sandbox limit, mark
// the crash as OOM. See SandboxLinux::LimitAddressSpace() for details. // the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE)) { if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
OOM_CRASH(length); OOM_CRASH(length);
}
PA_PCHECK(0 == ret); PA_PCHECK(0 == ret);
} }
@ -367,9 +365,8 @@ bool TryRecommitSystemPagesInternal(
if (accessibility_disposition == if (accessibility_disposition ==
PageAccessibilityDisposition::kRequireUpdate) { PageAccessibilityDisposition::kRequireUpdate) {
bool ok = TrySetSystemPagesAccess(address, length, accessibility); bool ok = TrySetSystemPagesAccess(address, length, accessibility);
if (!ok) { if (!ok)
return false; return false;
}
} }
#if BUILDFLAG(IS_APPLE) #if BUILDFLAG(IS_APPLE)


@ -72,9 +72,8 @@ void* VirtualAllocWithRetry(void* address,
// Only retry for commit failures. If this is an address space problem // Only retry for commit failures. If this is an address space problem
// (e.g. caller asked for an address which is not available), this is // (e.g. caller asked for an address which is not available), this is
// unlikely to be resolved by waiting. // unlikely to be resolved by waiting.
if (ret || !should_retry || !IsOutOfMemory(GetLastError())) { if (ret || !should_retry || !IsOutOfMemory(GetLastError()))
break; break;
}
Sleep(kDelayMs); Sleep(kDelayMs);
} }
@ -143,9 +142,8 @@ bool TrySetSystemPagesAccessInternal(
PageAccessibilityConfiguration accessibility) { PageAccessibilityConfiguration accessibility) {
void* ptr = reinterpret_cast<void*>(address); void* ptr = reinterpret_cast<void*>(address);
if (accessibility.permissions == if (accessibility.permissions ==
PageAccessibilityConfiguration::kInaccessible) { PageAccessibilityConfiguration::kInaccessible)
return VirtualFree(ptr, length, MEM_DECOMMIT) != 0; return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
}
// Call the retry path even though this function can fail, because callers of // Call the retry path even though this function can fail, because callers of
// this are likely to crash the process when this function fails, and we don't // this are likely to crash the process when this function fails, and we don't
// want that for transient failures. // want that for transient failures.
@ -169,9 +167,8 @@ void SetSystemPagesAccessInternal(
if (!VirtualAllocWithRetry(ptr, length, MEM_COMMIT, if (!VirtualAllocWithRetry(ptr, length, MEM_COMMIT,
GetAccessFlags(accessibility))) { GetAccessFlags(accessibility))) {
int32_t error = GetLastError(); int32_t error = GetLastError();
if (error == ERROR_COMMITMENT_LIMIT) { if (error == ERROR_COMMITMENT_LIMIT)
OOM_CRASH(length); OOM_CRASH(length);
}
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash // We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number. // report we get the error number.
PA_CHECK(ERROR_SUCCESS == error); PA_CHECK(ERROR_SUCCESS == error);


@ -37,16 +37,16 @@ namespace internal {
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace { class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
public: public:
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static uintptr_t RegularPoolBaseMask() { static PA_ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
return setup_.regular_pool_base_mask_; return setup_.regular_pool_base_mask_;
} }
#else #else
PA_ALWAYS_INLINE static constexpr uintptr_t RegularPoolBaseMask() { static PA_ALWAYS_INLINE constexpr uintptr_t RegularPoolBaseMask() {
return kRegularPoolBaseMask; return kRegularPoolBaseMask;
} }
#endif #endif
PA_ALWAYS_INLINE static std::pair<pool_handle, uintptr_t> GetPoolAndOffset( static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
uintptr_t address) { uintptr_t address) {
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used. // When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
@ -76,10 +76,10 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
} }
return std::make_pair(pool, address - base); return std::make_pair(pool, address - base);
} }
PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMaxSize() { static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMaxSize() {
return kConfigurablePoolMaxSize; return kConfigurablePoolMaxSize;
} }
PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMinSize() { static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMinSize() {
return kConfigurablePoolMinSize; return kConfigurablePoolMinSize;
} }
@ -100,7 +100,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
static void UninitForTesting(); static void UninitForTesting();
static void UninitConfigurablePoolForTesting(); static void UninitConfigurablePoolForTesting();
PA_ALWAYS_INLINE static bool IsInitialized() { static PA_ALWAYS_INLINE bool IsInitialized() {
// Either neither or both regular and BRP pool are initialized. The // Either neither or both regular and BRP pool are initialized. The
// configurable and pkey pool are initialized separately. // configurable and pkey pool are initialized separately.
if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) { if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
@ -112,19 +112,19 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return false; return false;
} }
PA_ALWAYS_INLINE static bool IsConfigurablePoolInitialized() { static PA_ALWAYS_INLINE bool IsConfigurablePoolInitialized() {
return setup_.configurable_pool_base_address_ != return setup_.configurable_pool_base_address_ !=
kUninitializedPoolBaseAddress; kUninitializedPoolBaseAddress;
} }
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
PA_ALWAYS_INLINE static bool IsPkeyPoolInitialized() { static PA_ALWAYS_INLINE bool IsPkeyPoolInitialized() {
return setup_.pkey_pool_base_address_ != kUninitializedPoolBaseAddress; return setup_.pkey_pool_base_address_ != kUninitializedPoolBaseAddress;
} }
#endif #endif
// Returns false for nullptr. // Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInRegularPool(uintptr_t address) { static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_; const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
#else #else
@ -134,12 +134,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
setup_.regular_pool_base_address_; setup_.regular_pool_base_address_;
} }
PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() { static PA_ALWAYS_INLINE uintptr_t RegularPoolBase() {
return setup_.regular_pool_base_address_; return setup_.regular_pool_base_address_;
} }
// Returns false for nullptr. // Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInBRPPool(uintptr_t address) { static PA_ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_; const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
#else #else
@ -151,7 +151,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
#if PA_CONFIG(GLUE_CORE_POOLS) #if PA_CONFIG(GLUE_CORE_POOLS)
// Checks whether the address belongs to either regular or BRP pool. // Checks whether the address belongs to either regular or BRP pool.
// Returns false for nullptr. // Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInCorePools(uintptr_t address) { static PA_ALWAYS_INLINE bool IsInCorePools(uintptr_t address) {
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_; const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
#else #else
@ -166,40 +166,40 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
return ret; return ret;
} }
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static size_t CorePoolsSize() { static PA_ALWAYS_INLINE size_t CorePoolsSize() {
return RegularPoolSize() * 2; return RegularPoolSize() * 2;
} }
#else #else
PA_ALWAYS_INLINE static constexpr size_t CorePoolsSize() { static PA_ALWAYS_INLINE constexpr size_t CorePoolsSize() {
return RegularPoolSize() * 2; return RegularPoolSize() * 2;
} }
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#endif // PA_CONFIG(GLUE_CORE_POOLS) #endif // PA_CONFIG(GLUE_CORE_POOLS)
PA_ALWAYS_INLINE static uintptr_t OffsetInBRPPool(uintptr_t address) { static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
PA_DCHECK(IsInBRPPool(address)); PA_DCHECK(IsInBRPPool(address));
return address - setup_.brp_pool_base_address_; return address - setup_.brp_pool_base_address_;
} }
// Returns false for nullptr. // Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInConfigurablePool(uintptr_t address) { static PA_ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
return (address & setup_.configurable_pool_base_mask_) == return (address & setup_.configurable_pool_base_mask_) ==
setup_.configurable_pool_base_address_; setup_.configurable_pool_base_address_;
} }
PA_ALWAYS_INLINE static uintptr_t ConfigurablePoolBase() { static PA_ALWAYS_INLINE uintptr_t ConfigurablePoolBase() {
return setup_.configurable_pool_base_address_; return setup_.configurable_pool_base_address_;
} }
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
// Returns false for nullptr. // Returns false for nullptr.
PA_ALWAYS_INLINE static bool IsInPkeyPool(uintptr_t address) { static PA_ALWAYS_INLINE bool IsInPkeyPool(uintptr_t address) {
return (address & kPkeyPoolBaseMask) == setup_.pkey_pool_base_address_; return (address & kPkeyPoolBaseMask) == setup_.pkey_pool_base_address_;
} }
#endif #endif
#if PA_CONFIG(ENABLE_SHADOW_METADATA) #if PA_CONFIG(ENABLE_SHADOW_METADATA)
PA_ALWAYS_INLINE static std::ptrdiff_t ShadowPoolOffset(pool_handle pool) { static PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
if (pool == kRegularPoolHandle) { if (pool == kRegularPoolHandle) {
return regular_pool_shadow_offset_; return regular_pool_shadow_offset_;
} else if (pool == kBRPPoolHandle) { } else if (pool == kBRPPoolHandle) {
@ -222,20 +222,20 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
private: private:
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
PA_ALWAYS_INLINE static size_t RegularPoolSize(); static PA_ALWAYS_INLINE size_t RegularPoolSize();
PA_ALWAYS_INLINE static size_t BRPPoolSize(); static PA_ALWAYS_INLINE size_t BRPPoolSize();
#else #else
// The pool sizes should be as large as maximum whenever possible. // The pool sizes should be as large as maximum whenever possible.
PA_ALWAYS_INLINE static constexpr size_t RegularPoolSize() { constexpr static PA_ALWAYS_INLINE size_t RegularPoolSize() {
return kRegularPoolSize; return kRegularPoolSize;
} }
PA_ALWAYS_INLINE static constexpr size_t BRPPoolSize() { constexpr static PA_ALWAYS_INLINE size_t BRPPoolSize() {
return kBRPPoolSize; return kBRPPoolSize;
} }
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
PA_ALWAYS_INLINE static constexpr size_t PkeyPoolSize() { constexpr static PA_ALWAYS_INLINE size_t PkeyPoolSize() {
return kPkeyPoolSize; return kPkeyPoolSize;
} }
#endif #endif

View File

@ -68,15 +68,12 @@ declare_args() {
# recommended to enable PA-E above, but isn't strictly necessary. Embedders # recommended to enable PA-E above, but isn't strictly necessary. Embedders
# can create and use PA partitions explicitly. # can create and use PA partitions explicitly.
enable_pointer_compression_support = false enable_pointer_compression_support = false
# Enables a bounds check when two pointers (at least one being raw_ptr) are
# subtracted (if supported by the underlying implementation).
enable_pointer_subtraction_check = false
} }
declare_args() { declare_args() {
# Build support for Use-after-Free protection via BackupRefPtr (BRP), # Build support for Use-after-Free protection via BackupRefPtr (BRP) or
# switching the raw_ptr<T> implementation to RawPtrBackupRefImpl if active. # MTECheckedPtr, and switch the raw_ptr<T> implementation to RawPtrBackupRefImpl
# and MTECheckedPtrImp, respectively. They're mutually exclusive.
# #
# These are effective only for memory allocated from PartitionAlloc, so it is # These are effective only for memory allocated from PartitionAlloc, so it is
# recommended to enable PA-E above, but isn't strictly necessary. Embedders # recommended to enable PA-E above, but isn't strictly necessary. Embedders
@ -87,12 +84,17 @@ declare_args() {
# partition_alloc::PartitionOptions::BackupRefPtr::kEnabled. # partition_alloc::PartitionOptions::BackupRefPtr::kEnabled.
enable_backup_ref_ptr_support = enable_backup_ref_ptr_support =
use_partition_alloc && enable_backup_ref_ptr_support_default use_partition_alloc && enable_backup_ref_ptr_support_default
enable_mte_checked_ptr_support =
use_partition_alloc && enable_mte_checked_ptr_support_default
# RAW_PTR_EXCLUSION macro is disabled on official builds because it increased # RAW_PTR_EXCLUSION macro is disabled on official builds because it increased
# binary size. This flag can be used to enable it for official builds too. # binary size. This flag can be used to enable it for official builds too.
force_enable_raw_ptr_exclusion = false force_enable_raw_ptr_exclusion = false
} }
assert(!(enable_backup_ref_ptr_support && enable_mte_checked_ptr_support),
"MTECheckedPtrSupport conflicts with BRPSupport.")
assert(!enable_pointer_compression_support || glue_core_pools, assert(!enable_pointer_compression_support || glue_core_pools,
"Pointer compression relies on core pools being contiguous.") "Pointer compression relies on core pools being contiguous.")
@ -131,24 +133,16 @@ declare_args() {
enable_backup_ref_ptr_slow_checks = enable_backup_ref_ptr_slow_checks =
enable_backup_ref_ptr_slow_checks_default && enable_backup_ref_ptr_support enable_backup_ref_ptr_slow_checks_default && enable_backup_ref_ptr_support
# Enable the feature flag required to activate backup ref pointers. That is to
# say `PartitionAllocBackupRefPtr`.
#
# This is meant to be used primarily on bots. It is much easier to override
# the feature flags using a binary flag instead of updating multiple bots'
# scripts to pass command line arguments.
enable_backup_ref_ptr_feature_flag = false
enable_dangling_raw_ptr_checks = enable_dangling_raw_ptr_checks =
enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support
# Enable the feature flag required to check for dangling pointers. That is to # Enable the feature flags required to check for dangling pointers. That is to
# say `PartitionAllocDanglingPtr`. # say `PartitionAllocBackupRefPtr` and `PartitionAllocDanglingPtr`.
# #
# This is meant to be used primarily on bots. It is much easier to override # This is meant to be used on bots only. It is much easier to override the
# the feature flags using a binary flag instead of updating multiple bots' # feature flags using a binary flag instead of updating multiple bots'
# scripts to pass command line arguments. # scripts to pass command line arguments.
enable_dangling_raw_ptr_feature_flag = false enable_dangling_raw_ptr_feature_flags_for_bots = false
# Enables the dangling raw_ptr checks feature for the performance experiment. # Enables the dangling raw_ptr checks feature for the performance experiment.
# Not every dangling pointer has been fixed or annotated yet. To avoid # Not every dangling pointer has been fixed or annotated yet. To avoid
@ -160,7 +154,6 @@ declare_args() {
# to go through build_overrides # to go through build_overrides
enable_dangling_raw_ptr_perf_experiment = false enable_dangling_raw_ptr_perf_experiment = false
# Set to `enable_backup_ref_ptr_support && has_64_bit_pointers` when enabling.
backup_ref_ptr_poison_oob_ptr = false backup_ref_ptr_poison_oob_ptr = false
} }
@ -194,6 +187,7 @@ if (is_nacl) {
if (!use_partition_alloc) { if (!use_partition_alloc) {
use_partition_alloc_as_malloc = false use_partition_alloc_as_malloc = false
enable_backup_ref_ptr_support = false enable_backup_ref_ptr_support = false
enable_mte_checked_ptr_support = false
use_asan_backup_ref_ptr = false use_asan_backup_ref_ptr = false
use_asan_unowned_ptr = false use_asan_unowned_ptr = false
use_hookable_raw_ptr = false use_hookable_raw_ptr = false
@ -201,7 +195,6 @@ if (!use_partition_alloc) {
enable_backup_ref_ptr_slow_checks = false enable_backup_ref_ptr_slow_checks = false
enable_dangling_raw_ptr_checks = false enable_dangling_raw_ptr_checks = false
enable_dangling_raw_ptr_perf_experiment = false enable_dangling_raw_ptr_perf_experiment = false
enable_pointer_subtraction_check = false
backup_ref_ptr_poison_oob_ptr = false backup_ref_ptr_poison_oob_ptr = false
use_starscan = false use_starscan = false
} }
@ -234,8 +227,6 @@ assert(
assert( assert(
enable_backup_ref_ptr_support || !backup_ref_ptr_poison_oob_ptr, enable_backup_ref_ptr_support || !backup_ref_ptr_poison_oob_ptr,
"Can't enable poisoning for OOB pointers if BackupRefPtr isn't enabled at all") "Can't enable poisoning for OOB pointers if BackupRefPtr isn't enabled at all")
assert(has_64_bit_pointers || !backup_ref_ptr_poison_oob_ptr,
"Can't enable poisoning for OOB pointers if pointers are only 32-bit")
# AsanBackupRefPtr and AsanUnownedPtr are mutually exclusive variants of raw_ptr. # AsanBackupRefPtr and AsanUnownedPtr are mutually exclusive variants of raw_ptr.
assert( assert(

View File

@ -13,6 +13,7 @@
#include <type_traits> #include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h" #include "build/build_config.h"

View File

@ -7,6 +7,19 @@
#include "build/build_config.h" #include "build/build_config.h"
// This is a wrapper around `__has_cpp_attribute`, which can be used to test for
// the presence of an attribute. In case the compiler does not support this
// macro it will simply evaluate to 0.
//
// References:
// https://wg21.link/sd6#testing-for-the-presence-of-an-attribute-__has_cpp_attribute
// https://wg21.link/cpp.cond#:__has_cpp_attribute
#if defined(__has_cpp_attribute)
#define PA_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
#define PA_HAS_CPP_ATTRIBUTE(x) 0
#endif
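As a rough illustration of the wrapper above (not taken from the diff; the PA_EXAMPLE_NODISCARD name is made up), it lets a header apply a standard attribute only when the compiler understands it:

#if PA_HAS_CPP_ATTRIBUTE(nodiscard)
#define PA_EXAMPLE_NODISCARD [[nodiscard]]
#else
#define PA_EXAMPLE_NODISCARD  // expands to nothing on older compilers
#endif

// Callers get a warning if they ignore the result, where supported.
PA_EXAMPLE_NODISCARD int ExampleComputeChecksum();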
// A wrapper around `__has_attribute`, similar to HAS_CPP_ATTRIBUTE. // A wrapper around `__has_attribute`, similar to HAS_CPP_ATTRIBUTE.
#if defined(__has_attribute) #if defined(__has_attribute)
#define PA_HAS_ATTRIBUTE(x) __has_attribute(x) #define PA_HAS_ATTRIBUTE(x) __has_attribute(x)
@ -24,9 +37,7 @@
// Annotate a function indicating it should not be inlined. // Annotate a function indicating it should not be inlined.
// Use like: // Use like:
// NOINLINE void DoStuff() { ... } // NOINLINE void DoStuff() { ... }
#if defined(__clang__) && PA_HAS_ATTRIBUTE(noinline) #if defined(COMPILER_GCC) || defined(__clang__)
#define PA_NOINLINE [[clang::noinline]]
#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(noinline)
#define PA_NOINLINE __attribute__((noinline)) #define PA_NOINLINE __attribute__((noinline))
#elif defined(COMPILER_MSVC) #elif defined(COMPILER_MSVC)
#define PA_NOINLINE __declspec(noinline) #define PA_NOINLINE __declspec(noinline)
@ -34,10 +45,7 @@
#define PA_NOINLINE #define PA_NOINLINE
#endif #endif
#if defined(__clang__) && defined(NDEBUG) && PA_HAS_ATTRIBUTE(always_inline) #if defined(COMPILER_GCC) && defined(NDEBUG)
#define PA_ALWAYS_INLINE [[clang::always_inline]] inline
#elif defined(COMPILER_GCC) && defined(NDEBUG) && \
PA_HAS_ATTRIBUTE(always_inline)
#define PA_ALWAYS_INLINE inline __attribute__((__always_inline__)) #define PA_ALWAYS_INLINE inline __attribute__((__always_inline__))
#elif defined(COMPILER_MSVC) && defined(NDEBUG) #elif defined(COMPILER_MSVC) && defined(NDEBUG)
#define PA_ALWAYS_INLINE __forceinline #define PA_ALWAYS_INLINE __forceinline
@ -54,42 +62,72 @@
// Use like: // Use like:
// void NOT_TAIL_CALLED FooBar(); // void NOT_TAIL_CALLED FooBar();
#if defined(__clang__) && PA_HAS_ATTRIBUTE(not_tail_called) #if defined(__clang__) && PA_HAS_ATTRIBUTE(not_tail_called)
#define PA_NOT_TAIL_CALLED [[clang::not_tail_called]] #define PA_NOT_TAIL_CALLED __attribute__((not_tail_called))
#else #else
#define PA_NOT_TAIL_CALLED #define PA_NOT_TAIL_CALLED
#endif #endif
// Specify memory alignment for structs, classes, etc. // Specify memory alignment for structs, classes, etc.
// Use like: // Use like:
// class PA_ALIGNAS(16) MyClass { ... } // class ALIGNAS(16) MyClass { ... }
// PA_ALIGNAS(16) int array[4]; // ALIGNAS(16) int array[4];
// //
// In most places you can use the C++11 keyword "alignas", which is preferred. // In most places you can use the C++11 keyword "alignas", which is preferred.
// //
// Historically, compilers had trouble mixing __attribute__((...)) syntax with // But compilers have trouble mixing __attribute__((...)) syntax with
// alignas(...) syntax. However, at least Clang is very accepting nowadays. It // alignas(...) syntax.
// may be that this macro can be removed entirely. //
#if defined(__clang__) // Doesn't work in clang or gcc:
#define PA_ALIGNAS(byte_alignment) alignas(byte_alignment) // struct alignas(16) __attribute__((packed)) S { char c; };
#elif defined(COMPILER_MSVC) // Works in clang but not gcc:
// struct __attribute__((packed)) alignas(16) S2 { char c; };
// Works in clang and gcc:
// struct alignas(16) S3 { char c; } __attribute__((packed));
//
// There are also some attributes that must be specified *before* a class
// definition: visibility (used for exporting functions/classes) is one of
// these attributes. This means that it is not possible to use alignas() with a
// class that is marked as exported.
#if defined(COMPILER_MSVC)
#define PA_ALIGNAS(byte_alignment) __declspec(align(byte_alignment)) #define PA_ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(aligned) #elif defined(COMPILER_GCC)
#define PA_ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment))) #define PA_ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
#endif #endif
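A minimal sketch of how the macro might be used (hypothetical type, not from the diff), assuming the definitions above:

#include <cstdint>

// Keep a frequently written counter on its own 64-byte cache line to avoid
// false sharing; PA_ALIGNAS expands to the compiler-specific alignment
// attribute selected above.
struct PA_ALIGNAS(64) ExampleCacheLineCounter {
  std::uint64_t value = 0;
};
static_assert(alignof(ExampleCacheLineCounter) == 64, "unexpected alignment");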
// Tells the compiler a function is using a printf-style format string. // In case the compiler supports it NO_UNIQUE_ADDRESS evaluates to the C++20
// attribute [[no_unique_address]]. This allows annotating data members so that
// they need not have an address distinct from all other non-static data members
// of its class.
//
// References:
// * https://en.cppreference.com/w/cpp/language/attributes/no_unique_address
// * https://wg21.link/dcl.attr.nouniqueaddr
#if PA_HAS_CPP_ATTRIBUTE(no_unique_address)
#define PA_NO_UNIQUE_ADDRESS [[no_unique_address]]
#else
#define PA_NO_UNIQUE_ADDRESS
#endif
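For example (a sketch with made-up types, assuming the macro above), an empty policy member can be annotated so it does not enlarge its enclosing struct:

struct ExampleEmptyPolicy {};  // stateless policy object

struct ExampleHandle {
  void* ptr = nullptr;
  PA_NO_UNIQUE_ADDRESS ExampleEmptyPolicy policy;
};
// With [[no_unique_address]] available, sizeof(ExampleHandle) is typically
// sizeof(void*); without it, `policy` would add padding of its own.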
// Tell the compiler a function is using a printf-style format string.
// |format_param| is the one-based index of the format string parameter; // |format_param| is the one-based index of the format string parameter;
// |dots_param| is the one-based index of the "..." parameter. // |dots_param| is the one-based index of the "..." parameter.
// For v*printf functions (which take a va_list), pass 0 for dots_param. // For v*printf functions (which take a va_list), pass 0 for dots_param.
// (This is undocumented but matches what the system C headers do.) // (This is undocumented but matches what the system C headers do.)
// For member functions, the implicit this parameter counts as index 1. // For member functions, the implicit this parameter counts as index 1.
#if (defined(COMPILER_GCC) || defined(__clang__)) && PA_HAS_ATTRIBUTE(format) #if defined(COMPILER_GCC) || defined(__clang__)
#define PA_PRINTF_FORMAT(format_param, dots_param) \ #define PA_PRINTF_FORMAT(format_param, dots_param) \
__attribute__((format(printf, format_param, dots_param))) __attribute__((format(printf, format_param, dots_param)))
#else #else
#define PA_PRINTF_FORMAT(format_param, dots_param) #define PA_PRINTF_FORMAT(format_param, dots_param)
#endif #endif
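A usage sketch (the ExampleLogf helper is hypothetical): annotating a variadic logging function lets the compiler check the format string against its arguments.

// format_param = 1 (the format string), dots_param = 2 (the "..." parameter).
PA_PRINTF_FORMAT(1, 2) void ExampleLogf(const char* fmt, ...);

// ExampleLogf("%s: %d", name);         // would warn: %d has no argument
// ExampleLogf("%s: %d", name, count);  // checks cleanly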
// WPRINTF_FORMAT is the same, but for wide format strings.
// This doesn't appear to be implemented in any compiler yet.
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
#define PA_WPRINTF_FORMAT(format_param, dots_param)
// If available, it would look like:
// __attribute__((format(wprintf, format_param, dots_param)))
// Sanitizers annotations. // Sanitizers annotations.
#if PA_HAS_ATTRIBUTE(no_sanitize) #if PA_HAS_ATTRIBUTE(no_sanitize)
#define PA_NO_SANITIZE(what) __attribute__((no_sanitize(what))) #define PA_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
@ -106,10 +144,27 @@
// Use this to annotate code that deliberately reads uninitialized data, for // Use this to annotate code that deliberately reads uninitialized data, for
// example a GC scavenging root set pointers from the stack. // example a GC scavenging root set pointers from the stack.
#define PA_MSAN_UNPOISON(p, size) __msan_unpoison(p, size) #define PA_MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
// Check a memory region for initializedness, as if it was being used here.
// If any bits are uninitialized, crash with an MSan report.
// Use this to sanitize data which MSan won't be able to track, e.g. before
// passing data to another process via shared memory.
#define PA_MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
__msan_check_mem_is_initialized(p, size)
#else // MEMORY_SANITIZER #else // MEMORY_SANITIZER
#define PA_MSAN_UNPOISON(p, size) #define PA_MSAN_UNPOISON(p, size)
#define PA_MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
#endif // MEMORY_SANITIZER #endif // MEMORY_SANITIZER
// Macro useful for writing cross-platform function pointers.
#if !defined(PA_CDECL)
#if BUILDFLAG(IS_WIN)
#define PA_CDECL __cdecl
#else // BUILDFLAG(IS_WIN)
#define PA_CDECL
#endif // BUILDFLAG(IS_WIN)
#endif // !defined(PA_CDECL)
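A brief sketch (hypothetical names) of the cross-platform function-pointer use case the comment mentions:

// On Windows the callback type is forced to __cdecl; elsewhere PA_CDECL is
// empty and the platform default applies.
typedef void(PA_CDECL* ExampleOomCallback)(int error_code);

void ExampleSetOomCallback(ExampleOomCallback cb);  // stores `cb` for later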
// Macro for hinting that an expression is likely to be false. // Macro for hinting that an expression is likely to be false.
#if !defined(PA_UNLIKELY) #if !defined(PA_UNLIKELY)
#if defined(COMPILER_GCC) || defined(__clang__) #if defined(COMPILER_GCC) || defined(__clang__)
@ -127,6 +182,23 @@
#endif // defined(COMPILER_GCC) #endif // defined(COMPILER_GCC)
#endif // !defined(PA_LIKELY) #endif // !defined(PA_LIKELY)
// Compiler feature-detection.
// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
#if defined(__has_feature)
#define PA_HAS_FEATURE(FEATURE) __has_feature(FEATURE)
#else
#define PA_HAS_FEATURE(FEATURE) 0
#endif
#if defined(COMPILER_GCC)
#define PA_PRETTY_FUNCTION __PRETTY_FUNCTION__
#elif defined(COMPILER_MSVC)
#define PA_PRETTY_FUNCTION __FUNCSIG__
#else
// See https://en.cppreference.com/w/c/language/function_definition#func
#define PA_PRETTY_FUNCTION __func__
#endif
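For illustration (hypothetical function, assuming <cstdio>), the macro can be embedded in diagnostics to name the current function portably:

#include <cstdio>

void ExampleTraceEntry() {
  // GCC/Clang print the full signature, MSVC prints __FUNCSIG__, and other
  // compilers fall back to the plain __func__ name.
  std::printf("entering %s\n", PA_PRETTY_FUNCTION);
}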
#if !defined(PA_CPU_ARM_NEON) #if !defined(PA_CPU_ARM_NEON)
#if defined(__arm__) #if defined(__arm__)
#if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \ #if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \
@ -145,6 +217,63 @@
#endif #endif
#endif #endif
#if defined(__clang__) && PA_HAS_ATTRIBUTE(uninitialized)
// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
// the specified variable.
// Library-wide alternative is
// 'configs -= [ "//build/config/compiler:default_init_stack_vars" ]' in .gn
// file.
//
// See "init_stack_vars" in build/config/compiler/BUILD.gn and
// http://crbug.com/977230
// "init_stack_vars" is enabled for non-official builds and we hope to enable it
// in official builds in 2020 as well. The flag writes a fixed pattern into
// the uninitialized parts of all local variables. In rare cases such
// initialization is undesirable and the attribute can be used:
// 1. Degraded performance
// In most cases the compiler is able to remove the additional stores, e.g. if
// the memory is never accessed or is properly initialized later. The preserved
// stores mostly will not affect program performance. However, if the compiler
// fails to do so on some performance-critical code, we can get a visible
// regression in a benchmark.
// 2. memset, memcpy calls
// The compiler may replace some memory writes with memset or memcpy calls.
// This is not specific to -ftrivial-auto-var-init, but it becomes more likely
// with the flag. It can be a problem if the code is not linked with the C
// run-time library.
//
// Note: the flag is a security risk mitigation feature, so in the future uses
// of the attribute should be avoided when possible. However, to enable this
// mitigation on most of the code, we need to be less strict now and minimize
// the number of exceptions later. So if in doubt, feel free to use the
// attribute, but please document the problem for whoever will clean it up
// later, e.g. the platform, bot, benchmark, or test name in the patch
// description or next to the attribute.
#define PA_STACK_UNINITIALIZED __attribute__((uninitialized))
#else
#define PA_STACK_UNINITIALIZED
#endif
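A sketch of the intended use (made-up helper names): a large scratch buffer that is fully overwritten right away can opt out of pattern initialization, with the caveats documented above.

#include <cstddef>

std::size_t ExampleReadInto(char* dst, std::size_t len);  // assumed to fill dst

std::size_t ExampleCopyThrough(char* dst) {
  // The buffer is written by ExampleReadInto before any read, so skipping
  // -ftrivial-auto-var-init here only removes redundant stores.
  PA_STACK_UNINITIALIZED char scratch[4096];
  std::size_t n = ExampleReadInto(scratch, sizeof(scratch));
  for (std::size_t i = 0; i < n; ++i) {
    dst[i] = scratch[i];
  }
  return n;
}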
// Attribute "no_stack_protector" disables -fstack-protector for the specified
// function.
//
// "stack_protector" is enabled on most POSIX builds. The flag adds a canary
// to each stack frame, which on function return is checked against a reference
// canary. If the canaries do not match, it's likely that a stack buffer
// overflow has occurred, so immediately crashing will prevent exploitation in
// many cases.
//
// In some cases it's desirable to remove this, e.g. on hot functions, or if
// we have purposely changed the reference canary.
#if defined(COMPILER_GCC) || defined(__clang__)
#if PA_HAS_ATTRIBUTE(__no_stack_protector__)
#define PA_NO_STACK_PROTECTOR __attribute__((__no_stack_protector__))
#else
#define PA_NO_STACK_PROTECTOR \
__attribute__((__optimize__("-fno-stack-protector")))
#endif
#else
#define PA_NO_STACK_PROTECTOR
#endif
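A minimal sketch (hypothetical function) of where the attribute applies:

// A function that runs before per-thread state (and thus the reference
// canary) is set up, or that is too hot for the prologue/epilogue checks,
// can opt out of stack protection.
PA_NO_STACK_PROTECTOR void ExampleEarlyThreadEntry(void* param) {
  // ...must not rely on the stack canary being valid here...
  (void)param;
}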
// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints // The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
// to Clang which control what code paths are statically analyzed, // to Clang which control what code paths are statically analyzed,
// and is meant to be used in conjunction with assert & assert-like functions. // and is meant to be used in conjunction with assert & assert-like functions.
@ -214,6 +343,15 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
#define PA_TRIVIAL_ABI #define PA_TRIVIAL_ABI
#endif #endif
// Marks a member function as reinitializing a moved-from variable.
// See also
// https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization
#if defined(__clang__) && PA_HAS_ATTRIBUTE(reinitializes)
#define PA_REINITIALIZES_AFTER_MOVE [[clang::reinitializes]]
#else
#define PA_REINITIALIZES_AFTER_MOVE
#endif
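A short sketch (made-up class) of the clang-tidy interaction the comment refers to:

class ExampleBuffer {
 public:
  // Marking Reset() as reinitializing tells bugprone-use-after-move that the
  // object is valid again after this call.
  PA_REINITIALIZES_AFTER_MOVE void Reset() { data_ = nullptr; }

 private:
  void* data_ = nullptr;
};

// ExampleBuffer b = ...;
// Consume(std::move(b));
// b.Reset();  // no use-after-move warning for subsequent uses of `b`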
// Requires constant initialization. See constinit in C++20. Allows to rely on a // Requires constant initialization. See constinit in C++20. Allows to rely on a
// variable being initialized before execution, and not requiring a global // variable being initialized before execution, and not requiring a global
// constructor. // constructor.
@ -225,8 +363,10 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
#endif #endif
#if defined(__clang__) #if defined(__clang__)
#define PA_GSL_OWNER [[gsl::Owner]]
#define PA_GSL_POINTER [[gsl::Pointer]] #define PA_GSL_POINTER [[gsl::Pointer]]
#else #else
#define PA_GSL_OWNER
#define PA_GSL_POINTER #define PA_GSL_POINTER
#endif #endif

View File

@ -6,6 +6,7 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc::internal::base { namespace partition_alloc::internal::base {

View File

@ -1,33 +0,0 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
namespace partition_alloc::internal::base {
// std::is_constant_evaluated was introduced in C++20. PartitionAlloc's minimum
// supported C++ version is C++17.
#if defined(__cpp_lib_is_constant_evaluated) && \
__cpp_lib_is_constant_evaluated >= 201811L
#include <type_traits>
using std::is_constant_evaluated;
#else
// Implementation of C++20's std::is_constant_evaluated.
//
// References:
// - https://en.cppreference.com/w/cpp/types/is_constant_evaluated
// - https://wg21.link/meta.const.eval
constexpr bool is_constant_evaluated() noexcept {
return __builtin_is_constant_evaluated();
}
#endif
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
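A usage sketch assuming the shim above (ExampleStrLen is hypothetical): a constexpr function can branch between a path that is valid during constant evaluation and a faster runtime path.

constexpr int ExampleStrLen(const char* s) {
  if (partition_alloc::internal::base::is_constant_evaluated()) {
    int n = 0;  // plain loop: valid during constant evaluation
    while (s[n] != '\0') {
      ++n;
    }
    return n;
  }
  return static_cast<int>(__builtin_strlen(s));  // runtime path: use the builtin
}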

View File

@ -8,6 +8,7 @@
#include <stddef.h> #include <stddef.h>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
namespace partition_alloc::internal::base::debug { namespace partition_alloc::internal::base::debug {

View File

@ -107,6 +107,7 @@
#include <string> #include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h" #include "build/build_config.h"
// Windows-style drive letter support and pathname separator characters can be // Windows-style drive letter support and pathname separator characters can be

View File

@ -15,6 +15,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h" #include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
#include "build/build_config.h" #include "build/build_config.h"

View File

@ -0,0 +1,22 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_
namespace base {
class LapTimer;
} // namespace base
namespace partition_alloc::internal::base {
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
// the migration to the new namespaces gets done.
using ::base::LapTimer;
} // namespace partition_alloc::internal::base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_

View File

@ -12,6 +12,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h" #include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)

View File

@ -10,6 +10,7 @@
#include <string> #include <string>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
namespace partition_alloc::internal::base { namespace partition_alloc::internal::base {

View File

@ -10,6 +10,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h" #include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc { namespace partition_alloc {

View File

@ -8,6 +8,7 @@
#include <errno.h> #include <errno.h>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h" #include "build/build_config.h"
namespace partition_alloc::internal::base { namespace partition_alloc::internal::base {

View File

@ -14,6 +14,7 @@
#include <iosfwd> #include <iosfwd>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_ref.h" #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_ref.h"
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h" #include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "build/build_config.h" #include "build/build_config.h"

View File

@ -13,6 +13,7 @@
#include <iosfwd> #include <iosfwd>
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h" #include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
#include "build/build_config.h" #include "build/build_config.h"

View File

@ -13,6 +13,7 @@
#include <iosfwd> #include <iosfwd>
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "build/build_config.h" #include "build/build_config.h"
#if BUILDFLAG(IS_WIN) #if BUILDFLAG(IS_WIN)

View File

@ -70,6 +70,7 @@
#include "base/allocator/partition_allocator/chromeos_buildflags.h" #include "base/allocator/partition_allocator/chromeos_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h" #include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h" #include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "build/build_config.h" #include "build/build_config.h"

View File

@ -134,9 +134,8 @@ struct PA_DEBUGKV_ALIGN DebugKv {
for (int index = 0; index < 8; index++) { for (int index = 0; index < 8; index++) {
k[index] = key[index]; k[index] = key[index];
if (key[index] == '\0') { if (key[index] == '\0')
break; break;
}
} }
} }
}; };

View File

@ -168,8 +168,27 @@ static_assert(sizeof(void*) != 8, "");
static_assert(sizeof(void*) == 8); static_assert(sizeof(void*) == 8);
#endif #endif
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
#define PA_CONFIG_USE_OOB_POISON() 1
#else
#define PA_CONFIG_USE_OOB_POISON() 0
#endif
// Build MTECheckedPtr code.
//
// Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE.
#if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \
BUILDFLAG(HAS_64_BIT_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING)
static_assert(sizeof(void*) == 8);
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 1
#else
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 0
#endif
// Specifies whether allocation extras need to be added. // Specifies whether allocation extras need to be added.
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#define PA_CONFIG_EXTRAS_REQUIRED() 1 #define PA_CONFIG_EXTRAS_REQUIRED() 1
#else #else
#define PA_CONFIG_EXTRAS_REQUIRED() 0 #define PA_CONFIG_EXTRAS_REQUIRED() 0
@ -314,7 +333,8 @@ constexpr bool kUseLazyCommit = false;
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE) #if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
#error "Dynamically selected pool size is currently not supported" #error "Dynamically selected pool size is currently not supported"
#endif #endif
#if PA_CONFIG(HAS_MEMORY_TAGGING) #if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) || \
PA_CONFIG(HAS_MEMORY_TAGGING)
// TODO(1376980): Address MTE once it's enabled. // TODO(1376980): Address MTE once it's enabled.
#error "Compressed pointers don't support tag in the upper bits" #error "Compressed pointers don't support tag in the upper bits"
#endif #endif

View File

@ -82,36 +82,36 @@ constexpr size_t kPartitionCachelineSize = 64;
// up against the end of a system page. // up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64) #if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() { PartitionPageShift() {
return 16; // 64 KiB return 16; // 64 KiB
} }
#elif defined(ARCH_CPU_PPC64) #elif defined(ARCH_CPU_PPC64)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() { PartitionPageShift() {
return 18; // 256 KiB return 18; // 256 KiB
} }
#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \ #elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
(BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64)) (BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() { PartitionPageShift() {
return PageAllocationGranularityShift() + 2; return PageAllocationGranularityShift() + 2;
} }
#else #else
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageShift() { PartitionPageShift() {
return 14; // 16 KiB return 14; // 16 KiB
} }
#endif #endif
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageSize() { PartitionPageSize() {
return 1 << PartitionPageShift(); return 1 << PartitionPageShift();
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageOffsetMask() { PartitionPageOffsetMask() {
return PartitionPageSize() - 1; return PartitionPageSize() - 1;
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PartitionPageBaseMask() { PartitionPageBaseMask() {
return ~PartitionPageOffsetMask(); return ~PartitionPageOffsetMask();
} }
@ -131,18 +131,18 @@ constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;
// dirty a private page, which is very wasteful if we never actually store // dirty a private page, which is very wasteful if we never actually store
// objects there. // objects there.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumSystemPagesPerPartitionPage() { NumSystemPagesPerPartitionPage() {
return PartitionPageSize() >> SystemPageShift(); return PartitionPageSize() >> SystemPageShift();
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MaxSystemPagesPerRegularSlotSpan() { MaxSystemPagesPerRegularSlotSpan() {
return NumSystemPagesPerPartitionPage() * return NumSystemPagesPerPartitionPage() *
kMaxPartitionPagesPerRegularSlotSpan; kMaxPartitionPagesPerRegularSlotSpan;
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MaxRegularSlotSpanSize() { MaxRegularSlotSpanSize() {
return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift(); return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
} }
@ -179,6 +179,7 @@ constexpr size_t kHighThresholdForAlternateDistribution =
// | Guard page (4 KiB) | // | Guard page (4 KiB) |
// | Metadata page (4 KiB) | // | Metadata page (4 KiB) |
// | Guard pages (8 KiB) | // | Guard pages (8 KiB) |
// | TagBitmap |
// | Free Slot Bitmap | // | Free Slot Bitmap |
// | *Scan State Bitmap | // | *Scan State Bitmap |
// | Slot span | // | Slot span |
@ -188,6 +189,8 @@ constexpr size_t kHighThresholdForAlternateDistribution =
// | Guard pages (16 KiB) | // | Guard pages (16 KiB) |
// +-----------------------+ // +-----------------------+
// //
// TagBitmap is only present when
// PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) is true.
// Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. State // Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. State
// Bitmap is inserted for partitions that may have quarantine enabled. // Bitmap is inserted for partitions that may have quarantine enabled.
// //
@ -329,23 +332,23 @@ PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
} }
#endif // PA_CONFIG(HAS_MEMORY_TAGGING) #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumPartitionPagesPerSuperPage() { NumPartitionPagesPerSuperPage() {
return kSuperPageSize >> PartitionPageShift(); return kSuperPageSize >> PartitionPageShift();
} }
PA_ALWAYS_INLINE constexpr size_t MaxSuperPagesInPool() { constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
return kMaxSuperPagesInPool; return kMaxSuperPagesInPool;
} }
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
// In 64-bit mode, the direct map allocation granularity is super page size, // In 64-bit mode, the direct map allocation granularity is super page size,
// because this is the reservation granularity of the pools. // because this is the reservation granularity of the pools.
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularity() { constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
return kSuperPageSize; return kSuperPageSize;
} }
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() { constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
return kSuperPageShift; return kSuperPageShift;
} }
#else // BUILDFLAG(HAS_64_BIT_POINTERS) #else // BUILDFLAG(HAS_64_BIT_POINTERS)
@ -353,18 +356,18 @@ PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
// allocation granularity, which is the lowest possible address space allocation // allocation granularity, which is the lowest possible address space allocation
// unit. However, don't go below partition page size, so that pool bitmaps // unit. However, don't go below partition page size, so that pool bitmaps
// don't get too large. See kBytesPer1BitOfBRPPoolBitmap. // don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularity() { DirectMapAllocationGranularity() {
return std::max(PageAllocationGranularity(), PartitionPageSize()); return std::max(PageAllocationGranularity(), PartitionPageSize());
} }
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityShift() { DirectMapAllocationGranularityShift() {
return std::max(PageAllocationGranularityShift(), PartitionPageShift()); return std::max(PageAllocationGranularityShift(), PartitionPageShift());
} }
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
DirectMapAllocationGranularityOffsetMask() { DirectMapAllocationGranularityOffsetMask() {
return DirectMapAllocationGranularity() - 1; return DirectMapAllocationGranularity() - 1;
} }
@ -412,7 +415,7 @@ constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
// The definition of MaxDirectMapped depends only on constants that are // The definition of MaxDirectMapped depends only on constants that are
// unconditionally constexpr. Therefore it is not necessary to use // unconditionally constexpr. Therefore it is not necessary to use
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here. // PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() { constexpr PA_ALWAYS_INLINE size_t MaxDirectMapped() {
// Subtract kSuperPageSize to accommodate for granularity inside // Subtract kSuperPageSize to accommodate for granularity inside
// PartitionRoot::GetDirectMapReservationSize. // PartitionRoot::GetDirectMapReservationSize.
return (1UL << 31) - kSuperPageSize; return (1UL << 31) - kSuperPageSize;
@ -500,6 +503,18 @@ using ::partition_alloc::internal::kSuperPageSize;
using ::partition_alloc::internal::MaxDirectMapped; using ::partition_alloc::internal::MaxDirectMapped;
using ::partition_alloc::internal::PartitionPageSize; using ::partition_alloc::internal::PartitionPageSize;
// Return values to indicate where a pointer is pointing relative to the bounds
// of an allocation.
enum class PtrPosWithinAlloc {
// When PA_USE_OOB_POISON is disabled, end-of-allocation pointers are also
// considered in-bounds.
kInBounds,
#if PA_CONFIG(USE_OOB_POISON)
kAllocEnd,
#endif
kFarOOB
};
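A sketch (the checker function below is made up, not a PartitionAlloc API) of how a caller might interpret the enum:

bool ExampleIsSafeToDereference(partition_alloc::PtrPosWithinAlloc pos) {
  switch (pos) {
    case partition_alloc::PtrPosWithinAlloc::kInBounds:
      return true;   // inside the allocation
#if PA_CONFIG(USE_OOB_POISON)
    case partition_alloc::PtrPosWithinAlloc::kAllocEnd:
      return false;  // one-past-the-end: legal to hold, not to dereference
#endif
    case partition_alloc::PtrPosWithinAlloc::kFarOOB:
      return false;  // clearly out of bounds
  }
  return false;
}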
} // namespace partition_alloc } // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_ #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_

View File

@ -51,9 +51,27 @@ void CheckThatSlotOffsetIsZero(uintptr_t address);
// We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t) values. // We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t) values.
// Smaller types are also allowed. // Smaller types are also allowed.
template <typename Z> template <typename Z>
static constexpr bool is_offset_type = static constexpr bool offset_type =
std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t); std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
template <typename Z, typename = std::enable_if_t<offset_type<Z>, void>>
struct PtrDelta {
Z delta_in_bytes;
#if PA_CONFIG(USE_OOB_POISON)
// Size of the element type referenced by the pointer
size_t type_size;
#endif
constexpr PtrDelta(Z delta_in_bytes, size_t type_size)
: delta_in_bytes(delta_in_bytes)
#if PA_CONFIG(USE_OOB_POISON)
,
type_size(type_size)
#endif
{
}
};
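A hypothetical sketch of how a typed pointer-arithmetic wrapper could package a delta (ExampleMakeDelta is not part of the diff): the byte offset always travels, and the element size is kept only when OOB poisoning is compiled in.

template <typename T, typename Z>
constexpr partition_alloc::internal::PtrDelta<Z> ExampleMakeDelta(
    Z element_count) {
  // delta_in_bytes = elements * sizeof(T); type_size lets the end-of-
  // allocation check treat one-past-the-end pointers specially.
  return partition_alloc::internal::PtrDelta<Z>(
      static_cast<Z>(element_count * sizeof(T)), sizeof(T));
}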
} // namespace internal } // namespace internal
class PartitionStatsDumper; class PartitionStatsDumper;

View File

@ -71,9 +71,8 @@ void PartitionAllocHooks::AllocationObserverHookIfEnabled(
void* address, void* address,
size_t size, size_t size,
const char* type_name) { const char* type_name) {
if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed)) { if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
hook(address, size, type_name); hook(address, size, type_name);
}
} }
bool PartitionAllocHooks::AllocationOverrideHookIfEnabled( bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
@ -81,22 +80,19 @@ bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
unsigned int flags, unsigned int flags,
size_t size, size_t size,
const char* type_name) { const char* type_name) {
if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed)) { if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
return hook(out, flags, size, type_name); return hook(out, flags, size, type_name);
}
return false; return false;
} }
void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) { void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed)) { if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
hook(address); hook(address);
}
} }
bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) { bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
if (auto* hook = free_override_hook_.load(std::memory_order_relaxed)) { if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
return hook(address); return hook(address);
}
return false; return false;
} }

View File

@ -31,6 +31,8 @@
#include "base/allocator/partition_allocator/partition_direct_map_extent.h" #include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h" #include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -235,6 +237,10 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
PartitionDirectMapExtent<thread_safe>* map_extent = nullptr; PartitionDirectMapExtent<thread_safe>* map_extent = nullptr;
PartitionPage<thread_safe>* page = nullptr; PartitionPage<thread_safe>* page = nullptr;
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
const PartitionTag tag = root->GetNewPartitionTag();
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
{ {
// Getting memory for direct-mapped allocations doesn't interact with the // Getting memory for direct-mapped allocations doesn't interact with the
// rest of the allocator, but takes a long time, as it involves several // rest of the allocator, but takes a long time, as it involves several
@ -449,6 +455,10 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
map_extent->reservation_size = reservation_size; map_extent->reservation_size = reservation_size;
map_extent->padding_for_alignment = padding_for_alignment; map_extent->padding_for_alignment = padding_for_alignment;
map_extent->bucket = &metadata->bucket; map_extent->bucket = &metadata->bucket;
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
DirectMapPartitionTagSetValue(slot_start, tag);
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
} }
root->lock_.AssertAcquired(); root->lock_.AssertAcquired();
@ -692,6 +702,28 @@ PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
// span. // span.
PA_DCHECK(root->next_partition_page <= root->next_partition_page_end); PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
PA_DCHECK(root->next_tag_bitmap_page);
uintptr_t next_tag_bitmap_page =
base::bits::AlignUp(reinterpret_cast<uintptr_t>(
PartitionTagPointer(root->next_partition_page)),
SystemPageSize());
if (root->next_tag_bitmap_page < next_tag_bitmap_page) {
#if BUILDFLAG(PA_DCHECK_IS_ON)
uintptr_t super_page =
reinterpret_cast<uintptr_t>(slot_span) & kSuperPageBaseMask;
uintptr_t tag_bitmap = super_page + PartitionPageSize();
PA_DCHECK(next_tag_bitmap_page <= tag_bitmap + ActualTagBitmapSize());
PA_DCHECK(next_tag_bitmap_page > tag_bitmap);
#endif
SetSystemPagesAccess(root->next_tag_bitmap_page,
next_tag_bitmap_page - root->next_tag_bitmap_page,
PageAccessibilityConfiguration(
PageAccessibilityConfiguration::kReadWrite));
root->next_tag_bitmap_page = next_tag_bitmap_page;
}
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
return slot_span; return slot_span;
} }
@ -757,7 +789,9 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
root->next_super_page = super_page + kSuperPageSize; root->next_super_page = super_page + kSuperPageSize;
uintptr_t state_bitmap = uintptr_t state_bitmap =
super_page + PartitionPageSize() + super_page + PartitionPageSize() +
(is_direct_mapped() ? 0 : ReservedFreeSlotBitmapSize()); (is_direct_mapped()
? 0
: ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize());
#if BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_STARSCAN)
PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap); PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
const size_t state_bitmap_reservation_size = const size_t state_bitmap_reservation_size =
@ -862,6 +896,19 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
payload < SuperPagesEndFromExtent(current_extent)); payload < SuperPagesEndFromExtent(current_extent));
} }
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// `root->next_partition_page` currently points at the start of the
// super page payload. We point `root->next_tag_bitmap_page` to the
// corresponding point in the tag bitmap and let the caller
// (slot span allocation) take care of the rest.
root->next_tag_bitmap_page =
base::bits::AlignDown(reinterpret_cast<uintptr_t>(
PartitionTagPointer(root->next_partition_page)),
SystemPageSize());
PA_DCHECK(root->next_tag_bitmap_page >= super_page + PartitionPageSize())
<< "tag bitmap can never intrude on metadata partition page";
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted // If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
// and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make // and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
// sure to register the super-page after it has been fully initialized. // sure to register the super-page after it has been fully initialized.
@ -883,7 +930,8 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
#if BUILDFLAG(USE_FREESLOT_BITMAP) #if BUILDFLAG(USE_FREESLOT_BITMAP)
// Commit the pages for freeslot bitmap. // Commit the pages for freeslot bitmap.
if (!is_direct_mapped()) { if (!is_direct_mapped()) {
uintptr_t freeslot_bitmap_addr = super_page + PartitionPageSize(); uintptr_t freeslot_bitmap_addr =
super_page + PartitionPageSize() + ReservedTagBitmapSize();
PA_DCHECK(SuperPageFreeSlotBitmapAddr(super_page) == freeslot_bitmap_addr); PA_DCHECK(SuperPageFreeSlotBitmapAddr(super_page) == freeslot_bitmap_addr);
ScopedSyscallTimer timer{root}; ScopedSyscallTimer timer{root};
RecommitSystemPages(freeslot_bitmap_addr, CommittedFreeSlotBitmapSize(), RecommitSystemPages(freeslot_bitmap_addr, CommittedFreeSlotBitmapSize(),
@ -969,10 +1017,14 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
} }
if (PA_LIKELY(slot_size <= kMaxMemoryTaggingSize && if (PA_LIKELY(slot_size <= kMaxMemoryTaggingSize &&
root->memory_tagging_enabled())) { root->IsMemoryTaggingEnabled())) {
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable. // Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
TagMemoryRangeRandomly(return_slot, slot_size); TagMemoryRangeRandomly(return_slot, slot_size);
} }
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(return_slot, slot_size,
root->GetNewPartitionTag());
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Add all slots that fit within so far committed pages to the free list. // Add all slots that fit within so far committed pages to the free list.
PartitionFreelistEntry* prev_entry = nullptr; PartitionFreelistEntry* prev_entry = nullptr;
@ -989,6 +1041,10 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
// No MTE-tagging for larger slots, just cast. // No MTE-tagging for larger slots, just cast.
next_slot_ptr = reinterpret_cast<void*>(next_slot); next_slot_ptr = reinterpret_cast<void*>(next_slot);
} }
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
NormalBucketPartitionTagSetValue(next_slot, slot_size,
root->GetNewPartitionTag());
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr); auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
if (!slot_span->get_freelist_head()) { if (!slot_span->get_freelist_head()) {
PA_DCHECK(!prev_entry); PA_DCHECK(!prev_entry);

View File

@ -73,13 +73,13 @@ struct PartitionBucket {
// |PartitionRoot::AllocFromBucket|.) // |PartitionRoot::AllocFromBucket|.)
// //
// Note the matching Free() functions are in SlotSpanMetadata. // Note the matching Free() functions are in SlotSpanMetadata.
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t PA_COMPONENT_EXPORT(PARTITION_ALLOC)
SlowPathAlloc(PartitionRoot<thread_safe>* root, PA_NOINLINE uintptr_t SlowPathAlloc(PartitionRoot<thread_safe>* root,
unsigned int flags, unsigned int flags,
size_t raw_size, size_t raw_size,
size_t slot_span_alignment, size_t slot_span_alignment,
bool* is_already_zeroed) bool* is_already_zeroed)
PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_); PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
PA_ALWAYS_INLINE bool CanStoreRawSize() const { PA_ALWAYS_INLINE bool CanStoreRawSize() const {
// For direct-map as well as single-slot slot spans (recognized by checking // For direct-map as well as single-slot slot spans (recognized by checking
@ -87,9 +87,8 @@ struct PartitionBucket {
// subsequent PartitionPage to store the raw size. It isn't only metadata // subsequent PartitionPage to store the raw size. It isn't only metadata
// space though, slot spans that have more than one slot can't have raw size // space though, slot spans that have more than one slot can't have raw size
// stored, because we wouldn't know which slot it applies to. // stored, because we wouldn't know which slot it applies to.
if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) { if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize()))
return false; return false;
}
PA_DCHECK((slot_size % SystemPageSize()) == 0); PA_DCHECK((slot_size % SystemPageSize()) == 0);
PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1); PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);

View File

@ -25,17 +25,15 @@ namespace partition_alloc::internal {
// sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01 // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
// for the sub_order_index). // for the sub_order_index).
constexpr uint8_t OrderIndexShift(uint8_t order) { constexpr uint8_t OrderIndexShift(uint8_t order) {
if (order < kNumBucketsPerOrderBits + 1) { if (order < kNumBucketsPerOrderBits + 1)
return 0; return 0;
}
return order - (kNumBucketsPerOrderBits + 1); return order - (kNumBucketsPerOrderBits + 1);
} }
constexpr size_t OrderSubIndexMask(uint8_t order) { constexpr size_t OrderSubIndexMask(uint8_t order) {
if (order == kBitsPerSizeT) { if (order == kBitsPerSizeT)
return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1); return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
}
return ((static_cast<size_t>(1) << order) - 1) >> return ((static_cast<size_t>(1) << order) - 1) >>
(kNumBucketsPerOrderBits + 1); (kNumBucketsPerOrderBits + 1);
@ -106,10 +104,10 @@ inline constexpr size_t kOrderSubIndexMask[PA_BITS_PER_SIZE_T + 1] = {
// The class used to generate the bucket lookup table at compile-time. // The class used to generate the bucket lookup table at compile-time.
class BucketIndexLookup final { class BucketIndexLookup final {
public: public:
PA_ALWAYS_INLINE static constexpr uint16_t GetIndexForDenserBuckets( PA_ALWAYS_INLINE constexpr static uint16_t GetIndexForDenserBuckets(
size_t size); size_t size);
PA_ALWAYS_INLINE static constexpr uint16_t GetIndexFor8Buckets(size_t size); PA_ALWAYS_INLINE constexpr static uint16_t GetIndexFor8Buckets(size_t size);
PA_ALWAYS_INLINE static constexpr uint16_t GetIndex(size_t size); PA_ALWAYS_INLINE constexpr static uint16_t GetIndex(size_t size);
constexpr BucketIndexLookup() { constexpr BucketIndexLookup() {
constexpr uint16_t sentinel_bucket_index = kNumBuckets; constexpr uint16_t sentinel_bucket_index = kNumBuckets;
@ -264,11 +262,10 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
// //
// We also do not want to go above the index for the max bucketed size. // We also do not want to go above the index for the max bucketed size.
if (size > kAlignment * kNumBucketsPerOrder && if (size > kAlignment * kNumBucketsPerOrder &&
index < GetIndexFor8Buckets(kMaxBucketed)) { index < GetIndexFor8Buckets(kMaxBucketed))
return RoundUpToOdd(index); return RoundUpToOdd(index);
} else { else
return index; return index;
}
} }
// static // static
@ -291,9 +288,8 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndex(size_t size) {
// //
// So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under // So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
// Distribution A, but to the 2^11 bucket under Distribution B. // Distribution A, but to the 2^11 bucket under Distribution B.
if (1 << 8 < size && size < kHighThresholdForAlternateDistribution) { if (1 << 8 < size && size < kHighThresholdForAlternateDistribution)
return BucketIndexLookup::GetIndexForDenserBuckets(RoundUpSize(size)); return BucketIndexLookup::GetIndexForDenserBuckets(RoundUpSize(size));
}
return BucketIndexLookup::GetIndexForDenserBuckets(size); return BucketIndexLookup::GetIndexForDenserBuckets(size);
} }
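The "Distribution A vs. Distribution B" example in the hunk above can be made concrete with a small standalone sketch. It assumes 4 linearly spaced sub-buckets per power-of-two order for the denser distribution and power-of-two-only buckets for the coarser one; the real BucketIndexLookup works in terms of kNumBucketsPerOrderBits and index rounding (RoundUpToOdd), so treat this purely as an illustration of the rounding behaviour, not as the actual lookup code.

```cpp
#include <cstdint>
#include <cstdio>

// Smallest power of two >= size (size >= 1).
uint64_t RoundUpToPowerOfTwo(uint64_t size) {
  uint64_t p = 1;
  while (p < size) {
    p <<= 1;
  }
  return p;
}

// "Denser" distribution in this sketch: buckets at (1 + k/4) * 2^order.
uint64_t DenserBucketSize(uint64_t size) {
  uint64_t order_size = RoundUpToPowerOfTwo(size) >> 1;  // Start of this size's order.
  uint64_t step = order_size / 4;                        // 4 sub-buckets per order.
  if (step == 0) {
    return RoundUpToPowerOfTwo(size);  // Too small to subdivide in this sketch.
  }
  return ((size + step - 1) / step) * step;  // Round up to the next sub-bucket.
}

int main() {
  const uint64_t size = 1434;  // ~1.4 * 2^10, the example in the comment above.
  std::printf("denser:  %llu\n",
              static_cast<unsigned long long>(DenserBucketSize(size)));     // 1536 == 1.5 * 2^10
  std::printf("coarser: %llu\n",
              static_cast<unsigned long long>(RoundUpToPowerOfTwo(size)));  // 2048 == 2^11
  return 0;
}
```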

View File

@ -23,15 +23,13 @@ inline constexpr unsigned char kCookieValue[kCookieSize] = {
constexpr size_t kPartitionCookieSizeAdjustment = kCookieSize; constexpr size_t kPartitionCookieSizeAdjustment = kCookieSize;
PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* cookie_ptr) { PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* cookie_ptr) {
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) { for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
PA_DCHECK(*cookie_ptr == kCookieValue[i]); PA_DCHECK(*cookie_ptr == kCookieValue[i]);
}
} }
PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) { PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) { for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
*cookie_ptr = kCookieValue[i]; *cookie_ptr = kCookieValue[i];
}
} }
#else #else
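The cookie helpers above amount to writing a fixed byte pattern next to each allocation and re-checking it later. A minimal self-contained sketch of that idea (with an assumed 16-byte pattern, not the real kCookieValue) might look like this:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <cstring>

constexpr size_t kCookieSize = 16;
// Assumed pattern for illustration only.
constexpr unsigned char kCookieValue[kCookieSize] = {
    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
    0x13, 0x37, 0xF0, 0x0D, 0xAB, 0xAD, 0x1D, 0xEA};

void WriteCookie(unsigned char* p) { std::memcpy(p, kCookieValue, kCookieSize); }

void CheckCookie(const unsigned char* p) {
  assert(std::memcmp(p, kCookieValue, kCookieSize) == 0 &&
         "heap corruption detected");
}

int main() {
  constexpr size_t kObjectSize = 64;
  // Slot layout in this sketch: [object | cookie].
  unsigned char* slot =
      static_cast<unsigned char*>(std::malloc(kObjectSize + kCookieSize));
  WriteCookie(slot + kObjectSize);
  // ... use the first kObjectSize bytes as the object ...
  CheckCookie(slot + kObjectSize);  // Would assert if the object overflowed.
  std::free(slot);
  return 0;
}
```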

View File

@ -41,10 +41,10 @@ class PartitionFreelistEntry;
class EncodedPartitionFreelistEntryPtr { class EncodedPartitionFreelistEntryPtr {
private: private:
PA_ALWAYS_INLINE constexpr explicit EncodedPartitionFreelistEntryPtr( explicit PA_ALWAYS_INLINE constexpr EncodedPartitionFreelistEntryPtr(
std::nullptr_t) std::nullptr_t)
: encoded_(Transform(0)) {} : encoded_(Transform(0)) {}
PA_ALWAYS_INLINE explicit EncodedPartitionFreelistEntryPtr(void* ptr) explicit PA_ALWAYS_INLINE EncodedPartitionFreelistEntryPtr(void* ptr)
// The encoded pointer stays MTE-tagged. // The encoded pointer stays MTE-tagged.
: encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {} : encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}
@ -58,7 +58,7 @@ class EncodedPartitionFreelistEntryPtr {
encoded_ = encoded; encoded_ = encoded;
} }
PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; } explicit PA_ALWAYS_INLINE constexpr operator bool() const { return encoded_; }
// Transform() works the same in both directions, so can be used for // Transform() works the same in both directions, so can be used for
// encoding and decoding. // encoding and decoding.
@ -90,7 +90,7 @@ class EncodedPartitionFreelistEntryPtr {
// the rationale and mechanism, respectively. // the rationale and mechanism, respectively.
class PartitionFreelistEntry { class PartitionFreelistEntry {
private: private:
constexpr explicit PartitionFreelistEntry(std::nullptr_t) explicit constexpr PartitionFreelistEntry(std::nullptr_t)
: encoded_next_(EncodedPartitionFreelistEntryPtr(nullptr)) : encoded_next_(EncodedPartitionFreelistEntryPtr(nullptr))
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) #if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
, ,
@ -121,13 +121,13 @@ class PartitionFreelistEntry {
// Emplaces the freelist entry at the beginning of the given slot span, and // Emplaces the freelist entry at the beginning of the given slot span, and
// initializes it as null-terminated. // initializes it as null-terminated.
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull( static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
void* slot_start_tagged) { void* slot_start_tagged) {
// |slot_start_tagged| is MTE-tagged. // |slot_start_tagged| is MTE-tagged.
auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr); auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
return entry; return entry;
} }
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull( static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
uintptr_t slot_start) { uintptr_t slot_start) {
return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start)); return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
} }
@ -138,7 +138,7 @@ class PartitionFreelistEntry {
// This freelist is built for the purpose of thread-cache. This means that we // This freelist is built for the purpose of thread-cache. This means that we
// can't perform a check that this and the next pointer belong to the same // can't perform a check that this and the next pointer belong to the same
// super page, as thread-cache spans may chain slots across super pages. // super page, as thread-cache spans may chain slots across super pages.
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitForThreadCache( static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitForThreadCache(
uintptr_t slot_start, uintptr_t slot_start,
PartitionFreelistEntry* next) { PartitionFreelistEntry* next) {
auto* entry = auto* entry =
@ -151,7 +151,7 @@ class PartitionFreelistEntry {
// //
// This is for testing purposes only! |make_shadow_match| allows you to choose // This is for testing purposes only! |make_shadow_match| allows you to choose
// if the shadow matches the next pointer properly or is trash. // if the shadow matches the next pointer properly or is trash.
PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start, static PA_ALWAYS_INLINE void EmplaceAndInitForTest(uintptr_t slot_start,
void* next, void* next,
bool make_shadow_match) { bool make_shadow_match) {
new (SlotStartAddr2Ptr(slot_start)) new (SlotStartAddr2Ptr(slot_start))
@ -225,7 +225,7 @@ class PartitionFreelistEntry {
size_t extra, size_t extra,
bool for_thread_cache) const; bool for_thread_cache) const;
PA_ALWAYS_INLINE static bool IsSane(const PartitionFreelistEntry* here, static PA_ALWAYS_INLINE bool IsSane(const PartitionFreelistEntry* here,
const PartitionFreelistEntry* next, const PartitionFreelistEntry* next,
bool for_thread_cache) { bool for_thread_cache) {
// Don't allow the freelist to be blindly followed to any location. // Don't allow the freelist to be blindly followed to any location.
@ -260,12 +260,11 @@ class PartitionFreelistEntry {
bool not_in_metadata = bool not_in_metadata =
(next_address & kSuperPageOffsetMask) >= PartitionPageSize(); (next_address & kSuperPageOffsetMask) >= PartitionPageSize();
if (for_thread_cache) { if (for_thread_cache)
return shadow_ptr_ok & not_in_metadata; return shadow_ptr_ok & not_in_metadata;
} else { else
return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap & return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
not_in_metadata; not_in_metadata;
}
} }
EncodedPartitionFreelistEntryPtr encoded_next_; EncodedPartitionFreelistEntryPtr encoded_next_;
@ -298,9 +297,8 @@ PartitionFreelistEntry::GetNextInternal(size_t extra,
bool for_thread_cache) const { bool for_thread_cache) const {
// GetNext() can be called on discarded memory, in which case |encoded_next_| // GetNext() can be called on discarded memory, in which case |encoded_next_|
// is 0, and none of the checks apply. Don't prefetch nullptr either. // is 0, and none of the checks apply. Don't prefetch nullptr either.
if (IsEncodedNextPtrZero()) { if (IsEncodedNextPtrZero())
return nullptr; return nullptr;
}
auto* ret = encoded_next_.Decode(); auto* ret = encoded_next_.Decode();
// We rely on constant propagation to remove the branches coming from // We rely on constant propagation to remove the branches coming from
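The encoded freelist pointer above can be illustrated with a stripped-down sketch. Bitwise NOT stands in for the self-inverse Transform() purely for illustration (the real encoding may differ by build configuration), and the shadow field mirrors the PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY) check performed by IsSane():

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <new>

struct FreeEntry {
  uintptr_t encoded_next;  // Transform() of the next entry's address.
  uintptr_t shadow;        // ~encoded_next, checked on pop.
};

uintptr_t Transform(uintptr_t v) { return ~v; }  // Self-inverse in this sketch.

void Push(FreeEntry*& head, void* slot) {
  auto* e = new (slot) FreeEntry;
  e->encoded_next = Transform(reinterpret_cast<uintptr_t>(head));
  e->shadow = ~e->encoded_next;
  head = e;
}

void* Pop(FreeEntry*& head) {
  if (!head) return nullptr;
  FreeEntry* e = head;
  assert(e->shadow == ~e->encoded_next && "freelist corruption");
  head = reinterpret_cast<FreeEntry*>(Transform(e->encoded_next));
  return e;
}

int main() {
  alignas(FreeEntry) unsigned char slots[4][32];  // Four fake 32-byte slots.
  FreeEntry* head = nullptr;
  for (auto& s : slots) Push(head, s);
  while (void* p = Pop(head)) std::printf("popped %p\n", p);
  return 0;
}
```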

View File

@ -13,20 +13,20 @@ namespace partition_alloc::internal {
OomFunction g_oom_handling_function = nullptr; OomFunction g_oom_handling_function = nullptr;
PA_NOINLINE PA_NOT_TAIL_CALLED void PartitionExcessiveAllocationSize( PA_NOINLINE void PA_NOT_TAIL_CALLED
size_t size) { PartitionExcessiveAllocationSize(size_t size) {
PA_NO_CODE_FOLDING(); PA_NO_CODE_FOLDING();
OOM_CRASH(size); OOM_CRASH(size);
} }
#if !defined(ARCH_CPU_64_BITS) #if !defined(ARCH_CPU_64_BITS)
PA_NOINLINE PA_NOT_TAIL_CALLED void PA_NOINLINE void PA_NOT_TAIL_CALLED
PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) { PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
PA_NO_CODE_FOLDING(); PA_NO_CODE_FOLDING();
OOM_CRASH(size); OOM_CRASH(size);
} }
[[noreturn]] PA_NOT_TAIL_CALLED PA_NOINLINE void [[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED
PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) { PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) {
PA_NO_CODE_FOLDING(); PA_NO_CODE_FOLDING();
OOM_CRASH(virtual_size); OOM_CRASH(virtual_size);

View File

@ -23,8 +23,8 @@ namespace internal {
// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory. // g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
extern OomFunction g_oom_handling_function; extern OomFunction g_oom_handling_function;
[[noreturn]] PA_NOINLINE PA_COMPONENT_EXPORT( [[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE
PARTITION_ALLOC) void PartitionExcessiveAllocationSize(size_t size); void PartitionExcessiveAllocationSize(size_t size);
#if !defined(ARCH_CPU_64_BITS) #if !defined(ARCH_CPU_64_BITS)
[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages( [[noreturn]] PA_NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(

View File

@ -105,9 +105,8 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
root->global_empty_slot_span_ring[current_index]; root->global_empty_slot_span_ring[current_index];
// The slot span might well have been re-activated, filled up, etc. before we // The slot span might well have been re-activated, filled up, etc. before we
// get around to looking at it here. // get around to looking at it here.
if (slot_span_to_decommit) { if (slot_span_to_decommit)
slot_span_to_decommit->DecommitIfPossible(root); slot_span_to_decommit->DecommitIfPossible(root);
}
// We put the empty slot span on our global list of "slot spans that were once // We put the empty slot span on our global list of "slot spans that were once
// empty", thus providing it a bit of breathing room to get re-used before we // empty", thus providing it a bit of breathing room to get re-used before we
@ -117,9 +116,8 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
empty_cache_index_ = current_index; empty_cache_index_ = current_index;
in_empty_cache_ = 1; in_empty_cache_ = 1;
++current_index; ++current_index;
if (current_index == root->global_empty_slot_span_ring_size) { if (current_index == root->global_empty_slot_span_ring_size)
current_index = 0; current_index = 0;
}
root->global_empty_slot_span_ring_index = current_index; root->global_empty_slot_span_ring_index = current_index;
// Avoid wasting too much memory on empty slot spans. Note that we only divide // Avoid wasting too much memory on empty slot spans. Note that we only divide
@ -187,9 +185,8 @@ void SlotSpanMetadata<thread_safe>::FreeSlowPath(size_t number_of_freed) {
// chances of it being filled up again. The old current slot span will be // chances of it being filled up again. The old current slot span will be
// the next slot span. // the next slot span.
PA_DCHECK(!next_slot_span); PA_DCHECK(!next_slot_span);
if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span())) { if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
next_slot_span = bucket->active_slot_spans_head; next_slot_span = bucket->active_slot_spans_head;
}
bucket->active_slot_spans_head = this; bucket->active_slot_spans_head = this;
PA_CHECK(bucket->num_full_slot_spans); // Underflow. PA_CHECK(bucket->num_full_slot_spans); // Underflow.
--bucket->num_full_slot_spans; --bucket->num_full_slot_spans;
@ -206,14 +203,12 @@ void SlotSpanMetadata<thread_safe>::FreeSlowPath(size_t number_of_freed) {
#endif #endif
// If it's the current active slot span, change it. We bounce the slot span // If it's the current active slot span, change it. We bounce the slot span
// to the empty list as a force towards defragmentation. // to the empty list as a force towards defragmentation.
if (PA_LIKELY(this == bucket->active_slot_spans_head)) { if (PA_LIKELY(this == bucket->active_slot_spans_head))
bucket->SetNewActiveSlotSpan(); bucket->SetNewActiveSlotSpan();
}
PA_DCHECK(bucket->active_slot_spans_head != this); PA_DCHECK(bucket->active_slot_spans_head != this);
if (CanStoreRawSize()) { if (CanStoreRawSize())
SetRawSize(0); SetRawSize(0);
}
RegisterEmpty(); RegisterEmpty();
} }
@ -264,9 +259,8 @@ void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans); PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]); PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]);
in_empty_cache_ = 0; in_empty_cache_ = 0;
if (is_empty()) { if (is_empty())
Decommit(root); Decommit(root);
}
} }
template <bool thread_safe> template <bool thread_safe>
@ -301,11 +295,10 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
uintptr_t slot_start = slot_span_start + (slot_size * slot_number); uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start); auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
if (!head) { if (!head)
head = entry; head = entry;
} else { else
back->SetNext(entry); back->SetNext(entry);
}
back = entry; back = entry;
} }
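RegisterEmpty() above cycles empty slot spans through a fixed-size global ring, decommitting whatever it evicts when the ring wraps. A toy version of that ring, with an assumed size of 4 entries and a plain struct standing in for SlotSpanMetadata, shows the eviction and wrap-around behaviour:

```cpp
#include <array>
#include <cstddef>
#include <cstdio>

struct Span {
  int id;
  bool committed = true;
};

constexpr size_t kRingSize = 4;  // The real ring is much larger.
std::array<Span*, kRingSize> g_ring = {};
size_t g_ring_index = 0;

void RegisterEmpty(Span* span) {
  // Evict (decommit) whatever currently occupies this ring position.
  if (Span* old = g_ring[g_ring_index]) {
    old->committed = false;
    std::printf("decommitted span %d\n", old->id);
  }
  g_ring[g_ring_index] = span;
  if (++g_ring_index == kRingSize) {
    g_ring_index = 0;  // Wrap around, like global_empty_slot_span_ring_index.
  }
}

int main() {
  Span spans[6] = {{0}, {1}, {2}, {3}, {4}, {5}};
  for (Span& s : spans) {
    RegisterEmpty(&s);  // Spans 0 and 1 are decommitted once the ring wraps.
  }
  return 0;
}
```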

View File

@ -25,6 +25,8 @@
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h" #include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h" #include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h" #include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h" #include "build/build_config.h"
@ -183,8 +185,8 @@ struct SlotSpanMetadata {
// Public API // Public API
// Note the matching Alloc() functions are in PartitionPage. // Note the matching Alloc() functions are in PartitionPage.
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) void FreeSlowPath( PA_COMPONENT_EXPORT(PARTITION_ALLOC)
size_t number_of_freed); PA_NOINLINE void FreeSlowPath(size_t number_of_freed);
PA_ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(size_t size); PA_ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(size_t size);
PA_ALWAYS_INLINE void Free(uintptr_t ptr); PA_ALWAYS_INLINE void Free(uintptr_t ptr);
// Appends the passed freelist to the slot-span's freelist. Please note that // Appends the passed freelist to the slot-span's freelist. Please note that
@ -226,6 +228,10 @@ struct SlotSpanMetadata {
PA_ALWAYS_INLINE void SetRawSize(size_t raw_size); PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
PA_ALWAYS_INLINE size_t GetRawSize() const; PA_ALWAYS_INLINE size_t GetRawSize() const;
// Only meaningful when `this` refers to a slot span in a direct map
// bucket.
PA_ALWAYS_INLINE PartitionTag* DirectMapMTETag();
PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const { PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
return freelist_head; return freelist_head;
} }
@ -345,6 +351,13 @@ struct SubsequentPageMetadata {
// the first one is used to store slot information, but the second one is // the first one is used to store slot information, but the second one is
// available for extra information) // available for extra information)
size_t raw_size; size_t raw_size;
// Specific to when `this` is used in a direct map bucket. Since direct
// maps don't have as many tags as the typical normal bucket slot span,
// we can get away with just hiding the sole tag in here.
//
// See `//base/memory/mtecheckedptr.md` for details.
PartitionTag direct_map_tag;
}; };
// Each partition page has metadata associated with it. The metadata of the // Each partition page has metadata associated with it. The metadata of the
@ -441,14 +454,14 @@ PartitionSuperPageToExtent(uintptr_t super_page) {
// Size that should be reserved for state bitmap (if present) inside a super // Size that should be reserved for state bitmap (if present) inside a super
// page. Elements of a super page are partition-page-aligned, hence the returned // page. Elements of a super page are partition-page-aligned, hence the returned
// size is a multiple of partition page size. // size is a multiple of partition page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedStateBitmapSize() { ReservedStateBitmapSize() {
return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize()); return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
} }
// Size that should be committed for state bitmap (if present) inside a super // Size that should be committed for state bitmap (if present) inside a super
// page. It is a multiple of system page size. // page. It is a multiple of system page size.
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
CommittedStateBitmapSize() { CommittedStateBitmapSize() {
return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize()); return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
} }
@ -458,8 +471,9 @@ CommittedStateBitmapSize() {
PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) { PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment)); PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page + PartitionPageSize() + return super_page + PartitionPageSize() +
(IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize() (IsManagedByNormalBuckets(super_page)
: 0); ? ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize()
: 0);
} }
PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap( PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
@ -470,18 +484,28 @@ PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
#else // BUILDFLAG(USE_STARSCAN) #else // BUILDFLAG(USE_STARSCAN)
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedStateBitmapSize() { ReservedStateBitmapSize() {
return 0ull; return 0ull;
} }
#endif // BUILDFLAG(USE_STARSCAN) #endif // BUILDFLAG(USE_STARSCAN)
// Returns the address of the tag bitmap of the `super_page`. Caller must ensure
// that bitmap exists.
PA_ALWAYS_INLINE uintptr_t SuperPageTagBitmapAddr(uintptr_t super_page) {
PA_DCHECK(IsReservationStart(super_page));
// Skip over the guard pages / metadata.
return super_page + PartitionPageSize();
}
PA_ALWAYS_INLINE uintptr_t PA_ALWAYS_INLINE uintptr_t
SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets, SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
bool with_quarantine) { bool with_quarantine) {
return PartitionPageSize() + return PartitionPageSize() +
(is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) + (is_managed_by_normal_buckets
? (ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize())
: 0) +
(with_quarantine ? ReservedStateBitmapSize() : 0); (with_quarantine ? ReservedStateBitmapSize() : 0);
} }
@ -717,6 +741,15 @@ PA_ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
return subsequent_page_metadata->raw_size; return subsequent_page_metadata->raw_size;
} }
template <bool thread_safe>
PA_ALWAYS_INLINE PartitionTag*
SlotSpanMetadata<thread_safe>::DirectMapMTETag() {
PA_DCHECK(bucket->is_direct_mapped());
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
reinterpret_cast<PartitionPage<thread_safe>*>(this));
return &subsequent_page_metadata->direct_map_tag;
}
template <bool thread_safe> template <bool thread_safe>
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead( PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
PartitionFreelistEntry* new_head) { PartitionFreelistEntry* new_head) {
@ -927,9 +960,8 @@ void IterateSlotSpans(uintptr_t super_page,
break; break;
} }
slot_span = &page->slot_span_metadata; slot_span = &page->slot_span_metadata;
if (callback(slot_span)) { if (callback(slot_span))
return; return;
}
page += slot_span->bucket->get_pages_per_slot_span(); page += slot_span->bucket->get_pages_per_slot_span();
} }
// Each super page must have at least one valid slot span. // Each super page must have at least one valid slot span.
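SuperPagePayloadStartOffset() above is plain offset arithmetic: the payload starts after the metadata partition page plus whichever bitmaps (tag, freeslot, state) are reserved for this super page. Below is a worked instance with assumed sizes (16 KiB partition pages, one partition page per bitmap, freeslot bitmap disabled); the real values are build-dependent constants.

```cpp
#include <cstddef>
#include <cstdio>

constexpr size_t kPartitionPageSize = 16 * 1024;        // Assumed.
constexpr size_t kReservedTagBitmapSize = 16 * 1024;    // Assumed (1 partition page).
constexpr size_t kReservedFreeSlotBitmapSize = 0;       // Assumed disabled.
constexpr size_t kReservedStateBitmapSize = 16 * 1024;  // Assumed (quarantine on).

constexpr size_t PayloadStartOffset(bool normal_buckets, bool with_quarantine) {
  return kPartitionPageSize +
         (normal_buckets ? kReservedTagBitmapSize + kReservedFreeSlotBitmapSize
                         : 0) +
         (with_quarantine ? kReservedStateBitmapSize : 0);
}

int main() {
  std::printf("normal bucket, quarantine:    %zu\n", PayloadStartOffset(true, true));
  std::printf("normal bucket, no quarantine: %zu\n", PayloadStartOffset(true, false));
  std::printf("direct map:                   %zu\n", PayloadStartOffset(false, false));
  return 0;
}
```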

View File

@ -93,8 +93,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
static constexpr CountType kPtrInc = 0x0000'0002; static constexpr CountType kPtrInc = 0x0000'0002;
#endif #endif
PA_ALWAYS_INLINE explicit PartitionRefCount( explicit PartitionRefCount(bool needs_mac11_malloc_size_hack);
bool needs_mac11_malloc_size_hack);
// Incrementing the counter doesn't imply any visibility about modified // Incrementing the counter doesn't imply any visibility about modified
// memory, hence relaxed atomics. For decrement, visibility is required before // memory, hence relaxed atomics. For decrement, visibility is required before
@ -191,9 +190,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
CountType old_count = CountType old_count =
count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release); count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) { if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit)))
DoubleFreeOrCorruptionDetected(old_count); DoubleFreeOrCorruptionDetected(old_count);
}
if (PA_LIKELY((old_count & ~kNeedsMac11MallocSizeHackBit) == if (PA_LIKELY((old_count & ~kNeedsMac11MallocSizeHackBit) ==
kMemoryHeldByAllocatorBit)) { kMemoryHeldByAllocatorBit)) {
@ -228,9 +226,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
PA_ALWAYS_INLINE bool IsAlive() { PA_ALWAYS_INLINE bool IsAlive() {
bool alive = bool alive =
count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit; count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
if (alive) { if (alive)
CheckCookieIfSupported(); CheckCookieIfSupported();
}
return alive; return alive;
} }
@ -351,10 +348,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
#endif #endif
}; };
PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount( PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(bool use_mac11_hack)
bool needs_mac11_malloc_size_hack)
: count_(kMemoryHeldByAllocatorBit | : count_(kMemoryHeldByAllocatorBit |
(needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0)) (use_mac11_hack ? kNeedsMac11MallocSizeHackBit : 0))
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE) #if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
, ,
brp_cookie_(CalculateCookie()) brp_cookie_(CalculateCookie())
@ -407,7 +403,7 @@ static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));
// SystemPageSize() isn't always a constexpr, in which case the compiler // SystemPageSize() isn't always a constexpr, in which case the compiler
// wouldn't know it's a power of two. The equivalence of these calculations is // wouldn't know it's a power of two. The equivalence of these calculations is
// checked in PartitionAllocGlobalInit(). // checked in PartitionAllocGlobalInit().
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
GetPartitionRefCountIndexMultiplierShift() { GetPartitionRefCountIndexMultiplierShift() {
return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift; return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
} }
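The ref-count logic above combines a "memory held by allocator" bit with a counter of outstanding raw_ptr references, so a double free and a dangling reference can both be detected. A minimal sketch follows; only kPtrInc = 0x2 is visible in the hunk above, so the held-by-allocator bit value and the memory orders here are assumptions for illustration.

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

constexpr uint32_t kMemoryHeldByAllocatorBit = 0x1;  // Assumed value.
constexpr uint32_t kPtrInc = 0x2;

struct RefCount {
  std::atomic<uint32_t> count{kMemoryHeldByAllocatorBit};

  void Acquire() { count.fetch_add(kPtrInc, std::memory_order_relaxed); }

  // Returns true when the underlying memory can actually be reused.
  bool Release() {
    uint32_t old = count.fetch_sub(kPtrInc, std::memory_order_release);
    return (old - kPtrInc) == 0;  // No refs left and the allocator already freed.
  }

  // Called by free(): clears the allocator bit; detects double free.
  bool ReleaseFromAllocator() {
    uint32_t old =
        count.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
    assert((old & kMemoryHeldByAllocatorBit) && "double free detected");
    return (old & ~kMemoryHeldByAllocatorBit) == 0;  // No dangling raw_ptrs.
  }
};

int main() {
  RefCount rc;
  rc.Acquire();                               // A raw_ptr points at the slot.
  bool reusable = rc.ReleaseFromAllocator();  // free() called while pointed-to.
  std::printf("reusable after free: %d\n", reusable);          // 0: kept quarantined.
  std::printf("reusable after last ptr: %d\n", rc.Release());  // 1.
  return 0;
}
```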

View File

@ -24,7 +24,6 @@
#include "base/allocator/partition_allocator/partition_cookie.h" #include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/pkey.h" #include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h" #include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
@ -47,9 +46,9 @@
#include <pthread.h> #include <pthread.h>
#endif #endif
#if BUILDFLAG(RECORD_ALLOC_INFO)
namespace partition_alloc::internal { namespace partition_alloc::internal {
#if BUILDFLAG(RECORD_ALLOC_INFO)
// Even if this is not hidden behind a BUILDFLAG, it should not use any memory // Even if this is not hidden behind a BUILDFLAG, it should not use any memory
// when recording is disabled, since it ends up in the .bss section. // when recording is disabled, since it ends up in the .bss section.
AllocInfo g_allocs = {}; AllocInfo g_allocs = {};
@ -58,47 +57,9 @@ void RecordAllocOrFree(uintptr_t addr, size_t size) {
g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) % g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) %
kAllocInfoSize] = {addr, size}; kAllocInfoSize] = {addr, size};
} }
#endif // BUILDFLAG(RECORD_ALLOC_INFO)
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
uintptr_t test_address,
size_t type_size) {
// Required for pointers right past an allocation. See
// |PartitionAllocGetSlotStartInBRPPool()|.
uintptr_t adjusted_address =
orig_address - kPartitionPastAllocationAdjustment;
PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);
uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
// Don't use |adjusted_address| beyond this point at all. It was needed to
// pick the right slot, but now we're dealing with very concrete addresses.
// Zero it just in case, to catch errors.
adjusted_address = 0;
auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
// Double check that ref-count is indeed present.
PA_DCHECK(root->brp_enabled());
uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
uintptr_t object_end = object_addr + slot_span->GetUsableSize(root);
if (test_address < object_addr || object_end < test_address) {
return PtrPosWithinAlloc::kFarOOB;
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
} else if (object_end - type_size < test_address) {
// Not even a single element of the type referenced by the pointer can fit
// between the pointer and the end of the object.
return PtrPosWithinAlloc::kAllocEnd;
#endif
} else {
return PtrPosWithinAlloc::kInBounds;
}
}
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
} // namespace partition_alloc::internal } // namespace partition_alloc::internal
#endif // BUILDFLAG(RECORD_ALLOC_INFO)
namespace partition_alloc { namespace partition_alloc {
@ -344,7 +305,7 @@ namespace {
// more work and larger |slot_usage| array. Lower value would probably decrease // more work and larger |slot_usage| array. Lower value would probably decrease
// chances of purging. Not empirically tested. // chances of purging. Not empirically tested.
constexpr size_t kMaxPurgeableSlotsPerSystemPage = 64; constexpr size_t kMaxPurgeableSlotsPerSystemPage = 64;
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
MinPurgeableSlotSize() { MinPurgeableSlotSize() {
return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage; return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage;
} }
@ -905,18 +866,6 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
PartitionOptions::UseConfigurablePool::kIfAvailable) && PartitionOptions::UseConfigurablePool::kIfAvailable) &&
IsConfigurablePoolAvailable(); IsConfigurablePoolAvailable();
PA_DCHECK(!flags.use_configurable_pool || IsConfigurablePoolAvailable()); PA_DCHECK(!flags.use_configurable_pool || IsConfigurablePoolAvailable());
#if PA_CONFIG(HAS_MEMORY_TAGGING)
TagViolationReportingMode memory_tagging_mode =
internal::GetMemoryTaggingModeForCurrentThread();
// Memory tagging is not supported in the configurable pool because MTE
// stores tagging information in the high bits of the pointer, which causes
// issues with components like V8's ArrayBuffers which use custom pointer
// representations. All custom representations encountered so far rely on an
// "is in configurable pool?" check, so we use that as a proxy.
flags.memory_tagging_enabled_ =
!flags.use_configurable_pool &&
memory_tagging_mode != TagViolationReportingMode::kUndefined;
#endif
// brp_enabled() is not supported in the configurable pool because // brp_enabled() is not supported in the configurable pool because
// BRP requires objects to be in a different Pool. // BRP requires objects to be in a different Pool.
@ -960,6 +909,11 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
PA_CHECK(!brp_enabled()); PA_CHECK(!brp_enabled());
flags.extras_size += internal::kPartitionRefCountSizeAdjustment; flags.extras_size += internal::kPartitionRefCountSizeAdjustment;
} }
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Add one extra byte to each slot's end to allow beyond-the-end
// pointers (crbug.com/1364476).
flags.extras_size += 1;
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#endif // PA_CONFIG(EXTRAS_REQUIRED) #endif // PA_CONFIG(EXTRAS_REQUIRED)
// Re-confirm the above PA_CHECKs, by making sure there are no // Re-confirm the above PA_CHECKs, by making sure there are no
@ -1687,5 +1641,4 @@ static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, sentinel_bucket) ==
static_assert( static_assert(
offsetof(PartitionRoot<internal::ThreadSafe>, lock_) >= 64, offsetof(PartitionRoot<internal::ThreadSafe>, lock_) >= 64,
"The lock should not be on the same cacheline as the read-mostly flags"); "The lock should not be on the same cacheline as the read-mostly flags");
} // namespace partition_alloc } // namespace partition_alloc

View File

@ -40,6 +40,7 @@
#include "base/allocator/partition_allocator/chromecast_buildflags.h" #include "base/allocator/partition_allocator/chromecast_buildflags.h"
#include "base/allocator/partition_allocator/freeslot_bitmap.h" #include "base/allocator/partition_allocator/freeslot_bitmap.h"
#include "base/allocator/partition_allocator/page_allocator.h" #include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc-inl.h" #include "base/allocator/partition_allocator/partition_alloc-inl.h"
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h" #include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
@ -63,6 +64,8 @@
#include "base/allocator/partition_allocator/partition_oom.h" #include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_ref_count.h" #include "base/allocator/partition_allocator/partition_ref_count.h"
#include "base/allocator/partition_allocator/partition_tag.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/pkey.h" #include "base/allocator/partition_allocator/pkey.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h" #include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
@ -290,9 +293,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
#endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK) #endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) #endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
bool use_configurable_pool; bool use_configurable_pool;
#if PA_CONFIG(HAS_MEMORY_TAGGING)
bool memory_tagging_enabled_;
#endif
#if BUILDFLAG(ENABLE_PKEYS) #if BUILDFLAG(ENABLE_PKEYS)
int pkey; int pkey;
@ -396,6 +396,12 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool quarantine_always_for_testing = false; bool quarantine_always_for_testing = false;
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
partition_alloc::PartitionTag current_partition_tag = 0;
// Points to the end of the committed tag bitmap region.
uintptr_t next_tag_bitmap_page = 0;
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
PartitionRoot() PartitionRoot()
: flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {} : flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {}
explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); } explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
@ -557,6 +563,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
PA_ALWAYS_INLINE size_t PA_ALWAYS_INLINE size_t
AllocationCapacityFromRequestedSize(size_t size) const; AllocationCapacityFromRequestedSize(size_t size) const;
PA_ALWAYS_INLINE bool IsMemoryTaggingEnabled() const;
// Frees memory from this partition, if possible, by decommitting pages or // Frees memory from this partition, if possible, by decommitting pages or
// even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags. // even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
void PurgeMemory(int flags); void PurgeMemory(int flags);
@ -685,7 +693,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// If quarantine is enabled and the tag overflows, move the containing slot // If quarantine is enabled and the tag overflows, move the containing slot
// to quarantine, to prevent the attacker from exploiting a pointer that has // to quarantine, to prevent the attacker from exploiting a pointer that has
// an old tag. // an old tag.
if (PA_LIKELY(memory_tagging_enabled())) { if (PA_LIKELY(IsMemoryTaggingEnabled())) {
return internal::HasOverflowTag(object); return internal::HasOverflowTag(object);
} }
// Default behaviour if MTE is not enabled for this PartitionRoot. // Default behaviour if MTE is not enabled for this PartitionRoot.
@ -705,7 +713,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return flags.scan_mode == ScanMode::kEnabled; return flags.scan_mode == ScanMode::kEnabled;
} }
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
GetDirectMapMetadataAndGuardPagesSize() { GetDirectMapMetadataAndGuardPagesSize() {
// Because we need to fake a direct-map region to look like a super page, we // Because we need to fake a direct-map region to look like a super page, we
// need to allocate more pages around the payload: // need to allocate more pages around the payload:
@ -718,7 +726,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return 2 * internal::PartitionPageSize(); return 2 * internal::PartitionPageSize();
} }
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
GetDirectMapSlotSize(size_t raw_size) { GetDirectMapSlotSize(size_t raw_size) {
// Caller must check that the size is not above the MaxDirectMapped() // Caller must check that the size is not above the MaxDirectMapped()
// limit before calling. This also guards against integer overflow in the // limit before calling. This also guards against integer overflow in the
@ -728,8 +736,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
raw_size, internal::SystemPageSize()); raw_size, internal::SystemPageSize());
} }
PA_ALWAYS_INLINE static size_t GetDirectMapReservationSize( static PA_ALWAYS_INLINE size_t
size_t padded_raw_size) { GetDirectMapReservationSize(size_t padded_raw_size) {
// Caller must check that the size is not above the MaxDirectMapped() // Caller must check that the size is not above the MaxDirectMapped()
// limit before calling. This also guards against integer overflow in the // limit before calling. This also guards against integer overflow in the
// calculation here. // calculation here.
@ -819,19 +827,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return flags.use_configurable_pool; return flags.use_configurable_pool;
} }
// Returns whether MTE is supported for this partition root. Because MTE
// stores tagging information in the high bits of the pointer, it causes
// issues with components like V8's ArrayBuffers which use custom pointer
// representations. All custom representations encountered so far rely on an
// "is in configurable pool?" check, so we use that as a proxy.
bool memory_tagging_enabled() const {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
return flags.memory_tagging_enabled_;
#else
return false;
#endif
}
// To make tests deterministic, it is necessary to uncap the amount of memory // To make tests deterministic, it is necessary to uncap the amount of memory
// waste incurred by empty slot spans. Otherwise, the size of various // waste incurred by empty slot spans. Otherwise, the size of various
// freelists, and committed memory becomes harder to reason about (and // freelists, and committed memory becomes harder to reason about (and
@ -840,6 +835,17 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
max_empty_slot_spans_dirty_bytes_shift = 0; max_empty_slot_spans_dirty_bytes_shift = 0;
} }
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
PA_ALWAYS_INLINE partition_alloc::PartitionTag GetNewPartitionTag() {
// TODO(crbug.com/1298696): performance is not an issue. We can use
// random tags in lieu of sequential ones.
auto tag = ++current_partition_tag;
tag += !tag; // Avoid 0.
current_partition_tag = tag;
return tag;
}
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
// Enables the sorting of active slot spans in PurgeMemory(). // Enables the sorting of active slot spans in PurgeMemory().
static void EnableSortActiveSlotSpans(); static void EnableSortActiveSlotSpans();
@ -916,7 +922,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// May return an invalid thread cache. // May return an invalid thread cache.
PA_ALWAYS_INLINE ThreadCache* GetOrCreateThreadCache(); PA_ALWAYS_INLINE ThreadCache* GetOrCreateThreadCache();
PA_ALWAYS_INLINE ThreadCache* GetThreadCache();
#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR) #if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
static internal::Lock& GetEnumeratorLock(); static internal::Lock& GetEnumeratorLock();
@ -941,11 +946,9 @@ class ScopedSyscallTimer {
~ScopedSyscallTimer() { ~ScopedSyscallTimer() {
root_->syscall_count.fetch_add(1, std::memory_order_relaxed); root_->syscall_count.fetch_add(1, std::memory_order_relaxed);
int64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds(); uint64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
if (elapsed_nanos > 0) { root_->syscall_total_time_ns.fetch_add(elapsed_nanos,
root_->syscall_total_time_ns.fetch_add( std::memory_order_relaxed);
static_cast<uint64_t>(elapsed_nanos), std::memory_order_relaxed);
}
} }
private: private:
@ -1041,34 +1044,47 @@ PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span); bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
} }
// Return values to indicate where a pointer is pointing relative to the bounds // Checks whether a given address stays within the same allocation slot after
// of an allocation. // modification.
enum class PtrPosWithinAlloc {
// When BACKUP_REF_PTR_POISON_OOB_PTR is disabled, end-of-allocation pointers
// are also considered in-bounds.
kInBounds,
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
kAllocEnd,
#endif
kFarOOB
};
// Checks whether `test_address` is in the same allocation slot as
// `orig_address`.
//
// This can be called after adding or subtracting from the `orig_address`
// to produce a different pointer which must still stay in the same allocation.
//
// The `type_size` is the size of the type that the raw_ptr is pointing to,
// which may be the type the allocation is holding or a compatible pointer type
// such as a base class or char*. It is used to detect pointers near the end of
// the allocation but not strictly beyond it.
// //
// This isn't a general purpose function. The caller is responsible for ensuring // This isn't a general purpose function. The caller is responsible for ensuring
// that the ref-count is in place for this allocation. // that the ref-count is in place for this allocation.
PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address, template <typename Z>
uintptr_t test_address, PA_ALWAYS_INLINE PtrPosWithinAlloc
size_t type_size); PartitionAllocIsValidPtrDelta(uintptr_t address, PtrDelta<Z> delta) {
// Required for pointers right past an allocation. See
// |PartitionAllocGetSlotStartInBRPPool()|.
uintptr_t adjusted_address = address - kPartitionPastAllocationAdjustment;
PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);
uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
// Don't use |adjusted_address| beyond this point at all. It was needed to
// pick the right slot, but now we're dealing with very concrete addresses.
// Zero it just in case, to catch errors.
adjusted_address = 0;
auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
// Double check that ref-count is indeed present.
PA_DCHECK(root->brp_enabled());
uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
uintptr_t new_address =
address + static_cast<uintptr_t>(delta.delta_in_bytes);
uintptr_t object_end = object_addr + slot_span->GetUsableSize(root);
if (new_address < object_addr || object_end < new_address) {
return PtrPosWithinAlloc::kFarOOB;
#if PA_CONFIG(USE_OOB_POISON)
} else if (object_end - delta.type_size < new_address) {
// Not even a single element of the type referenced by the pointer can fit
// between the pointer and the end of the object.
return PtrPosWithinAlloc::kAllocEnd;
#endif
} else {
return PtrPosWithinAlloc::kInBounds;
}
}
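Detached from the slot-span lookup, the classification performed by the function above reduces to a range check against the object bounds plus a "does one element of the pointee type still fit" check. A hedged standalone sketch (names are illustrative, and the kAllocEnd branch is shown unconditionally, whereas the real code gates it behind the OOB-poison config):

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum class PtrPos { kInBounds, kAllocEnd, kFarOOB };

PtrPos Classify(uintptr_t object_addr, uintptr_t object_end,
                uintptr_t new_address, size_t type_size) {
  if (new_address < object_addr || object_end < new_address) {
    return PtrPos::kFarOOB;  // Outside the allocation entirely.
  }
  if (object_end - type_size < new_address) {
    // Not even one element of the pointee type fits before the end.
    return PtrPos::kAllocEnd;
  }
  return PtrPos::kInBounds;
}

int main() {
  // Assume a 32-byte object at address 0x1000 holding 8-byte elements.
  uintptr_t start = 0x1000, end = 0x1020;
  std::printf("%d\n", static_cast<int>(Classify(start, end, 0x1008, 8)));  // kInBounds
  std::printf("%d\n", static_cast<int>(Classify(start, end, 0x1019, 8)));  // kAllocEnd
  std::printf("%d\n", static_cast<int>(Classify(start, end, 0x1030, 8)));  // kFarOOB
  return 0;
}
```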
PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) { PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
PA_DCHECK(!PartitionRefCountPointer(slot_start)->IsAlive()); PA_DCHECK(!PartitionRefCountPointer(slot_start)->IsAlive());
@ -1205,6 +1221,21 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeWithFlags(
FreeNoHooks(object); FreeNoHooks(object);
} }
// Returns whether MTE is supported for this partition root. Because MTE stores
// tagging information in the high bits of the pointer, it causes issues with
// components like V8's ArrayBuffers which use custom pointer representations.
// All custom representations encountered so far rely on an "is in configurable
// pool?" check, so we use that as a proxy.
template <bool thread_safe>
PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsMemoryTaggingEnabled()
const {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
return !flags.use_configurable_pool;
#else
return false;
#endif
}
// static // static
template <bool thread_safe> template <bool thread_safe>
PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) { PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
@ -1249,7 +1280,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
PA_DCHECK(slot_span == SlotSpan::FromSlotStart(slot_start)); PA_DCHECK(slot_span == SlotSpan::FromSlotStart(slot_start));
#if PA_CONFIG(HAS_MEMORY_TAGGING) #if PA_CONFIG(HAS_MEMORY_TAGGING)
if (PA_LIKELY(root->memory_tagging_enabled())) { if (PA_LIKELY(root->IsMemoryTaggingEnabled())) {
const size_t slot_size = slot_span->bucket->slot_size; const size_t slot_size = slot_span->bucket->slot_size;
if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) { if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
// slot_span is untagged at this point, so we have to recover its tag // slot_span is untagged at this point, so we have to recover its tag
@ -1276,6 +1307,13 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
PA_PREFETCH(slot_span); PA_PREFETCH(slot_span);
#endif // PA_CONFIG(HAS_MEMORY_TAGGING) #endif // PA_CONFIG(HAS_MEMORY_TAGGING)
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
if (!root->IsDirectMappedBucket(slot_span->bucket)) {
partition_alloc::internal::PartitionTagIncrementValue(
slot_start, slot_span->bucket->slot_size);
}
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_STARSCAN) #if BUILDFLAG(USE_STARSCAN)
// TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by // TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
// default. // default.
@ -1509,7 +1547,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeWithThreadCache(
SlotSpan* slot_span) { SlotSpan* slot_span) {
// PA_LIKELY: performance-sensitive partitions have a thread cache, // PA_LIKELY: performance-sensitive partitions have a thread cache,
// direct-mapped allocations are uncommon. // direct-mapped allocations are uncommon.
ThreadCache* thread_cache = GetThreadCache(); ThreadCache* thread_cache = GetOrCreateThreadCache();
if (PA_LIKELY(ThreadCache::IsValid(thread_cache) && if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
!IsDirectMappedBucket(slot_span->bucket))) { !IsDirectMappedBucket(slot_span->bucket))) {
size_t bucket_index = size_t bucket_index =
@ -1766,7 +1804,7 @@ PartitionRoot<thread_safe>::GetPageAccessibility() const {
PageAccessibilityConfiguration::Permissions permissions = PageAccessibilityConfiguration::Permissions permissions =
PageAccessibilityConfiguration::kReadWrite; PageAccessibilityConfiguration::kReadWrite;
#if PA_CONFIG(HAS_MEMORY_TAGGING) #if PA_CONFIG(HAS_MEMORY_TAGGING)
if (memory_tagging_enabled()) { if (IsMemoryTaggingEnabled()) {
permissions = PageAccessibilityConfiguration::kReadWriteTagged; permissions = PageAccessibilityConfiguration::kReadWriteTagged;
} }
#endif #endif
@ -2236,11 +2274,6 @@ ThreadCache* PartitionRoot<thread_safe>::GetOrCreateThreadCache() {
return thread_cache; return thread_cache;
} }
template <bool thread_safe>
ThreadCache* PartitionRoot<thread_safe>::GetThreadCache() {
return PA_LIKELY(flags.with_thread_cache) ? ThreadCache::Get() : nullptr;
}
using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>; using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
static_assert(offsetof(ThreadSafePartitionRoot, lock_) == static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==
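The RawFreeWithThreadCache() fast path earlier in this file pushes small frees onto a per-thread, per-bucket list and only falls back to the locked slow path when it cannot. A rough sketch of that shape, with assumed bucket geometry and cache limits rather than the real ThreadCache layout:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

constexpr size_t kNumBuckets = 8;
constexpr size_t kBucketStep = 16;           // Assumed: bucket i holds sizes <= 16*(i+1).
constexpr size_t kMaxCachedPerBucket = 128;  // Assumed cap.

struct ThreadCache {
  std::vector<void*> freelists[kNumBuckets];

  // Returns true if the slot was absorbed by the fast path.
  bool TryFree(void* slot, size_t bucket_index) {
    if (bucket_index >= kNumBuckets) {
      return false;  // e.g. a direct-mapped allocation: always slow path.
    }
    auto& list = freelists[bucket_index];
    if (list.size() >= kMaxCachedPerBucket) {
      return false;  // Cache full: let the slow path return it to the span.
    }
    list.push_back(slot);
    return true;
  }
};

thread_local ThreadCache g_thread_cache;

void Free(void* slot, size_t size) {
  size_t bucket_index = (size - 1) / kBucketStep;
  if (g_thread_cache.TryFree(slot, bucket_index)) {
    return;  // Fast path: no lock taken.
  }
  std::printf("slow-path free of %zu bytes\n", size);  // Stand-in for RawFree().
}

int main() {
  static unsigned char small_slot[32];
  static unsigned char huge_slot[100000];
  Free(small_slot, sizeof(small_slot));  // Cached in a per-thread bucket.
  Free(huge_slot, sizeof(huge_slot));    // Too large for any bucket: slow path.
  return 0;
}
```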

View File

@ -0,0 +1,144 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
// This file defines types and functions for `MTECheckedPtr<T>` (cf.
// `tagging.h`, which deals with real ARM MTE).
#include <string.h>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/partition_tag_types.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
namespace partition_alloc {
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
static_assert(
sizeof(PartitionTag) == internal::tag_bitmap::kPartitionTagSize,
"sizeof(PartitionTag) must be equal to bitmap::kPartitionTagSize.");
PA_ALWAYS_INLINE PartitionTag* NormalBucketPartitionTagPointer(uintptr_t addr) {
uintptr_t bitmap_base =
internal::SuperPageTagBitmapAddr(addr & internal::kSuperPageBaseMask);
const size_t bitmap_end_offset =
internal::PartitionPageSize() + internal::ReservedTagBitmapSize();
PA_DCHECK((addr & internal::kSuperPageOffsetMask) >= bitmap_end_offset);
uintptr_t offset_in_super_page =
(addr & internal::kSuperPageOffsetMask) - bitmap_end_offset;
size_t offset_in_bitmap = offset_in_super_page >>
internal::tag_bitmap::kBytesPerPartitionTagShift
<< internal::tag_bitmap::kPartitionTagSizeShift;
// No need to tag, as the tag bitmap region isn't protected by MTE.
return reinterpret_cast<PartitionTag*>(bitmap_base + offset_in_bitmap);
}
PA_ALWAYS_INLINE PartitionTag* DirectMapPartitionTagPointer(uintptr_t addr) {
uintptr_t first_super_page = internal::GetDirectMapReservationStart(addr);
PA_DCHECK(first_super_page) << "not managed by a direct map: " << addr;
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
internal::PartitionSuperPageToMetadataArea<internal::ThreadSafe>(
first_super_page));
return &subsequent_page_metadata->direct_map_tag;
}
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
// UNLIKELY because direct maps are far less common than normal buckets.
if (PA_UNLIKELY(internal::IsManagedByDirectMap(addr))) {
return DirectMapPartitionTagPointer(addr);
}
return NormalBucketPartitionTagPointer(addr);
}
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
// Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
// from the pointer. Whereas, PartitionTagPointer relates to software MTE
// (i.e. MTECheckedPtr) and it returns a pointer to the tag in memory.
return PartitionTagPointer(UntagPtr(ptr));
}
namespace internal {
PA_ALWAYS_INLINE void DirectMapPartitionTagSetValue(uintptr_t addr,
PartitionTag value) {
*DirectMapPartitionTagPointer(addr) = value;
}
PA_ALWAYS_INLINE void NormalBucketPartitionTagSetValue(uintptr_t slot_start,
size_t size,
PartitionTag value) {
PA_DCHECK((size % tag_bitmap::kBytesPerPartitionTag) == 0);
PA_DCHECK((slot_start % tag_bitmap::kBytesPerPartitionTag) == 0);
size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
PartitionTag* tag_ptr = NormalBucketPartitionTagPointer(slot_start);
if (sizeof(PartitionTag) == 1) {
memset(tag_ptr, value, tag_count);
} else {
while (tag_count-- > 0)
*tag_ptr++ = value;
}
}
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void* ptr) {
return *PartitionTagPointer(ptr);
}
PA_ALWAYS_INLINE void PartitionTagIncrementValue(uintptr_t slot_start,
size_t size) {
PartitionTag tag = *PartitionTagPointer(slot_start);
PartitionTag new_tag = tag;
++new_tag;
new_tag += !new_tag; // Avoid 0.
#if BUILDFLAG(PA_DCHECK_IS_ON)
PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
// This verifies that tags for the entire slot have the same value and that
// |size| doesn't exceed the slot size.
size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
PartitionTag* tag_ptr = PartitionTagPointer(slot_start);
while (tag_count-- > 0) {
PA_DCHECK(tag == *tag_ptr);
tag_ptr++;
}
#endif
NormalBucketPartitionTagSetValue(slot_start, size, new_tag);
}
} // namespace internal
#else // No-op versions
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(void* ptr) {
PA_NOTREACHED();
return nullptr;
}
namespace internal {
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void*) {
return 0;
}
PA_ALWAYS_INLINE void PartitionTagIncrementValue(uintptr_t slot_start,
size_t size) {}
} // namespace internal
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
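PartitionTagIncrementValue() above is the heart of the MTECheckedPtr scheme: freeing a slot bumps its tag (skipping 0), so a pointer that captured the old tag no longer matches. A self-contained sketch using a flat per-slot tag array in place of the real per-super-page bitmap:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

constexpr size_t kNumSlots = 16;
uint8_t g_slot_tags[kNumSlots] = {};  // Index = slot number in this sketch.

uint8_t IncrementTag(uint8_t tag) {
  ++tag;
  tag += !tag;  // Skip 0, exactly like PartitionTagIncrementValue().
  return tag;
}

struct CheckedPtr {
  size_t slot;
  uint8_t tag;  // Tag observed when the pointer was created.

  bool IsStillValid() const { return g_slot_tags[slot] == tag; }
};

CheckedPtr MakePtr(size_t slot) { return {slot, g_slot_tags[slot]}; }

void FreeSlot(size_t slot) { g_slot_tags[slot] = IncrementTag(g_slot_tags[slot]); }

int main() {
  g_slot_tags[3] = 1;        // Tag assigned at allocation time.
  CheckedPtr p = MakePtr(3);
  assert(p.IsStillValid());  // Live object: tags match.
  FreeSlot(3);               // Tag becomes 2.
  std::printf("dangling use detected: %d\n", !p.IsStillValid());  // 1
  return 0;
}
```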

View File

@ -0,0 +1,147 @@
// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
namespace partition_alloc::internal {
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
namespace tag_bitmap {
// kPartitionTagSize should be equal to sizeof(PartitionTag).
// PartitionTag is defined in partition_tag.h and static_assert there
// checks the condition.
static constexpr size_t kPartitionTagSizeShift = 0;
static constexpr size_t kPartitionTagSize = 1U << kPartitionTagSizeShift;
static constexpr size_t kBytesPerPartitionTagShift = 4;
// One partition tag is assigned per |kBytesPerPartitionTag| bytes in the slot
// spans.
// +-----------+ 0
// | | ====> 1 partition tag
// +-----------+ kBytesPerPartitionTag
// | | ====> 1 partition tag
// +-----------+ 2*kBytesPerPartitionTag
// ...
// +-----------+ slot_size
static constexpr size_t kBytesPerPartitionTag = 1U
<< kBytesPerPartitionTagShift;
static_assert(
kMinBucketedOrder >= kBytesPerPartitionTagShift + 1,
"MTECheckedPtr requires kBytesPerPartitionTagShift-bytes alignment.");
static constexpr size_t kBytesPerPartitionTagRatio =
kBytesPerPartitionTag / kPartitionTagSize;
static_assert(kBytesPerPartitionTag > 0,
"kBytesPerPartitionTag should be larger than 0");
static_assert(
kBytesPerPartitionTag % kPartitionTagSize == 0,
"kBytesPerPartitionTag should be multiples of sizeof(PartitionTag).");
constexpr size_t CeilCountOfUnits(size_t size, size_t unit_size) {
return (size + unit_size - 1) / unit_size;
}
} // namespace tag_bitmap
// kTagBitmapSize is calculated in the following way:
// (1) kSuperPageSize - 2 * PartitionPageSize() = kTagBitmapSize +
// SlotSpanSize()
// (2) kTagBitmapSize >= SlotSpanSize() / kBytesPerPartitionTag *
// sizeof(PartitionTag)
//--
// (1)' SlotSpanSize() = kSuperPageSize - 2 * PartitionPageSize() -
// kTagBitmapSize
// (2)' SlotSpanSize() <= kTagBitmapSize * Y
// (3)' Y = kBytesPerPartitionTag / sizeof(PartitionTag) =
// kBytesPerPartitionTagRatio
//
// kTagBitmapSize * Y >= kSuperPageSize - 2 * PartitionPageSize() - kTagBitmapSize
// (1 + Y) * kTagBitmapSize >= kSuperPageSize - 2 * PartitionPageSize()
// Finally,
// kTagBitmapSize >= (kSuperPageSize - 2 * PartitionPageSize()) / (1 + Y)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumPartitionPagesPerTagBitmap() {
return tag_bitmap::CeilCountOfUnits(
kSuperPageSize / PartitionPageSize() - 2,
tag_bitmap::kBytesPerPartitionTagRatio + 1);
}
// To make guard pages between the tag bitmap and the slot span, calculate the
// number of SystemPages of TagBitmap. If kNumSystemPagesPerTagBitmap *
// SystemPageSize() < kTagBitmapSize, guard pages will be created. (c.f. no
// guard pages if sizeof(PartitionTag) == 2.)
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
NumSystemPagesPerTagBitmap() {
return tag_bitmap::CeilCountOfUnits(
kSuperPageSize / SystemPageSize() -
2 * PartitionPageSize() / SystemPageSize(),
tag_bitmap::kBytesPerPartitionTagRatio + 1);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ActualTagBitmapSize() {
return NumSystemPagesPerTagBitmap() * SystemPageSize();
}
// PartitionPageSize-aligned tag bitmap size.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
ReservedTagBitmapSize() {
return PartitionPageSize() * NumPartitionPagesPerTagBitmap();
}
#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
static_assert(ActualTagBitmapSize() <= ReservedTagBitmapSize(),
"kActualTagBitmapSize should be smaller than or equal to "
"kReservedTagBitmapSize.");
static_assert(ReservedTagBitmapSize() - ActualTagBitmapSize() <
PartitionPageSize(),
"Unused space in the tag bitmap should be smaller than "
"PartitionPageSize()");
// The region available for slot spans is the remainder of the super page, after
// taking away the first and last partition page (for metadata and guard pages)
// and partition pages reserved for the freeslot bitmap and the tag bitmap.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
SlotSpansSize() {
return kSuperPageSize - 2 * PartitionPageSize() - ReservedTagBitmapSize();
}
static_assert(ActualTagBitmapSize() * tag_bitmap::kBytesPerPartitionTagRatio >=
SlotSpansSize(),
"bitmap is large enough to cover slot spans");
static_assert((ActualTagBitmapSize() - PartitionPageSize()) *
tag_bitmap::kBytesPerPartitionTagRatio <
SlotSpansSize(),
"any smaller bitmap wouldn't suffice to cover slots spans");
#endif // PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
#else
constexpr PA_ALWAYS_INLINE size_t NumPartitionPagesPerTagBitmap() {
return 0;
}
constexpr PA_ALWAYS_INLINE size_t ActualTagBitmapSize() {
return 0;
}
constexpr PA_ALWAYS_INLINE size_t ReservedTagBitmapSize() {
return 0;
}
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
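For reference, a minimal standalone sketch of the tag-bitmap sizing above, assuming typical PartitionAlloc geometry (2 MiB super pages, 16 KiB partition pages, 4 KiB system pages, 16-byte tag granule with 1-byte tags); the concrete constants here are assumptions for illustration, not values taken from this diff:

```cpp
// Sketch only: recomputes the sizing functions above with assumed constants.
#include <cstddef>
#include <cstdio>

constexpr size_t kSuperPageSize = 2 * 1024 * 1024;  // assumed: 2 MiB
constexpr size_t kPartitionPageSize = 16 * 1024;    // assumed: 16 KiB
constexpr size_t kSystemPageSize = 4 * 1024;        // assumed: 4 KiB
constexpr size_t kBytesPerPartitionTagRatio = 16;   // 16-byte granule / 1-byte tag

constexpr size_t CeilCountOfUnits(size_t size, size_t unit_size) {
  return (size + unit_size - 1) / unit_size;
}

constexpr size_t NumPartitionPagesPerTagBitmap() {
  return CeilCountOfUnits(kSuperPageSize / kPartitionPageSize - 2,
                          kBytesPerPartitionTagRatio + 1);
}

constexpr size_t NumSystemPagesPerTagBitmap() {
  return CeilCountOfUnits(kSuperPageSize / kSystemPageSize -
                              2 * kPartitionPageSize / kSystemPageSize,
                          kBytesPerPartitionTagRatio + 1);
}

int main() {
  constexpr size_t reserved = NumPartitionPagesPerTagBitmap() * kPartitionPageSize;
  constexpr size_t actual = NumSystemPagesPerTagBitmap() * kSystemPageSize;
  constexpr size_t slot_spans = kSuperPageSize - 2 * kPartitionPageSize - reserved;
  // Mirrors the static_asserts in the header above.
  static_assert(actual <= reserved);
  static_assert(reserved - actual < kPartitionPageSize);
  static_assert(actual * kBytesPerPartitionTagRatio >= slot_spans);
  std::printf("reserved=%zu actual=%zu slot_spans=%zu\n", reserved, actual,
              slot_spans);
}
```

With these assumed values the bitmap reserves 8 partition pages (131072 bytes) but only needs 30 system pages (122880 bytes), which is consistent with the static_asserts above.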

View File

@ -0,0 +1,25 @@
// Copyright 2022 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
#include <cstdint>
// This header defines the types for MTECheckedPtr. Canonical
// documentation available at `//base/memory/raw_ptr_mtecheckedptr.md`.
namespace partition_alloc {
// Use 8 bits for the partition tag. This is the "lower" byte of the
// two top bytes in a 64-bit pointer. The "upper" byte of the same
// is reserved for true ARM MTE.
//
// MTECheckedPtr is not yet compatible with ARM MTE, but it is a
// distant goal to have them coexist.
using PartitionTag = uint8_t;
} // namespace partition_alloc
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
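To illustrate what an 8-bit `PartitionTag` stored in the lower of the two top pointer bytes looks like in practice, here is a hedged sketch of packing and extracting such a tag; the bit position (48) and the helper names are assumptions for illustration, not the MTECheckedPtr implementation:

```cpp
// Sketch only: packs/extracts an 8-bit tag at an assumed bit position (48).
#include <cassert>
#include <cstdint>

using PartitionTag = uint8_t;

constexpr int kTagShift = 48;  // assumed: lower of the two top bytes
constexpr uint64_t kTagMask = uint64_t{0xff} << kTagShift;

constexpr uint64_t SetTag(uint64_t addr, PartitionTag tag) {
  return (addr & ~kTagMask) | (uint64_t{tag} << kTagShift);
}
constexpr PartitionTag GetTag(uint64_t addr) {
  return static_cast<PartitionTag>((addr & kTagMask) >> kTagShift);
}
constexpr uint64_t StripTag(uint64_t addr) {
  return addr & ~kTagMask;
}

int main() {
  constexpr uint64_t addr = 0x00007f1234567890;
  constexpr uint64_t tagged = SetTag(addr, 0xab);
  static_assert(GetTag(tagged) == 0xab);
  static_assert(StripTag(tagged) == addr);
  assert(GetTag(SetTag(tagged, 0x01)) == 0x01);  // re-tagging overwrites the old tag
}
```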

View File

@ -108,9 +108,8 @@ PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
   DWORD saved_error = GetLastError();
   void* ret = TlsGetValue(key);
   // Only non-zero errors need to be restored.
-  if (PA_UNLIKELY(saved_error)) {
+  if (PA_UNLIKELY(saved_error))
     SetLastError(saved_error);
-  }
   return ret;
 }

View File

@ -19,19 +19,16 @@ void (*g_on_dll_process_detach)() = nullptr;
 void NTAPI PartitionTlsOnThreadExit(PVOID module,
                                     DWORD reason,
                                     PVOID reserved) {
-  if (reason != DLL_THREAD_DETACH && reason != DLL_PROCESS_DETACH) {
+  if (reason != DLL_THREAD_DETACH && reason != DLL_PROCESS_DETACH)
     return;
-  }
-  if (reason == DLL_PROCESS_DETACH && g_on_dll_process_detach) {
+  if (reason == DLL_PROCESS_DETACH && g_on_dll_process_detach)
     g_on_dll_process_detach();
-  }
   if (g_destructor) {
     void* per_thread_data = PartitionTlsGet(g_key);
-    if (per_thread_data) {
+    if (per_thread_data)
       g_destructor(per_thread_data);
-    }
   }
 }

View File

@ -98,9 +98,8 @@ void Wrpkru(uint32_t pkru) {
 LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
     : saved_pkey_value_(kDefaultPkeyValue) {
-  if (!PkeySettings::settings.enabled) {
+  if (!PkeySettings::settings.enabled)
     return;
-  }
   saved_pkey_value_ = Rdpkru();
   if (saved_pkey_value_ != kDefaultPkeyValue) {
     Wrpkru(kAllowAllPkeyValue);
@ -108,9 +107,8 @@ LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
 }
 LiftPkeyRestrictionsScope::~LiftPkeyRestrictionsScope() {
-  if (!PkeySettings::settings.enabled) {
+  if (!PkeySettings::settings.enabled)
     return;
-  }
   if (Rdpkru() != saved_pkey_value_) {
     Wrpkru(saved_pkey_value_);
   }
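The `LiftPkeyRestrictionsScope` hunk above follows a plain save/loosen/restore RAII pattern. A minimal sketch of that shape, with `ReadState`/`WriteState` as assumed stand-ins for the `rdpkru`/`wrpkru` intrinsics (not the real PartitionAlloc API):

```cpp
// Sketch only: generic save/loosen/restore scope; ReadState/WriteState are
// assumed placeholders, not the real rdpkru/wrpkru intrinsics.
#include <cassert>
#include <cstdint>

uint32_t g_state = 0x5;  // stand-in for the thread's PKRU value

uint32_t ReadState() { return g_state; }
void WriteState(uint32_t v) { g_state = v; }

class LiftRestrictionsScope {
 public:
  LiftRestrictionsScope() : saved_(ReadState()) {
    if (saved_ != kAllowAll) {
      WriteState(kAllowAll);  // loosen restrictions for the scope's lifetime
    }
  }
  ~LiftRestrictionsScope() {
    if (ReadState() != saved_) {
      WriteState(saved_);  // put the previous value back only if it changed
    }
  }

 private:
  static constexpr uint32_t kAllowAll = 0;
  uint32_t saved_;
};

int main() {
  {
    LiftRestrictionsScope scope;
    assert(ReadState() == 0);  // restrictions lifted inside the scope
  }
  assert(ReadState() == 0x5);  // original value restored on scope exit
}
```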

File diff suppressed because it is too large

View File

@ -10,7 +10,6 @@
#include <type_traits> #include <type_traits>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
@ -27,23 +26,20 @@ template <bool IsAdjustablePtr>
struct RawPtrAsanUnownedImpl { struct RawPtrAsanUnownedImpl {
// Wraps a pointer. // Wraps a pointer.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) { static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
return ptr; return ptr;
} }
// Notifies the allocator when a wrapped pointer is being removed or replaced. // Notifies the allocator when a wrapped pointer is being removed or replaced.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) { static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* wrapped_ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) { ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
}
} }
// Unwraps the pointer, while asserting that memory hasn't been freed. The // Unwraps the pointer, while asserting that memory hasn't been freed. The
// function is allowed to crash on nullptr. // function is allowed to crash on nullptr.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference( static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
T* wrapped_ptr) {
// ASAN will catch use of dereferenced ptr without additional probing. // ASAN will catch use of dereferenced ptr without additional probing.
return wrapped_ptr; return wrapped_ptr;
} }
@ -51,25 +47,21 @@ struct RawPtrAsanUnownedImpl {
// Unwraps the pointer, while asserting that memory hasn't been freed. The // Unwraps the pointer, while asserting that memory hasn't been freed. The
// function must handle nullptr gracefully. // function must handle nullptr gracefully.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction( static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
T* wrapped_ptr) { ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
if (!partition_alloc::internal::base::is_constant_evaluated()) {
ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
}
return wrapped_ptr; return wrapped_ptr;
} }
// Unwraps the pointer, without making an assertion on whether memory was // Unwraps the pointer, without making an assertion on whether memory was
// freed or not. // freed or not.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison( static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
T* wrapped_ptr) {
return wrapped_ptr; return wrapped_ptr;
} }
// Upcasts the wrapped pointer. // Upcasts the wrapped pointer.
template <typename To, typename From> template <typename To, typename From>
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) { static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
static_assert(std::is_convertible<From*, To*>::value, static_assert(std::is_convertible<From*, To*>::value,
"From must be convertible to To."); "From must be convertible to To.");
// Note, this cast may change the address if upcasting to base that lies in // Note, this cast may change the address if upcasting to base that lies in
@ -82,31 +74,21 @@ struct RawPtrAsanUnownedImpl {
typename T, typename T,
typename Z, typename Z,
typename = typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>> std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) { static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
return wrapped_ptr + delta_elems; return wrapped_ptr + delta_elems;
} }
// Retreat the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
return wrapped_ptr - delta_elems;
}
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1, static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
T* wrapped_ptr2) { T* wrapped_ptr2) {
return wrapped_ptr1 - wrapped_ptr2; return wrapped_ptr1 - wrapped_ptr2;
} }
// Returns a copy of a wrapped pointer, without making an assertion on whether // Returns a copy of a wrapped pointer, without making an assertion on whether
// memory was freed or not. // memory was freed or not.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) { static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
return wrapped_ptr; return wrapped_ptr;
} }
@ -121,21 +103,19 @@ struct RawPtrAsanUnownedImpl {
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor. // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) { static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
return ptr; return ptr;
} }
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication( static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
T* wrapped_ptr) {
return wrapped_ptr; return wrapped_ptr;
} }
// This is for accounting only, used by unit tests. // This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {} static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {} static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
IncrementPointerToMemberOperatorCountForTest() {}
}; };
} // namespace base::internal } // namespace base::internal
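`RawPtrAsanUnownedImpl` is one of several interchangeable `Impl` policies behind `raw_ptr`. A stripped-down model of how such a policy plugs into a pointer wrapper, assuming an illustrative `Ptr` class and a no-op policy (not the real `raw_ptr` implementation):

```cpp
// Sketch only: how a raw_ptr-style wrapper delegates to a static Impl policy.
// The Ptr class and NoOpImpl are illustrative, not the real raw_ptr.
struct NoOpImpl {
  template <typename T>
  static T* WrapRawPtr(T* ptr) { return ptr; }
  template <typename T>
  static void ReleaseWrappedPtr(T*) {}
  template <typename T>
  static T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) { return wrapped_ptr; }
};

template <typename T, typename Impl = NoOpImpl>
class Ptr {
 public:
  explicit Ptr(T* p) : p_(Impl::WrapRawPtr(p)) {}
  ~Ptr() { Impl::ReleaseWrappedPtr(p_); }
  T& operator*() const { return *Impl::SafelyUnwrapPtrForDereference(p_); }

 private:
  T* p_;  // the wrapped pointer; every access goes through Impl hooks
};

int main() {
  int x = 42;
  Ptr<int> p(&x);
  return *p == 42 ? 0 : 1;
}
```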

View File

@ -64,29 +64,6 @@ void RawPtrBackupRefImpl<AllowDangling>::ReportIfDanglingInternal(
} }
} }
// static
template <bool AllowDangling>
bool RawPtrBackupRefImpl<AllowDangling>::CheckPointerWithinSameAlloc(
uintptr_t before_addr,
uintptr_t after_addr,
size_t type_size) {
partition_alloc::internal::PtrPosWithinAlloc ptr_pos_within_alloc =
partition_alloc::internal::IsPtrWithinSameAlloc(before_addr, after_addr,
type_size);
// No need to check that |new_ptr| is in the same pool, as
// IsPtrWithinSameAlloc() checks that it's within the same allocation, so
// must be the same pool.
PA_BASE_CHECK(ptr_pos_within_alloc !=
partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
return ptr_pos_within_alloc ==
partition_alloc::internal::PtrPosWithinAlloc::kAllocEnd;
#else
return false;
#endif
}
template <bool AllowDangling> template <bool AllowDangling>
bool RawPtrBackupRefImpl<AllowDangling>::IsPointeeAlive(uintptr_t address) { bool RawPtrBackupRefImpl<AllowDangling>::IsPointeeAlive(uintptr_t address) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS) #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
@ -98,11 +75,38 @@ bool RawPtrBackupRefImpl<AllowDangling>::IsPointeeAlive(uintptr_t address) {
->IsAlive(); ->IsAlive();
} }
template <bool AllowDangling>
template <typename Z>
partition_alloc::PtrPosWithinAlloc
RawPtrBackupRefImpl<AllowDangling>::IsValidDelta(
uintptr_t address,
partition_alloc::internal::PtrDelta<Z> delta) {
return partition_alloc::internal::PartitionAllocIsValidPtrDelta(address,
delta);
}
// Explicitly instantiates the two BackupRefPtr variants in the .cc. This // Explicitly instantiates the two BackupRefPtr variants in the .cc. This
// ensures the definitions not visible from the .h are available in the binary. // ensures the definitions not visible from the .h are available in the binary.
template struct RawPtrBackupRefImpl</*AllowDangling=*/false>; template struct RawPtrBackupRefImpl</*AllowDangling=*/false>;
template struct RawPtrBackupRefImpl</*AllowDangling=*/true>; template struct RawPtrBackupRefImpl</*AllowDangling=*/true>;
template PA_COMPONENT_EXPORT(RAW_PTR)
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<false>::IsValidDelta(
uintptr_t,
partition_alloc::internal::PtrDelta<size_t>);
template PA_COMPONENT_EXPORT(RAW_PTR)
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<false>::IsValidDelta(
uintptr_t,
partition_alloc::internal::PtrDelta<ptrdiff_t>);
template PA_COMPONENT_EXPORT(RAW_PTR)
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<true>::IsValidDelta(
uintptr_t,
partition_alloc::internal::PtrDelta<size_t>);
template PA_COMPONENT_EXPORT(RAW_PTR)
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<true>::IsValidDelta(
uintptr_t,
partition_alloc::internal::PtrDelta<ptrdiff_t>);
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS) #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address) { void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address) {
if (partition_alloc::internal::IsManagedByDirectMap(address)) { if (partition_alloc::internal::IsManagedByDirectMap(address)) {

View File

@ -13,7 +13,6 @@
#include "base/allocator/partition_allocator/partition_address_space.h" #include "base/allocator/partition_allocator/partition_address_space.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h" #include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h" #include "base/allocator/partition_allocator/partition_alloc_constants.h"
@ -37,8 +36,7 @@ struct RawPtrBackupRefImpl {
// threads modify the same smart pointer object without synchronization, a // threads modify the same smart pointer object without synchronization, a
// data race will occur. // data race will occur.
private: static PA_ALWAYS_INLINE bool IsSupportedAndNotNull(uintptr_t address) {
PA_ALWAYS_INLINE static bool IsSupportedAndNotNull(uintptr_t address) {
// There are many situations where the compiler can prove that // There are many situations where the compiler can prove that
// `ReleaseWrappedPtr` is called on a value that is always nullptr, but the // `ReleaseWrappedPtr` is called on a value that is always nullptr, but the
// way `IsManagedByPartitionAllocBRPPool` is written, the compiler can't // way `IsManagedByPartitionAllocBRPPool` is written, the compiler can't
@ -89,7 +87,7 @@ struct RawPtrBackupRefImpl {
return is_in_brp_pool; return is_in_brp_pool;
} }
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) #if PA_CONFIG(USE_OOB_POISON)
// Out-Of-Bounds (OOB) poison bit is set when the pointer has overflowed by // Out-Of-Bounds (OOB) poison bit is set when the pointer has overflowed by
// one byte. // one byte.
#if defined(ARCH_CPU_X86_64) #if defined(ARCH_CPU_X86_64)
@ -103,36 +101,32 @@ struct RawPtrBackupRefImpl {
#endif #endif
template <typename T> template <typename T>
PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) { static PA_ALWAYS_INLINE T* UnpoisonPtr(T* ptr) {
return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) & return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) &
~OOB_POISON_BIT); ~OOB_POISON_BIT);
} }
template <typename T> template <typename T>
PA_ALWAYS_INLINE static bool IsPtrOOB(T* ptr) { static PA_ALWAYS_INLINE bool IsPtrOOB(T* ptr) {
return (reinterpret_cast<uintptr_t>(ptr) & OOB_POISON_BIT) == return (reinterpret_cast<uintptr_t>(ptr) & OOB_POISON_BIT) ==
OOB_POISON_BIT; OOB_POISON_BIT;
} }
template <typename T> template <typename T>
PA_ALWAYS_INLINE static T* PoisonOOBPtr(T* ptr) { static PA_ALWAYS_INLINE T* PoisonOOBPtr(T* ptr) {
return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) | return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) |
OOB_POISON_BIT); OOB_POISON_BIT);
} }
#else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) #else // PA_USE_OOB_POISON
template <typename T> template <typename T>
PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) { static PA_ALWAYS_INLINE T* UnpoisonPtr(T* ptr) {
return ptr; return ptr;
} }
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) #endif // PA_USE_OOB_POISON
public:
// Wraps a pointer. // Wraps a pointer.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) { static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return ptr;
}
uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(ptr)); uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(ptr));
if (IsSupportedAndNotNull(address)) { if (IsSupportedAndNotNull(address)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS) #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
@ -163,10 +157,7 @@ struct RawPtrBackupRefImpl {
// Notifies the allocator when a wrapped pointer is being removed or replaced. // Notifies the allocator when a wrapped pointer is being removed or replaced.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) { static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return;
}
uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr)); uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr));
if (IsSupportedAndNotNull(address)) { if (IsSupportedAndNotNull(address)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS) #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
@ -186,13 +177,9 @@ struct RawPtrBackupRefImpl {
// Unwraps the pointer, while asserting that memory hasn't been freed. The // Unwraps the pointer, while asserting that memory hasn't been freed. The
// function is allowed to crash on nullptr. // function is allowed to crash on nullptr.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference( static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
}
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS) #if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) #if PA_CONFIG(USE_OOB_POISON)
PA_BASE_CHECK(!IsPtrOOB(wrapped_ptr)); PA_BASE_CHECK(!IsPtrOOB(wrapped_ptr));
#endif #endif
uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr); uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr);
@ -208,13 +195,9 @@ struct RawPtrBackupRefImpl {
// Unwraps the pointer, while asserting that memory hasn't been freed. The // Unwraps the pointer, while asserting that memory hasn't been freed. The
// function must handle nullptr gracefully. // function must handle nullptr gracefully.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction( static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
}
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr); T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) #if PA_CONFIG(USE_OOB_POISON)
// Some code uses invalid pointer values as indicators, so those values must // Some code uses invalid pointer values as indicators, so those values must
// be passed through unchanged during extraction. The following check will // be passed through unchanged during extraction. The following check will
// pass invalid values through if those values do not fall within the BRP // pass invalid values through if those values do not fall within the BRP
@ -227,18 +210,14 @@ struct RawPtrBackupRefImpl {
// OOB conditions, e.g., in code that extracts an end-of-allocation pointer // OOB conditions, e.g., in code that extracts an end-of-allocation pointer
// for use in a loop termination condition. The poison bit would make that // for use in a loop termination condition. The poison bit would make that
// pointer appear to reference a very high address. // pointer appear to reference a very high address.
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR) #endif // PA_CONFIG(USE_OOB_POISON)
return unpoisoned_ptr; return unpoisoned_ptr;
} }
// Unwraps the pointer, without making an assertion on whether memory was // Unwraps the pointer, without making an assertion on whether memory was
// freed or not. // freed or not.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison( static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
}
// This may be used for unwrapping an end-of-allocation pointer to be used // This may be used for unwrapping an end-of-allocation pointer to be used
// as an endpoint in an iterative algorithm, so this removes the OOB poison // as an endpoint in an iterative algorithm, so this removes the OOB poison
// bit. // bit.
@ -247,7 +226,7 @@ struct RawPtrBackupRefImpl {
// Upcasts the wrapped pointer. // Upcasts the wrapped pointer.
template <typename To, typename From> template <typename To, typename From>
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) { static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
static_assert(std::is_convertible<From*, To*>::value, static_assert(std::is_convertible<From*, To*>::value,
"From must be convertible to To."); "From must be convertible to To.");
// Note, this cast may change the address if upcasting to base that lies in // Note, this cast may change the address if upcasting to base that lies in
@ -255,12 +234,53 @@ struct RawPtrBackupRefImpl {
return wrapped_ptr; return wrapped_ptr;
} }
// Verify the pointer stayed in the same slot, and return the poisoned version // Advance the wrapped pointer by `delta_elems`.
// of `new_ptr` if OOB poisoning is enabled. template <
template <typename T> typename T,
PA_ALWAYS_INLINE static T* VerifyAndPoisonPointerAfterAdvanceOrRetreat( typename Z,
T* unpoisoned_ptr, typename =
T* new_ptr) { std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
T* new_ptr = unpoisoned_ptr + delta_elems;
// First check if the new address didn't migrate in/out the BRP pool, and
// that it lands within the same allocation. An end-of-allocation address is
// ok, too, and that may lead to the pointer being poisoned if the relevant
// feature is enabled. These checks add a non-trivial cost, but they're
// cheaper and more secure than the previous implementation that rewrapped
// the pointer (wrapped the new pointer and unwrapped the old one).
//
// Note, the value of these checks goes beyond OOB protection. They're
// important for integrity of the BRP algorithm. Without these, an attacker
// could make the pointer point to another allocation, and cause its
// ref-count to go to 0 upon this pointer's destruction, even though there
// may be another pointer still pointing to it, thus making it lose the BRP
// protection prematurely.
uintptr_t address = partition_alloc::UntagPtr(unpoisoned_ptr);
// TODO(bartekn): Consider adding support for non-BRP pools too (without
// removing the cross-pool migration check).
if (IsSupportedAndNotNull(address)) {
auto ptr_pos_within_alloc = IsValidDelta(
address, delta_elems * static_cast<Z>(sizeof(T)), sizeof(T));
// No need to check that |new_ptr| is in the same pool, as IsValidDelta()
// checks that it's within the same allocation, so must be the same pool.
PA_BASE_CHECK(ptr_pos_within_alloc !=
partition_alloc::PtrPosWithinAlloc::kFarOOB);
#if PA_CONFIG(USE_OOB_POISON)
if (ptr_pos_within_alloc ==
partition_alloc::PtrPosWithinAlloc::kAllocEnd) {
new_ptr = PoisonOOBPtr(new_ptr);
}
#endif
} else {
// Check that the new address didn't migrate into the BRP pool, as it
// would result in more pointers pointing to an allocation than its
// ref-count reflects.
PA_BASE_CHECK(!IsSupportedAndNotNull(partition_alloc::UntagPtr(new_ptr)));
}
return new_ptr;
#else // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// In the "before allocation" mode, on 32-bit, we can run into a problem // In the "before allocation" mode, on 32-bit, we can run into a problem
// that the end-of-allocation address could fall outside of // that the end-of-allocation address could fall outside of
// PartitionAlloc's pools, if this is the last slot of the super page, // PartitionAlloc's pools, if this is the last slot of the super page,
@ -285,99 +305,26 @@ struct RawPtrBackupRefImpl {
// This problem doesn't exist in the "previous slot" mode, or any mode that // This problem doesn't exist in the "previous slot" mode, or any mode that
// involves putting extras after the allocation, because the // involves putting extras after the allocation, because the
// end-of-allocation address belongs to the same slot. // end-of-allocation address belongs to the same slot.
static_assert(BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)); static_assert(false);
#endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
// First check if the new address didn't migrate in/out the BRP pool, and
// that it lands within the same allocation. An end-of-allocation address is
// ok, too, and that may lead to the pointer being poisoned if the relevant
// feature is enabled. These checks add a non-trivial cost, but they're
// cheaper and more secure than the previous implementation that rewrapped
// the pointer (wrapped the new pointer and unwrapped the old one).
//
// Note, the value of these checks goes beyond OOB protection. They're
// important for integrity of the BRP algorithm. Without these, an attacker
// could make the pointer point to another allocation, and cause its
// ref-count to go to 0 upon this pointer's destruction, even though there
// may be another pointer still pointing to it, thus making it lose the BRP
// protection prematurely.
const uintptr_t before_addr = partition_alloc::UntagPtr(unpoisoned_ptr);
const uintptr_t after_addr = partition_alloc::UntagPtr(new_ptr);
// TODO(bartekn): Consider adding support for non-BRP pools too (without
// removing the cross-pool migration check).
if (IsSupportedAndNotNull(before_addr)) {
constexpr size_t size = sizeof(T);
[[maybe_unused]] const bool is_end =
CheckPointerWithinSameAlloc(before_addr, after_addr, size);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
if (is_end) {
new_ptr = PoisonOOBPtr(new_ptr);
}
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
} else {
// Check that the new address didn't migrate into the BRP pool, as it
// would result in more pointers pointing to an allocation than its
// ref-count reflects.
PA_BASE_CHECK(!IsSupportedAndNotNull(after_addr));
}
return new_ptr;
}
// Advance the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr + delta_elems;
}
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
unpoisoned_ptr, unpoisoned_ptr + delta_elems);
}
// Retreat the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr - delta_elems;
}
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
unpoisoned_ptr, unpoisoned_ptr - delta_elems);
} }
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1, static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
T* wrapped_ptr2) { T* wrapped_ptr2) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr1 - wrapped_ptr2;
}
T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1); T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1);
T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2); T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2);
#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
if (partition_alloc::internal::base::is_constant_evaluated()) {
return unpoisoned_ptr1 - unpoisoned_ptr2;
}
uintptr_t address1 = partition_alloc::UntagPtr(unpoisoned_ptr1); uintptr_t address1 = partition_alloc::UntagPtr(unpoisoned_ptr1);
uintptr_t address2 = partition_alloc::UntagPtr(unpoisoned_ptr2); uintptr_t address2 = partition_alloc::UntagPtr(unpoisoned_ptr2);
// Ensure that both pointers are within the same slot, and pool! // Ensure that both pointers are within the same slot, and pool!
// TODO(bartekn): Consider adding support for non-BRP pool too. // TODO(bartekn): Consider adding support for non-BRP pool too.
if (IsSupportedAndNotNull(address1)) { if (IsSupportedAndNotNull(address1)) {
PA_BASE_CHECK(IsSupportedAndNotNull(address2)); PA_BASE_CHECK(IsSupportedAndNotNull(address2));
PA_BASE_CHECK(partition_alloc::internal::IsPtrWithinSameAlloc( PA_BASE_CHECK(IsValidDelta(address2, address1 - address2, sizeof(T)) !=
address2, address1, sizeof(T)) != partition_alloc::PtrPosWithinAlloc::kFarOOB);
partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
} else { } else {
PA_BASE_CHECK(!IsSupportedAndNotNull(address2)); PA_BASE_CHECK(!IsSupportedAndNotNull(address2));
} }
#endif // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
return unpoisoned_ptr1 - unpoisoned_ptr2; return unpoisoned_ptr1 - unpoisoned_ptr2;
} }
@ -385,45 +332,32 @@ struct RawPtrBackupRefImpl {
// memory was freed or not. // memory was freed or not.
// This method increments the reference count of the allocation slot. // This method increments the reference count of the allocation slot.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) { static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
}
return WrapRawPtr(wrapped_ptr); return WrapRawPtr(wrapped_ptr);
} }
// Report the current wrapped pointer if pointee isn't alive anymore. // Report the current wrapped pointer if pointee isn't alive anymore.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static void ReportIfDangling(T* wrapped_ptr) { static PA_ALWAYS_INLINE void ReportIfDangling(T* wrapped_ptr) {
ReportIfDanglingInternal(partition_alloc::UntagPtr(wrapped_ptr)); ReportIfDanglingInternal(partition_alloc::UntagPtr(wrapped_ptr));
} }
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor. // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) { static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
if (partition_alloc::internal::base::is_constant_evaluated()) { return WrapRawPtr(ptr);
return ptr;
} else {
return WrapRawPtr(ptr);
}
} }
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication( static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
T* wrapped_ptr) { return UnpoisonPtr(wrapped_ptr);
if (partition_alloc::internal::base::is_constant_evaluated()) {
return wrapped_ptr;
} else {
return UnpoisonPtr(wrapped_ptr);
}
} }
// This is for accounting only, used by unit tests. // This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {} static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {} static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
IncrementPointerToMemberOperatorCountForTest() {}
private: private:
// We've evaluated several strategies (inline nothing, various parts, or // We've evaluated several strategies (inline nothing, various parts, or
@ -432,24 +366,31 @@ struct RawPtrBackupRefImpl {
// lightweight |IsManagedByPartitionAllocBRPPool()| check was inlined. // lightweight |IsManagedByPartitionAllocBRPPool()| check was inlined.
// Therefore, we've extracted the rest into the functions below and marked // Therefore, we've extracted the rest into the functions below and marked
// them as PA_NOINLINE to prevent unintended LTO effects. // them as PA_NOINLINE to prevent unintended LTO effects.
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void AcquireInternal( static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
uintptr_t address); void AcquireInternal(uintptr_t address);
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReleaseInternal( static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
uintptr_t address); void ReleaseInternal(uintptr_t address);
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) bool IsPointeeAlive( static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
uintptr_t address); bool IsPointeeAlive(uintptr_t address);
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReportIfDanglingInternal( static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
uintptr_t address); void ReportIfDanglingInternal(uintptr_t address);
template <
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
static PA_ALWAYS_INLINE partition_alloc::PtrPosWithinAlloc
IsValidDelta(uintptr_t address, Z delta_in_bytes, size_t type_size) {
using delta_t = std::conditional_t<std::is_signed_v<Z>, ptrdiff_t, size_t>;
partition_alloc::internal::PtrDelta<delta_t> ptr_delta(delta_in_bytes,
type_size);
// CHECK if `before_addr` and `after_addr` are in the same allocation, for a return IsValidDelta(address, ptr_delta);
// given `type_size`. }
// If BACKUP_REF_PTR_POISON_OOB_PTR is enabled, return whether the allocation template <typename Z>
// is at the end. static PA_COMPONENT_EXPORT(RAW_PTR)
// If BACKUP_REF_PTR_POISON_OOB_PTR is disable, return false. PA_NOINLINE partition_alloc::PtrPosWithinAlloc
PA_NOINLINE static PA_COMPONENT_EXPORT( IsValidDelta(uintptr_t address,
RAW_PTR) bool CheckPointerWithinSameAlloc(uintptr_t before_addr, partition_alloc::internal::PtrDelta<Z> delta);
uintptr_t after_addr,
size_t type_size);
}; };
} // namespace base::internal } // namespace base::internal
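The `Advance`/`IsValidDelta` logic above boils down to: after pointer arithmetic, the new address must stay inside the original slot, with the one-past-the-end address tolerated (and optionally poisoned). A simplified model of that classification, using an assumed `Slot` stand-in rather than real PartitionAlloc metadata:

```cpp
// Sketch only: models the "stays within the same allocation" check; Slot is
// an assumed stand-in for PartitionAlloc's slot metadata.
#include <cassert>
#include <cstddef>
#include <cstdint>

enum class PtrPos { kInBounds, kAllocEnd, kFarOOB };

struct Slot {
  uintptr_t begin;
  uintptr_t end;  // one past the last byte of the slot
};

PtrPos ClassifyAfterAdvance(const Slot& slot, uintptr_t after_addr,
                            size_t type_size) {
  if (after_addr >= slot.begin && after_addr + type_size <= slot.end) {
    return PtrPos::kInBounds;
  }
  if (after_addr == slot.end) {
    return PtrPos::kAllocEnd;  // allowed; this is the pointer that may be poisoned
  }
  return PtrPos::kFarOOB;  // would trip the PA_BASE_CHECK in the diff above
}

int main() {
  const Slot slot{0x1000, 0x1040};  // a 64-byte slot
  assert(ClassifyAfterAdvance(slot, 0x1020, 4) == PtrPos::kInBounds);
  assert(ClassifyAfterAdvance(slot, 0x1040, 4) == PtrPos::kAllocEnd);
  assert(ClassifyAfterAdvance(slot, 0x2000, 4) == PtrPos::kFarOOB);
}
```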

View File

@ -11,7 +11,6 @@
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h" #include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h" #include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h" #include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h" #include "base/allocator/partition_allocator/partition_alloc_forward.h"
@ -46,60 +45,47 @@ PA_COMPONENT_EXPORT(RAW_PTR) void ResetRawPtrHooks();
struct RawPtrHookableImpl { struct RawPtrHookableImpl {
// Wraps a pointer. // Wraps a pointer.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) { static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) { GetRawPtrHooks()->wrap_ptr(reinterpret_cast<uintptr_t>(ptr));
GetRawPtrHooks()->wrap_ptr(reinterpret_cast<uintptr_t>(ptr));
}
return ptr; return ptr;
} }
// Notifies the allocator when a wrapped pointer is being removed or replaced. // Notifies the allocator when a wrapped pointer is being removed or replaced.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* ptr) { static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) { GetRawPtrHooks()->release_wrapped_ptr(reinterpret_cast<uintptr_t>(ptr));
GetRawPtrHooks()->release_wrapped_ptr(reinterpret_cast<uintptr_t>(ptr));
}
} }
// Unwraps the pointer, while asserting that memory hasn't been freed. The // Unwraps the pointer, while asserting that memory hasn't been freed. The
// function is allowed to crash on nullptr. // function is allowed to crash on nullptr.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference( static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
T* wrapped_ptr) { GetRawPtrHooks()->safely_unwrap_for_dereference(
if (!partition_alloc::internal::base::is_constant_evaluated()) { reinterpret_cast<uintptr_t>(wrapped_ptr));
GetRawPtrHooks()->safely_unwrap_for_dereference(
reinterpret_cast<uintptr_t>(wrapped_ptr));
}
return wrapped_ptr; return wrapped_ptr;
} }
// Unwraps the pointer, while asserting that memory hasn't been freed. The // Unwraps the pointer, while asserting that memory hasn't been freed. The
// function must handle nullptr gracefully. // function must handle nullptr gracefully.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction( static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
T* wrapped_ptr) { GetRawPtrHooks()->safely_unwrap_for_extraction(
if (!partition_alloc::internal::base::is_constant_evaluated()) { reinterpret_cast<uintptr_t>(wrapped_ptr));
GetRawPtrHooks()->safely_unwrap_for_extraction(
reinterpret_cast<uintptr_t>(wrapped_ptr));
}
return wrapped_ptr; return wrapped_ptr;
} }
// Unwraps the pointer, without making an assertion on whether memory was // Unwraps the pointer, without making an assertion on whether memory was
// freed or not. // freed or not.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison( static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
T* wrapped_ptr) { GetRawPtrHooks()->unsafely_unwrap_for_comparison(
if (!partition_alloc::internal::base::is_constant_evaluated()) { reinterpret_cast<uintptr_t>(wrapped_ptr));
GetRawPtrHooks()->unsafely_unwrap_for_comparison(
reinterpret_cast<uintptr_t>(wrapped_ptr));
}
return wrapped_ptr; return wrapped_ptr;
} }
// Upcasts the wrapped pointer. // Upcasts the wrapped pointer.
template <typename To, typename From> template <typename To, typename From>
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) { static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
static_assert(std::is_convertible<From*, To*>::value, static_assert(std::is_convertible<From*, To*>::value,
"From must be convertible to To."); "From must be convertible to To.");
// Note, this cast may change the address if upcasting to base that lies in // Note, this cast may change the address if upcasting to base that lies in
@ -112,65 +98,44 @@ struct RawPtrHookableImpl {
typename T, typename T,
typename Z, typename Z,
typename = typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>> std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) { static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
if (!partition_alloc::internal::base::is_constant_evaluated()) { GetRawPtrHooks()->advance(
GetRawPtrHooks()->advance( reinterpret_cast<uintptr_t>(wrapped_ptr),
reinterpret_cast<uintptr_t>(wrapped_ptr), reinterpret_cast<uintptr_t>(wrapped_ptr + delta_elems));
reinterpret_cast<uintptr_t>(wrapped_ptr + delta_elems));
}
return wrapped_ptr + delta_elems; return wrapped_ptr + delta_elems;
} }
// Retreat the wrapped pointer by `delta_elems`.
template <
typename T,
typename Z,
typename =
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
if (!partition_alloc::internal::base::is_constant_evaluated()) {
GetRawPtrHooks()->advance(
reinterpret_cast<uintptr_t>(wrapped_ptr),
reinterpret_cast<uintptr_t>(wrapped_ptr - delta_elems));
}
return wrapped_ptr - delta_elems;
}
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1, static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
T* wrapped_ptr2) { T* wrapped_ptr2) {
return wrapped_ptr1 - wrapped_ptr2; return wrapped_ptr1 - wrapped_ptr2;
} }
// Returns a copy of a wrapped pointer, without making an assertion on whether // Returns a copy of a wrapped pointer, without making an assertion on whether
// memory was freed or not. // memory was freed or not.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) { static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
if (!partition_alloc::internal::base::is_constant_evaluated()) { GetRawPtrHooks()->duplicate(reinterpret_cast<uintptr_t>(wrapped_ptr));
GetRawPtrHooks()->duplicate(reinterpret_cast<uintptr_t>(wrapped_ptr));
}
return wrapped_ptr; return wrapped_ptr;
} }
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor. // to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) { static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
return ptr; return ptr;
} }
template <typename T> template <typename T>
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication( static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
T* wrapped_ptr) {
return wrapped_ptr; return wrapped_ptr;
} }
// This is for accounting only, used by unit tests. // This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {} static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {} static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
IncrementPointerToMemberOperatorCountForTest() {}
}; };
} // namespace base::internal } // namespace base::internal
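`RawPtrHookableImpl` funnels every wrap/unwrap through a globally registered hook table without changing the pointer value. A minimal sketch of that observation-hook pattern; the `Hooks` struct and registration below are illustrative, not the real hook API:

```cpp
// Sketch only: an observation-hook table consulted on every wrap; the Hooks
// struct and its registration are illustrative, not the real hook API.
#include <cstdint>
#include <cstdio>

struct Hooks {
  void (*wrap_ptr)(uintptr_t);
  void (*release_wrapped_ptr)(uintptr_t);
};

void NoOpHook(uintptr_t) {}
Hooks g_hooks{&NoOpHook, &NoOpHook};

template <typename T>
T* WrapRawPtr(T* ptr) {
  // The hook only observes the address; the returned pointer is unchanged.
  g_hooks.wrap_ptr(reinterpret_cast<uintptr_t>(ptr));
  return ptr;
}

int main() {
  g_hooks.wrap_ptr = [](uintptr_t addr) {
    std::printf("wrapped %#llx\n", static_cast<unsigned long long>(addr));
  };
  int x = 0;
  int* p = WrapRawPtr(&x);
  return p == &x ? 0 : 1;
}
```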

View File

@ -73,28 +73,21 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// and aborts. Failure to clear would be indicated by the related death tests // and aborts. Failure to clear would be indicated by the related death tests
// not CHECKing appropriately. // not CHECKing appropriately.
static constexpr bool need_clear_after_move = static constexpr bool need_clear_after_move =
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
std::is_same_v<Impl,
internal::MTECheckedPtrImpl<
internal::MTECheckedPtrImplPartitionAllocSupport>> ||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
#if BUILDFLAG(USE_ASAN_UNOWNED_PTR) #if BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<true>> || std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<true>> ||
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<false>> || std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<false>> ||
#endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR) #endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR)
std::is_same_v<Impl, internal::RawPtrNoOpImpl>; std::is_same_v<Impl, internal::RawPtrNoOpImpl>;
// Construct a raw_ref from a pointer, which must not be null. PA_ALWAYS_INLINE explicit raw_ref(T& p) noexcept
//
// This function is safe to use with any pointer, as it will CHECK and
// terminate the process if the pointer is null. Avoid dereferencing a pointer
// to avoid this CHECK as you may be dereferencing null.
PA_ALWAYS_INLINE constexpr static raw_ref from_ptr(T* ptr) noexcept {
PA_RAW_PTR_CHECK(ptr);
return raw_ref(*ptr);
}
// Construct a raw_ref from a reference.
PA_ALWAYS_INLINE constexpr explicit raw_ref(T& p) noexcept
: inner_(std::addressof(p)) {} : inner_(std::addressof(p)) {}
// Assign a new reference to the raw_ref, replacing the existing reference. PA_ALWAYS_INLINE raw_ref& operator=(T& p) noexcept {
PA_ALWAYS_INLINE constexpr raw_ref& operator=(T& p) noexcept {
inner_.operator=(&p); inner_.operator=(&p);
return *this; return *this;
} }
@ -103,26 +96,24 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
raw_ref(const T&& p) = delete; raw_ref(const T&& p) = delete;
raw_ref& operator=(const T&& p) = delete; raw_ref& operator=(const T&& p) = delete;
PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref& p) noexcept PA_ALWAYS_INLINE raw_ref(const raw_ref& p) noexcept : inner_(p.inner_) {
: inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
} }
PA_ALWAYS_INLINE constexpr raw_ref(raw_ref&& p) noexcept PA_ALWAYS_INLINE raw_ref(raw_ref&& p) noexcept : inner_(std::move(p.inner_)) {
: inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
p.inner_ = nullptr; p.inner_ = nullptr;
} }
} }
PA_ALWAYS_INLINE constexpr raw_ref& operator=(const raw_ref& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(p.inner_); inner_.operator=(p.inner_);
return *this; return *this;
} }
PA_ALWAYS_INLINE constexpr raw_ref& operator=(raw_ref&& p) noexcept { PA_ALWAYS_INLINE raw_ref& operator=(raw_ref&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(std::move(p.inner_)); inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
@ -132,24 +123,16 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
} }
// Deliberately implicit in order to support implicit upcast. // Deliberately implicit in order to support implicit upcast.
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// allow it.
template <class U,
RawPtrTraits PassedTraits,
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// NOLINTNEXTLINE(google-explicit-constructor) // NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref<U, PassedTraits>& p) noexcept PA_ALWAYS_INLINE raw_ref(const raw_ref<U, Traits>& p) noexcept
: inner_(p.inner_) { : inner_(p.inner_) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
} }
// Deliberately implicit in order to support implicit upcast. // Deliberately implicit in order to support implicit upcast.
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// allow it.
template <class U,
RawPtrTraits PassedTraits,
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// NOLINTNEXTLINE(google-explicit-constructor) // NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE constexpr raw_ref(raw_ref<U, PassedTraits>&& p) noexcept PA_ALWAYS_INLINE raw_ref(raw_ref<U, Traits>&& p) noexcept
: inner_(std::move(p.inner_)) { : inner_(std::move(p.inner_)) {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
@ -157,25 +140,20 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
} }
} }
static PA_ALWAYS_INLINE raw_ref from_ptr(T* ptr) noexcept {
PA_RAW_PTR_CHECK(ptr);
return raw_ref(*ptr);
}
// Upcast assignment // Upcast assignment
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// allow it. PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref<U, Traits>& p) noexcept {
template <class U,
RawPtrTraits PassedTraits,
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE constexpr raw_ref& operator=(
const raw_ref<U, PassedTraits>& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(p.inner_); inner_.operator=(p.inner_);
return *this; return *this;
} }
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
// allow it. PA_ALWAYS_INLINE raw_ref& operator=(raw_ref<U, Traits>&& p) noexcept {
template <class U,
RawPtrTraits PassedTraits,
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
PA_ALWAYS_INLINE constexpr raw_ref& operator=(
raw_ref<U, PassedTraits>&& p) noexcept {
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
inner_.operator=(std::move(p.inner_)); inner_.operator=(std::move(p.inner_));
if constexpr (need_clear_after_move) { if constexpr (need_clear_after_move) {
@ -184,7 +162,7 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
return *this; return *this;
} }
PA_ALWAYS_INLINE constexpr T& operator*() const { PA_ALWAYS_INLINE T& operator*() const {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return inner_.operator*(); return inner_.operator*();
} }
@ -193,13 +171,12 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
// rather than GetForDereference semantics (see raw_ptr.h). This should be // rather than GetForDereference semantics (see raw_ptr.h). This should be
// used in place of operator*() when the memory referred to by the reference // used in place of operator*() when the memory referred to by the reference
// is not immediately going to be accessed. // is not immediately going to be accessed.
PA_ALWAYS_INLINE constexpr T& get() const { PA_ALWAYS_INLINE T& get() const {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return *inner_.get(); return *inner_.get();
} }
PA_ALWAYS_INLINE constexpr T* operator->() const PA_ALWAYS_INLINE T* operator->() const PA_ATTRIBUTE_RETURNS_NONNULL {
PA_ATTRIBUTE_RETURNS_NONNULL {
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
return inner_.operator->(); return inner_.operator->();
} }
@ -212,90 +189,89 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
inner_.ReportIfDangling(); inner_.ReportIfDangling();
} }
PA_ALWAYS_INLINE friend constexpr void swap(raw_ref& lhs, friend PA_ALWAYS_INLINE void swap(raw_ref& lhs, raw_ref& rhs) noexcept {
raw_ref& rhs) noexcept {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
swap(lhs.inner_, rhs.inner_); swap(lhs.inner_, rhs.inner_);
} }
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator==(const raw_ref<U, Traits1>& lhs, friend PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs); const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator!=(const raw_ref<U, Traits1>& lhs, friend PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs); const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator<(const raw_ref<U, Traits1>& lhs, friend PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs); const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator>(const raw_ref<U, Traits1>& lhs, friend PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs); const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator<=(const raw_ref<U, Traits1>& lhs, friend PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs); const raw_ref<V, Traits2>& rhs);
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2> template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
friend bool operator>=(const raw_ref<U, Traits1>& lhs, friend PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
const raw_ref<V, Traits2>& rhs); const raw_ref<V, Traits2>& rhs);
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator==(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ == &rhs; return lhs.inner_ == &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator!=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ != &rhs; return lhs.inner_ != &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator<(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ < &rhs; return lhs.inner_ < &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator>(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ > &rhs; return lhs.inner_ > &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator<=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ <= &rhs; return lhs.inner_ <= &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator>=(const raw_ref& lhs, const U& rhs) { friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs, const U& rhs) {
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
return lhs.inner_ >= &rhs; return lhs.inner_ >= &rhs;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator==(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator==(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs == rhs.inner_; return &lhs == rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator!=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator!=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs != rhs.inner_; return &lhs != rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator<(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator<(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs < rhs.inner_; return &lhs < rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator>(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator>(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs > rhs.inner_; return &lhs > rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator<=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator<=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs <= rhs.inner_; return &lhs <= rhs.inner_;
} }
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>> template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
PA_ALWAYS_INLINE friend bool operator>=(const U& lhs, const raw_ref& rhs) { friend PA_ALWAYS_INLINE bool operator>=(const U& lhs, const raw_ref& rhs) {
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move. PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
return &lhs >= rhs.inner_; return &lhs >= rhs.inner_;
} }
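
The hunks above are the tail of `raw_ref`'s API: every accessor and comparison runs `PA_RAW_PTR_CHECK(inner_)` first, so a moved-from `raw_ref` (whose inner pointer was nulled by the move) is caught rather than silently dereferenced or compared. A minimal, self-contained sketch of that idea, using a hypothetical `checked_ref` class and a plain `assert` as a stand-in for `PA_RAW_PTR_CHECK` (not the real PartitionAlloc type):

```cpp
#include <cassert>
#include <utility>

// Simplified model of the raw_ref idea above: a non-null reference wrapper
// whose moved-from state is detectable. A sketch, not the real type.
template <typename T>
class checked_ref {
 public:
  explicit checked_ref(T& obj) : ptr_(&obj) {}

  checked_ref(checked_ref&& other) noexcept : ptr_(other.ptr_) {
    other.ptr_ = nullptr;  // Mark the source as moved-from.
  }
  checked_ref& operator=(checked_ref&& other) noexcept {
    ptr_ = other.ptr_;
    other.ptr_ = nullptr;
    return *this;
  }

  T* operator->() const {
    assert(ptr_ && "use-after-move");  // Stand-in for PA_RAW_PTR_CHECK.
    return ptr_;
  }
  T& operator*() const {
    assert(ptr_ && "use-after-move");
    return *ptr_;
  }

  // Heterogeneous comparison against a plain object, as in the hunk above.
  friend bool operator==(const checked_ref& lhs, const T& rhs) {
    assert(lhs.ptr_ && "use-after-move");
    return lhs.ptr_ == &rhs;
  }

 private:
  T* ptr_;
};

int main() {
  int x = 42;
  checked_ref<int> a(x);
  checked_ref<int> b(std::move(a));
  assert(b == x);  // OK: b refers to x.
  // Using `a` here (e.g. *a) would trip the assert: it is moved-from.
  return 0;
}
```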

View File

@ -90,9 +90,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
uint16_t offsets[kReservationOffsetTableLength] = {}; uint16_t offsets[kReservationOffsetTableLength] = {};
constexpr _ReservationOffsetTable() { constexpr _ReservationOffsetTable() {
for (uint16_t& offset : offsets) { for (uint16_t& offset : offsets)
offset = kOffsetTagNotAllocated; offset = kOffsetTagNotAllocated;
}
} }
}; };
#if BUILDFLAG(HAS_64_BIT_POINTERS) #if BUILDFLAG(HAS_64_BIT_POINTERS)
@ -107,7 +106,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
static PA_CONSTINIT _PaddedReservationOffsetTables static PA_CONSTINIT _PaddedReservationOffsetTables
padded_reservation_offset_tables_ PA_PKEY_ALIGN; padded_reservation_offset_tables_ PA_PKEY_ALIGN;
#else #else
// A single table for the entire 32-bit address space. // A single table for the entire 32-bit address space.
static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_; static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS) #endif // BUILDFLAG(HAS_64_BIT_POINTERS)
}; };
@ -194,9 +193,8 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
#endif // BUILDFLAG(PA_DCHECK_IS_ON) #endif // BUILDFLAG(PA_DCHECK_IS_ON)
uint16_t* offset_ptr = ReservationOffsetPointer(address); uint16_t* offset_ptr = ReservationOffsetPointer(address);
PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated); PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
if (*offset_ptr == kOffsetTagNormalBuckets) { if (*offset_ptr == kOffsetTagNormalBuckets)
return 0; return 0;
}
uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr); uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
#if BUILDFLAG(PA_DCHECK_IS_ON) #if BUILDFLAG(PA_DCHECK_IS_ON)
// MSVC workaround: the preprocessor seems to choke on an `#if` embedded // MSVC workaround: the preprocessor seems to choke on an `#if` embedded
@ -242,9 +240,8 @@ GetDirectMapReservationStart(uintptr_t address,
address); address);
uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool); uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool);
PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated); PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
if (*offset_ptr == kOffsetTagNormalBuckets) { if (*offset_ptr == kOffsetTagNormalBuckets)
return 0; return 0;
}
uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr); uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0); PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
return reservation_start; return reservation_start;
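
The `ReservationOffsetTable` hunks above keep one `uint16_t` entry per address granule: as I read them, entries inside a direct-map reservation store their distance (in granules) from the reservation start, while sentinel values mark unallocated granules and normal-bucket memory (which is why `GetDirectMapReservationStart()` returns 0 for normal buckets). A simplified, self-contained model of that lookup; the granule size, table length, and sentinel values here are illustrative assumptions, not the real PartitionAlloc constants:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

// Simplified model of the reservation offset table described above.
constexpr uintptr_t kGranuleSize = 1 << 21;           // one entry per 2 MiB granule
constexpr size_t kTableLength = 1024;                 // table covers kTableLength granules
constexpr uint16_t kOffsetTagNotAllocated = 0xFFFF;   // granule not reserved at all
constexpr uint16_t kOffsetTagNormalBuckets = 0xFFFE;  // granule belongs to normal buckets

uint16_t g_offsets[kTableLength];

void InitTable() {
  for (uint16_t& offset : g_offsets) {
    offset = kOffsetTagNotAllocated;
  }
}

// Record a direct-map reservation of `num_granules` granules starting at
// `start`: each granule remembers its distance from the reservation start.
void MarkDirectMapReservation(uintptr_t start, size_t num_granules) {
  const size_t first = start / kGranuleSize;
  for (size_t i = 0; i < num_granules; ++i) {
    g_offsets[first + i] = static_cast<uint16_t>(i);
  }
}

// Map any address back to the start of its direct-map reservation, or return
// 0 when the address belongs to normal buckets (mirroring the early return
// in the hunk above).
uintptr_t GetDirectMapReservationStart(uintptr_t address) {
  const uint16_t offset = g_offsets[address / kGranuleSize];
  assert(offset != kOffsetTagNotAllocated);
  if (offset == kOffsetTagNormalBuckets) {
    return 0;
  }
  const uintptr_t granule_start = address & ~(kGranuleSize - 1);
  return granule_start - offset * kGranuleSize;
}

int main() {
  InitTable();
  MarkDirectMapReservation(/*start=*/8 * kGranuleSize, /*num_granules=*/3);
  assert(GetDirectMapReservationStart(9 * kGranuleSize + 123) == 8 * kGranuleSize);
  return 0;
}
```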

View File

@ -189,8 +189,6 @@ BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>; using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
using EnableBrpZapping = base::StrongAlias<class EnableBrpZappingTag, bool>; using EnableBrpZapping = base::StrongAlias<class EnableBrpZappingTag, bool>;
using EnableBrpPartitionMemoryReclaimer =
base::StrongAlias<class EnableBrpPartitionMemoryReclaimerTag, bool>;
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>; using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
using UseDedicatedAlignedPartition = using UseDedicatedAlignedPartition =
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>; base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
@ -204,7 +202,6 @@ using AlternateBucketDistribution =
BASE_EXPORT void ConfigurePartitions( BASE_EXPORT void ConfigurePartitions(
EnableBrp enable_brp, EnableBrp enable_brp,
EnableBrpZapping enable_brp_zapping, EnableBrpZapping enable_brp_zapping,
EnableBrpPartitionMemoryReclaimer enable_brp_memory_reclaimer,
SplitMainPartition split_main_partition, SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition, UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AddDummyRefCount add_dummy_ref_count, AddDummyRefCount add_dummy_ref_count,
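
`ConfigurePartitions()` above takes its boolean options as `base::StrongAlias` wrappers, so each flag is a distinct type and call sites read as named arguments instead of a row of bare `true`/`false`. A small sketch of that pattern with a hypothetical `NamedBool` template and `ConfigurePartitionsSketch()` consumer standing in for the real `base::StrongAlias` and API:

```cpp
#include <iostream>

// Minimal stand-in for base::StrongAlias<Tag, bool>: each Tag creates a
// distinct type, so flags cannot be swapped or passed positionally by mistake.
template <typename Tag>
class NamedBool {
 public:
  explicit NamedBool(bool value) : value_(value) {}
  explicit operator bool() const { return value_; }

 private:
  bool value_;
};

using EnableBrp = NamedBool<class EnableBrpTag>;
using SplitMainPartition = NamedBool<class SplitMainPartitionTag>;

// Hypothetical consumer mirroring the shape of ConfigurePartitions() above.
void ConfigurePartitionsSketch(EnableBrp enable_brp,
                               SplitMainPartition split_main_partition) {
  std::cout << "brp=" << static_cast<bool>(enable_brp)
            << " split=" << static_cast<bool>(split_main_partition) << "\n";
}

int main() {
  // Call sites spell out each flag; swapping the arguments is a compile
  // error rather than a silently reordered pair of booleans.
  ConfigurePartitionsSketch(EnableBrp(true), SplitMainPartition(false));
  return 0;
}
```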

View File

@ -584,7 +584,6 @@ void EnablePartitionAllocMemoryReclaimer() {
void ConfigurePartitions( void ConfigurePartitions(
EnableBrp enable_brp, EnableBrp enable_brp,
EnableBrpZapping enable_brp_zapping, EnableBrpZapping enable_brp_zapping,
EnableBrpPartitionMemoryReclaimer enable_brp_memory_reclaimer,
SplitMainPartition split_main_partition, SplitMainPartition split_main_partition,
UseDedicatedAlignedPartition use_dedicated_aligned_partition, UseDedicatedAlignedPartition use_dedicated_aligned_partition,
AddDummyRefCount add_dummy_ref_count, AddDummyRefCount add_dummy_ref_count,
@ -693,14 +692,6 @@ void ConfigurePartitions(
// is replaced, it must've been g_original_root. // is replaced, it must've been g_original_root.
PA_CHECK(current_aligned_root == g_original_root); PA_CHECK(current_aligned_root == g_original_root);
if (enable_brp_memory_reclaimer) {
partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(new_root);
if (new_aligned_root != new_root) {
partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
new_aligned_root);
}
}
// Purge memory, now that the traffic to the original partition is cut off. // Purge memory, now that the traffic to the original partition is cut off.
current_root->PurgeMemory( current_root->PurgeMemory(
partition_alloc::PurgeFlags::kDecommitEmptySlotSpans | partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |

View File

@ -54,9 +54,8 @@ void SpinningMutex::AcquireSpinThenBlock() {
int tries = 0; int tries = 0;
int backoff = 1; int backoff = 1;
do { do {
if (PA_LIKELY(Try())) { if (PA_LIKELY(Try()))
return; return;
}
// Note: Per the intel optimization manual // Note: Per the intel optimization manual
// (https://software.intel.com/content/dam/develop/public/us/en/documents/64-ia-32-architectures-optimization-manual.pdf), // (https://software.intel.com/content/dam/develop/public/us/en/documents/64-ia-32-architectures-optimization-manual.pdf),
// the "pause" instruction is more costly on Skylake Client than on previous // the "pause" instruction is more costly on Skylake Client than on previous

View File

@ -73,11 +73,7 @@ class PA_LOCKABLE PA_COMPONENT_EXPORT(PARTITION_ALLOC) SpinningMutex {
private: private:
PA_NOINLINE void AcquireSpinThenBlock() PA_EXCLUSIVE_LOCK_FUNCTION(); PA_NOINLINE void AcquireSpinThenBlock() PA_EXCLUSIVE_LOCK_FUNCTION();
#if PA_CONFIG(HAS_FAST_MUTEX)
void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION(); void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
#else
PA_ALWAYS_INLINE void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
#endif
// See below, the latency of PA_YIELD_PROCESSOR can be as high as ~150 // See below, the latency of PA_YIELD_PROCESSOR can be as high as ~150
// cycles. Meanwhile, sleeping costs a few us. Spinning 64 times at 3GHz would // cycles. Meanwhile, sleeping costs a few us. Spinning 64 times at 3GHz would
@ -123,9 +119,8 @@ PA_ALWAYS_INLINE void SpinningMutex::Acquire() {
// 1. We don't know how much contention the lock would experience // 1. We don't know how much contention the lock would experience
// 2. This may lead to weird-looking code layout when inlined into a caller // 2. This may lead to weird-looking code layout when inlined into a caller
// with PA_(UN)LIKELY() annotations. // with PA_(UN)LIKELY() annotations.
if (Try()) { if (Try())
return; return;
}
return AcquireSpinThenBlock(); return AcquireSpinThenBlock();
} }
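
The two `SpinningMutex` hunks above show its spin-then-block shape: `Acquire()` tries the lock once inline, and only the slow path spins a bounded number of times (spinning is kept short because `PA_YIELD_PROCESSOR`/`pause` is itself costly on newer cores) before blocking. A rough, self-contained sketch of that structure with a hypothetical `SpinThenBlockLock`; the real class blocks on a futex or equivalent, whereas this sketch merely yields and then sleeps:

```cpp
#include <atomic>
#include <chrono>
#include <thread>

// Sketch of a spin-then-block lock in the spirit of SpinningMutex.
class SpinThenBlockLock {
 public:
  bool Try() { return !locked_.test_and_set(std::memory_order_acquire); }

  void Acquire() {
    if (Try())  // Fast path: uncontended.
      return;
    AcquireSpinThenBlock();
  }

  void Release() { locked_.clear(std::memory_order_release); }

 private:
  void AcquireSpinThenBlock() {
    constexpr int kSpinCount = 64;  // Bounded: spinning burns cycles.
    int tries = 0;
    do {
      if (Try())
        return;
      std::this_thread::yield();  // Stand-in for PA_YIELD_PROCESSOR.
      tries++;
    } while (tries < kSpinCount);

    // Contended for "too long": stop burning CPU and block (here: sleep).
    while (!Try())
      std::this_thread::sleep_for(std::chrono::microseconds(50));
  }

  std::atomic_flag locked_ = ATOMIC_FLAG_INIT;
};

int main() {
  SpinThenBlockLock lock;
  long counter = 0;
  auto work = [&] {
    for (int i = 0; i < 100000; ++i) {
      lock.Acquire();
      ++counter;
      lock.Release();
    }
  };
  std::thread t1(work), t2(work);
  t1.join();
  t2.join();
  return counter == 200000 ? 0 : 1;
}
```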

View File

@ -16,15 +16,14 @@
#include "base/allocator/partition_allocator/partition_page.h" #include "base/allocator/partition_allocator/partition_page.h"
#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h" #include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
#include "base/allocator/partition_allocator/tagging.h" #include "base/allocator/partition_allocator/tagging.h"
namespace partition_alloc { namespace partition_alloc {
class StatsReporter; class StatsReporter;
namespace internal { namespace internal {
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED [[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE PA_NOT_TAIL_CALLED
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DoubleFreeAttempt(); void DoubleFreeAttempt();
// PCScan (Probabilistic Conservative Scanning) is the algorithm that eliminates // PCScan (Probabilistic Conservative Scanning) is the algorithm that eliminates
// use-after-free bugs by verifying that there are no pointers in memory which // use-after-free bugs by verifying that there are no pointers in memory which
@ -109,10 +108,10 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScan final {
static void PerformDelayedScan(int64_t delay_in_microseconds); static void PerformDelayedScan(int64_t delay_in_microseconds);
// Enables safepoints in mutator threads. // Enables safepoints in mutator threads.
PA_ALWAYS_INLINE static void EnableSafepoints(); static void EnableSafepoints();
// Join scan from safepoint in mutator thread. As soon as PCScan is scheduled, // Join scan from safepoint in mutator thread. As soon as PCScan is scheduled,
// mutators can join PCScan helping out with clearing and scanning. // mutators can join PCScan helping out with clearing and scanning.
PA_ALWAYS_INLINE static void JoinScanIfNeeded(); static void JoinScanIfNeeded();
// Checks if there is a PCScan task currently in progress. // Checks if there is a PCScan task currently in progress.
PA_ALWAYS_INLINE static bool IsInProgress(); PA_ALWAYS_INLINE static bool IsInProgress();
@ -136,7 +135,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScan final {
static void UninitForTesting(); static void UninitForTesting();
static inline PCScanScheduler& scheduler(); inline static PCScanScheduler& scheduler();
// Registers reporting class. // Registers reporting class.
static void RegisterStatsReporter(partition_alloc::StatsReporter* reporter); static void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);
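
The comment above summarizes PCScan: freed memory is quarantined, and a conservative scan only releases a quarantined block once no word in the scanned memory still looks like a pointer into it. A toy illustration of that idea, using a hypothetical `Heap` with an `unordered_set` quarantine and a vector of fake "scanned words"; this shows the concept only, nothing like the real implementation:

```cpp
#include <cstdint>
#include <iostream>
#include <iterator>
#include <unordered_set>
#include <vector>

// Toy model of conservative scanning: freed blocks go into a quarantine, and
// a block is only truly released once no scanned word looks like a pointer
// into it.
struct Heap {
  std::unordered_set<const void*> quarantine;

  // Quarantine the block instead of reusing it immediately.
  void Free(const void* p) { quarantine.insert(p); }

  // `roots` stands for the memory PCScan would scan (stacks, heap, globals).
  void Scan(const std::vector<uintptr_t>& roots) {
    std::unordered_set<const void*> still_referenced;
    for (uintptr_t word : roots) {
      const void* candidate = reinterpret_cast<const void*>(word);
      if (quarantine.count(candidate))
        still_referenced.insert(candidate);  // A dangling reference exists.
    }
    // Anything unreferenced can safely leave the quarantine.
    for (auto it = quarantine.begin(); it != quarantine.end();) {
      it = still_referenced.count(*it) ? std::next(it) : quarantine.erase(it);
    }
  }
};

int main() {
  int* a = new int(1);
  int* b = new int(2);
  Heap heap;
  heap.Free(a);
  heap.Free(b);
  // Simulated scanned memory: something still holds a's address, nothing b's.
  heap.Scan({reinterpret_cast<uintptr_t>(a)});
  std::cout << "quarantined after scan: " << heap.quarantine.size() << "\n";  // 1
  delete a;
  delete b;
  return 0;
}
```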

View File

@ -35,7 +35,7 @@ struct QuarantineData final {
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScanSchedulingBackend { class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScanSchedulingBackend {
public: public:
inline constexpr explicit PCScanSchedulingBackend(PCScanScheduler&); explicit inline constexpr PCScanSchedulingBackend(PCScanScheduler&);
// No virtual destructor to allow constant initialization of PCScan as // No virtual destructor to allow constant initialization of PCScan as
// static global which directly embeds LimitBackend as default backend. // static global which directly embeds LimitBackend as default backend.
@ -82,7 +82,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LimitBackend final
public: public:
static constexpr double kQuarantineSizeFraction = 0.1; static constexpr double kQuarantineSizeFraction = 0.1;
inline constexpr explicit LimitBackend(PCScanScheduler&); explicit inline constexpr LimitBackend(PCScanScheduler&);
bool LimitReached() final; bool LimitReached() final;
void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final; void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final;
@ -188,7 +188,7 @@ QuarantineData& PCScanSchedulingBackend::GetQuarantineData() {
constexpr LimitBackend::LimitBackend(PCScanScheduler& scheduler) constexpr LimitBackend::LimitBackend(PCScanScheduler& scheduler)
: PCScanSchedulingBackend(scheduler) {} : PCScanSchedulingBackend(scheduler) {}
PA_ALWAYS_INLINE bool PCScanScheduler::AccountFreed(size_t size) { bool PCScanScheduler::AccountFreed(size_t size) {
const size_t size_before = const size_t size_before =
quarantine_data_.current_size.fetch_add(size, std::memory_order_relaxed); quarantine_data_.current_size.fetch_add(size, std::memory_order_relaxed);
return (size_before + size > return (size_before + size >
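
`PCScanScheduler::AccountFreed()` above atomically adds each freed size to the quarantine total and reports whether the limit was crossed, which is what schedules a scan; the hunk also shows `LimitBackend` deriving that limit from `kQuarantineSizeFraction = 0.1`. A small sketch of that accounting with a hypothetical `QuarantineAccount` class and illustrative numbers (the assumption here is that the limit is a fraction of the managed heap size):

```cpp
#include <atomic>
#include <cstddef>
#include <iostream>

// Sketch of the quarantine accounting shown above: every free adds its size
// atomically, and the caller learns whether the limit was crossed.
class QuarantineAccount {
 public:
  explicit QuarantineAccount(size_t heap_size)
      // Mirrors kQuarantineSizeFraction = 0.1 from the hunk above.
      : limit_(static_cast<size_t>(heap_size * 0.1)) {}

  // Returns true when this free pushes the quarantine over the limit.
  bool AccountFreed(size_t size) {
    const size_t size_before =
        current_size_.fetch_add(size, std::memory_order_relaxed);
    return size_before + size > limit_;
  }

  void ResetAfterScan() { current_size_.store(0, std::memory_order_relaxed); }

 private:
  const size_t limit_;
  std::atomic<size_t> current_size_{0};
};

int main() {
  QuarantineAccount account(/*heap_size=*/1000);  // limit = 100 bytes
  bool scan_needed = false;
  for (int i = 0; i < 12 && !scan_needed; ++i)
    scan_needed = account.AccountFreed(10);  // Crosses the limit on the 11th free.
  std::cout << (scan_needed ? "schedule scan" : "keep going") << "\n";
  return 0;
}
```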

View File

@ -14,7 +14,7 @@ namespace partition_alloc::internal {
// Returns the current stack pointer. // Returns the current stack pointer.
// TODO(bikineev,1202644): Remove this once base/stack_util.h lands. // TODO(bikineev,1202644): Remove this once base/stack_util.h lands.
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t* GetStackPointer(); PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE uintptr_t* GetStackPointer();
// Returns the top of the stack using system API. // Returns the top of the stack using system API.
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void* GetStackTop(); PA_COMPONENT_EXPORT(PARTITION_ALLOC) void* GetStackTop();
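
`GetStackPointer()` above is declared `PA_NOINLINE`, presumably so the frame it reports is the caller-visible one. A hedged sketch of how such a helper is commonly approximated on GCC/Clang with `__builtin_frame_address(0)`; the function name `GetStackPointerSketch` is hypothetical and the real implementation may differ:

```cpp
#include <cstdint>
#include <cstdio>

// Sketch of a "current stack pointer" helper: on GCC/Clang,
// __builtin_frame_address(0) yields the current frame's address, which is a
// close-enough approximation for stack scanning. Marked noinline so the
// reported frame belongs to the caller-visible call, mirroring PA_NOINLINE.
__attribute__((noinline)) uintptr_t* GetStackPointerSketch() {
  return reinterpret_cast<uintptr_t*>(__builtin_frame_address(0));
}

int main() {
  int local = 0;
  uintptr_t* sp = GetStackPointerSketch();
  // Both values live on the same thread stack, so they are typically within
  // a few hundred bytes of each other.
  std::printf("sp=%p local=%p\n", static_cast<void*>(sp),
              static_cast<void*>(&local));
  return 0;
}
```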

View File

@ -130,9 +130,8 @@ void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
// Randomly tag a region (MTE-enabled systems only). The first 16-byte // Randomly tag a region (MTE-enabled systems only). The first 16-byte
// granule is randomly tagged, all other granules in the region are // granule is randomly tagged, all other granules in the region are
// then assigned that initial tag via __arm_mte_set_tag. // then assigned that initial tag via __arm_mte_set_tag.
if (!CheckTagRegionParameters(ptr, sz)) { if (!CheckTagRegionParameters(ptr, sz))
return nullptr; return nullptr;
}
// __arm_mte_create_random_tag generates a randomly tagged pointer via the // __arm_mte_create_random_tag generates a randomly tagged pointer via the
// hardware's random number generator, but does not apply it to the memory. // hardware's random number generator, but does not apply it to the memory.
char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask)); char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
@ -147,9 +146,8 @@ void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
void* TagRegionIncrementForMTE(void* ptr, size_t sz) { void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
// Increment a region's tag (MTE-enabled systems only), using the tag of the // Increment a region's tag (MTE-enabled systems only), using the tag of the
// first granule. // first granule.
if (!CheckTagRegionParameters(ptr, sz)) { if (!CheckTagRegionParameters(ptr, sz))
return nullptr; return nullptr;
}
// Increment ptr's tag. // Increment ptr's tag.
char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u)); char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
for (size_t i = 0; i < sz; i += kMemTagGranuleSize) { for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
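
The two MTE hunks above follow the same pattern: validate that the region is 16-byte-granule aligned, derive a tag for the first granule (randomly, or by incrementing the existing one), then stamp every granule with `__arm_mte_set_tag`. A sketch of the random-tag variant with a hypothetical `TagRegionRandomlySketch()`; it only compiles as real MTE code when the toolchain defines `__ARM_FEATURE_MEMORY_TAGGING`, and actually storing tags additionally needs MTE-capable hardware and tagged mappings, so the fallback path just returns the pointer unchanged:

```cpp
#include <cstddef>
#include <cstdint>
#if defined(__ARM_FEATURE_MEMORY_TAGGING)
#include <arm_acle.h>
#endif

constexpr size_t kMemTagGranuleSize = 16;

// Sketch: pick a random tag for the first granule, then apply it to the
// whole region, mirroring the loop structure in the hunk above.
void* TagRegionRandomlySketch(void* ptr, size_t sz, uint64_t exclusion_mask) {
  // Preconditions from the code above: granule-aligned pointer and size.
  if (reinterpret_cast<uintptr_t>(ptr) % kMemTagGranuleSize != 0 ||
      sz % kMemTagGranuleSize != 0 || sz == 0) {
    return nullptr;
  }
#if defined(__ARM_FEATURE_MEMORY_TAGGING)
  // Generate a randomly tagged copy of `ptr` (memory is untouched so far)...
  char* nptr =
      reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, exclusion_mask));
  // ...then apply that tag to every 16-byte granule in the region.
  for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
    __arm_mte_set_tag(nptr + i);
  }
  return nptr;
#else
  (void)exclusion_mask;
  return ptr;  // No MTE in this build: nothing to tag in this sketch.
#endif
}

int main() {
  alignas(kMemTagGranuleSize) char buffer[64];
  void* tagged = TagRegionRandomlySketch(buffer, sizeof(buffer),
                                         /*exclusion_mask=*/0);
  return tagged != nullptr ? 0 : 1;
}
```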

View File

@ -93,22 +93,18 @@ void ThreadCacheRegistry::RegisterThreadCache(ThreadCache* cache) {
ThreadCache* previous_head = list_head_; ThreadCache* previous_head = list_head_;
list_head_ = cache; list_head_ = cache;
cache->next_ = previous_head; cache->next_ = previous_head;
if (previous_head) { if (previous_head)
previous_head->prev_ = cache; previous_head->prev_ = cache;
}
} }
void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) { void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) {
internal::ScopedGuard scoped_locker(GetLock()); internal::ScopedGuard scoped_locker(GetLock());
if (cache->prev_) { if (cache->prev_)
cache->prev_->next_ = cache->next_; cache->prev_->next_ = cache->next_;
} if (cache->next_)
if (cache->next_) {
cache->next_->prev_ = cache->prev_; cache->next_->prev_ = cache->prev_;
} if (cache == list_head_)
if (cache == list_head_) {
list_head_ = cache->next_; list_head_ = cache->next_;
}
} }
void ThreadCacheRegistry::DumpStats(bool my_thread_only, void ThreadCacheRegistry::DumpStats(bool my_thread_only,
@ -119,9 +115,8 @@ void ThreadCacheRegistry::DumpStats(bool my_thread_only,
internal::ScopedGuard scoped_locker(GetLock()); internal::ScopedGuard scoped_locker(GetLock());
if (my_thread_only) { if (my_thread_only) {
auto* tcache = ThreadCache::Get(); auto* tcache = ThreadCache::Get();
if (!ThreadCache::IsValid(tcache)) { if (!ThreadCache::IsValid(tcache))
return; return;
}
tcache->AccumulateStats(stats); tcache->AccumulateStats(stats);
} else { } else {
ThreadCache* tcache = list_head_; ThreadCache* tcache = list_head_;
@ -151,9 +146,8 @@ void ThreadCacheRegistry::PurgeAll() {
// the main thread for the partition lock, since it is acquired/released once // the main thread for the partition lock, since it is acquired/released once
// per bucket. By purging the main thread first, we avoid these interferences // per bucket. By purging the main thread first, we avoid these interferences
// for this thread at least. // for this thread at least.
if (ThreadCache::IsValid(current_thread_tcache)) { if (ThreadCache::IsValid(current_thread_tcache))
current_thread_tcache->Purge(); current_thread_tcache->Purge();
}
{ {
internal::ScopedGuard scoped_locker(GetLock()); internal::ScopedGuard scoped_locker(GetLock());
@ -164,9 +158,8 @@ void ThreadCacheRegistry::PurgeAll() {
// point". // point".
// Note that this will not work if the other thread is sleeping forever. // Note that this will not work if the other thread is sleeping forever.
// TODO(lizeb): Handle sleeping threads. // TODO(lizeb): Handle sleeping threads.
if (tcache != current_thread_tcache) { if (tcache != current_thread_tcache)
tcache->SetShouldPurge(); tcache->SetShouldPurge();
}
tcache = tcache->next_; tcache = tcache->next_;
} }
} }
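
`PurgeAll()` above can purge the calling thread's cache directly, but other threads' caches are only ever modified by their owners, so it merely sets a flag that each owner checks on its next cache operation (the `should_purge_` check visible in the `thread_cache.h` hunk further down). A minimal sketch of that signaling pattern with a hypothetical `ToyThreadCache`:

```cpp
#include <atomic>
#include <cstddef>
#include <iostream>

// Sketch of the "ask other threads to purge" pattern: another thread can only
// set an atomic flag; the owning thread notices it the next time it touches
// its cache and purges itself.
class ToyThreadCache {
 public:
  void SetShouldPurge() { should_purge_.store(true, std::memory_order_relaxed); }

  // Called by the owning thread on its deallocation fast path.
  void MaybePutInCache(size_t size) {
    cached_bytes_ += size;
    if (should_purge_.load(std::memory_order_relaxed)) {
      Purge();
    }
  }

  size_t cached_bytes() const { return cached_bytes_; }

 private:
  void Purge() {
    cached_bytes_ = 0;  // Hand everything back to the central allocator.
    should_purge_.store(false, std::memory_order_relaxed);
  }

  size_t cached_bytes_ = 0;                // Owner-thread only.
  std::atomic<bool> should_purge_{false};  // May be written by any thread.
};

int main() {
  ToyThreadCache cache;
  cache.MaybePutInCache(128);  // Owner thread caches some memory.
  cache.SetShouldPurge();      // Another thread requests a purge.
  cache.MaybePutInCache(64);   // Owner notices the flag and purges.
  std::cout << "cached after purge request: " << cache.cached_bytes() << "\n";  // 0
  return 0;
}
```
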
@ -224,9 +217,8 @@ void ThreadCacheRegistry::SetThreadCacheMultiplier(float multiplier) {
// This can be called before *any* thread cache has serviced *any* // This can be called before *any* thread cache has serviced *any*
// allocation, which can happen in tests, and in theory in non-test code as // allocation, which can happen in tests, and in theory in non-test code as
// well. // well.
if (!tcache) { if (!tcache)
return; return;
}
// Setting the global limit while locked, because we need |tcache->root_|. // Setting the global limit while locked, because we need |tcache->root_|.
ThreadCache::SetGlobalLimits(tcache->root_, multiplier); ThreadCache::SetGlobalLimits(tcache->root_, multiplier);
@ -264,9 +256,8 @@ void ThreadCacheRegistry::RunPeriodicPurge() {
// Can run when there is no thread cache, in which case there is nothing to // Can run when there is no thread cache, in which case there is nothing to
// do, and the task should not be rescheduled. This would typically indicate // do, and the task should not be rescheduled. This would typically indicate
// a case where the thread cache was never enabled, or got disabled. // a case where the thread cache was never enabled, or got disabled.
if (!tcache) { if (!tcache)
return; return;
}
while (tcache) { while (tcache) {
cached_memory_approx += tcache->cached_memory_; cached_memory_approx += tcache->cached_memory_;
@ -325,9 +316,8 @@ void ThreadCache::EnsureThreadSpecificDataInitialized() {
// adding a special-purpose lock. // adding a special-purpose lock.
internal::ScopedGuard scoped_locker( internal::ScopedGuard scoped_locker(
ThreadCacheRegistry::Instance().GetLock()); ThreadCacheRegistry::Instance().GetLock());
if (g_thread_cache_key_created) { if (g_thread_cache_key_created)
return; return;
}
bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete); bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete);
PA_CHECK(ok); PA_CHECK(ok);
@ -343,9 +333,8 @@ void ThreadCache::DeleteForTesting(ThreadCache* tcache) {
void ThreadCache::SwapForTesting(PartitionRoot<>* root) { void ThreadCache::SwapForTesting(PartitionRoot<>* root) {
auto* old_tcache = ThreadCache::Get(); auto* old_tcache = ThreadCache::Get();
g_thread_cache_root.store(nullptr, std::memory_order_relaxed); g_thread_cache_root.store(nullptr, std::memory_order_relaxed);
if (old_tcache) { if (old_tcache)
ThreadCache::DeleteForTesting(old_tcache); ThreadCache::DeleteForTesting(old_tcache);
}
if (root) { if (root) {
Init(root); Init(root);
Create(root); Create(root);
@ -432,9 +421,8 @@ void ThreadCache::SetGlobalLimits(PartitionRoot<>* root, float multiplier) {
// static // static
void ThreadCache::SetLargestCachedSize(size_t size) { void ThreadCache::SetLargestCachedSize(size_t size) {
if (size > ThreadCache::kLargeSizeThreshold) { if (size > ThreadCache::kLargeSizeThreshold)
size = ThreadCache::kLargeSizeThreshold; size = ThreadCache::kLargeSizeThreshold;
}
largest_active_bucket_index_ = largest_active_bucket_index_ =
PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex( PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
size, size,
@ -524,9 +512,8 @@ ThreadCache::~ThreadCache() {
void ThreadCache::Delete(void* tcache_ptr) { void ThreadCache::Delete(void* tcache_ptr) {
auto* tcache = static_cast<ThreadCache*>(tcache_ptr); auto* tcache = static_cast<ThreadCache*>(tcache_ptr);
if (!IsValid(tcache)) { if (!IsValid(tcache))
return; return;
}
#if PA_CONFIG(THREAD_CACHE_FAST_TLS) #if PA_CONFIG(THREAD_CACHE_FAST_TLS)
internal::g_thread_cache = nullptr; internal::g_thread_cache = nullptr;
@ -630,9 +617,8 @@ void ThreadCache::FillBucket(size_t bucket_index) {
// some objects, then the allocation will be handled normally. Otherwise, // some objects, then the allocation will be handled normally. Otherwise,
// this goes to the central allocator, which will service the allocation, // this goes to the central allocator, which will service the allocation,
// return nullptr or crash. // return nullptr or crash.
if (!slot_start) { if (!slot_start)
break; break;
}
allocated_slots++; allocated_slots++;
PutInBucket(bucket, slot_start); PutInBucket(bucket, slot_start);
@ -648,9 +634,8 @@ void ThreadCache::ClearBucket(Bucket& bucket, size_t limit) {
template <bool crash_on_corruption> template <bool crash_on_corruption>
void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) { void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) {
// Avoids acquiring the lock needlessly. // Avoids acquiring the lock needlessly.
if (!bucket.count || bucket.count <= limit) { if (!bucket.count || bucket.count <= limit)
return; return;
}
// This serves two purposes: error checking and avoiding stalls when grabbing // This serves two purposes: error checking and avoiding stalls when grabbing
// the lock: // the lock:
@ -732,9 +717,8 @@ void ThreadCache::ResetForTesting() {
size_t ThreadCache::CachedMemory() const { size_t ThreadCache::CachedMemory() const {
size_t total = 0; size_t total = 0;
for (const Bucket& bucket : buckets_) { for (const Bucket& bucket : buckets_)
total += bucket.count * static_cast<size_t>(bucket.slot_size); total += bucket.count * static_cast<size_t>(bucket.slot_size);
}
return total; return total;
} }
@ -754,9 +738,8 @@ void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
stats->batch_fill_count += stats_.batch_fill_count; stats->batch_fill_count += stats_.batch_fill_count;
#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS) #if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
for (size_t i = 0; i < internal::kNumBuckets + 1; i++) { for (size_t i = 0; i < internal::kNumBuckets + 1; i++)
stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i]; stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i];
}
#endif // PA_CONFIG(THREAD_CACHE_ALLOC_STATS) #endif // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
// cached_memory_ is not necessarily equal to |CachedMemory()| here, since // cached_memory_ is not necessarily equal to |CachedMemory()| here, since
@ -784,9 +767,8 @@ void ThreadCache::TryPurge() {
// static // static
void ThreadCache::PurgeCurrentThread() { void ThreadCache::PurgeCurrentThread() {
auto* tcache = Get(); auto* tcache = Get();
if (IsValid(tcache)) { if (IsValid(tcache))
tcache->Purge(); tcache->Purge();
}
} }
void ThreadCache::PurgeInternal() { void ThreadCache::PurgeInternal() {
@ -807,9 +789,8 @@ void ThreadCache::PurgeInternalHelper() {
// |largest_active_bucket_index_| can be lowered at runtime, there may be // |largest_active_bucket_index_| can be lowered at runtime, there may be
// memory already cached in the inactive buckets. They should still be // memory already cached in the inactive buckets. They should still be
// purged. // purged.
for (auto& bucket : buckets_) { for (auto& bucket : buckets_)
ClearBucketHelper<crash_on_corruption>(bucket, 0); ClearBucketHelper<crash_on_corruption>(bucket, 0);
}
} }
} // namespace partition_alloc } // namespace partition_alloc

View File

@ -194,10 +194,8 @@ class ReentrancyGuard {
} // namespace internal } // namespace internal
#define PA_REENTRANCY_GUARD(x) \ #define PA_REENTRANCY_GUARD(x) \
internal::ReentrancyGuard guard { \ internal::ReentrancyGuard guard { x }
x \
}
#else // BUILDFLAG(PA_DCHECK_IS_ON) #else // BUILDFLAG(PA_DCHECK_IS_ON)
@ -495,9 +493,8 @@ PA_ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
ClearBucket(bucket, limit / 2); ClearBucket(bucket, limit / 2);
} }
if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed))) { if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
PurgeInternal(); PurgeInternal();
}
*slot_size = bucket.slot_size; *slot_size = bucket.slot_size;
return true; return true;
@ -530,9 +527,8 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
// Very unlikely, means that the central allocator is out of memory. Let it // Very unlikely, means that the central allocator is out of memory. Let it
// deal with it (may return 0, may crash). // deal with it (may return 0, may crash).
if (PA_UNLIKELY(!bucket.freelist_head)) { if (PA_UNLIKELY(!bucket.freelist_head))
return 0; return 0;
}
} }
PA_DCHECK(bucket.count != 0); PA_DCHECK(bucket.count != 0);
@ -631,12 +627,12 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
bucket.count++; bucket.count++;
} }
PA_ALWAYS_INLINE void ThreadCache::RecordAllocation(size_t size) { void ThreadCache::RecordAllocation(size_t size) {
thread_alloc_stats_.alloc_count++; thread_alloc_stats_.alloc_count++;
thread_alloc_stats_.alloc_total_size += size; thread_alloc_stats_.alloc_total_size += size;
} }
PA_ALWAYS_INLINE void ThreadCache::RecordDeallocation(size_t size) { void ThreadCache::RecordDeallocation(size_t size) {
thread_alloc_stats_.dealloc_count++; thread_alloc_stats_.dealloc_count++;
thread_alloc_stats_.dealloc_total_size += size; thread_alloc_stats_.dealloc_total_size += size;
} }
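
`PA_REENTRANCY_GUARD` above expands to a scoped `internal::ReentrancyGuard` object. A minimal sketch of what such a guard plausibly does (an assumption about the real class, not a reproduction of it): assert that a per-thread flag is clear, set it for the duration of the scope, and clear it on destruction. The names `ReentrancyGuardSketch` and `Allocate` are illustrative:

```cpp
#include <cassert>

// Minimal sketch of a reentrancy guard: an RAII object that trips an assert
// if the guarded scope is re-entered on the same thread (e.g. an allocation
// hook allocating again).
class ReentrancyGuardSketch {
 public:
  explicit ReentrancyGuardSketch(bool& flag) : flag_(flag) {
    assert(!flag_ && "re-entered a non-reentrant scope");
    flag_ = true;
  }
  ~ReentrancyGuardSketch() { flag_ = false; }

 private:
  bool& flag_;
};

// Each thread gets its own flag, matching the thread-cache use case.
thread_local bool g_in_thread_cache = false;

int Allocate(int n) {
  // PA_REENTRANCY_GUARD(x) expands to this shape.
  ReentrancyGuardSketch guard{g_in_thread_cache};
  // ... allocation work; calling Allocate() again from here would assert ...
  return n;
}

int main() {
  return Allocate(16) == 16 ? 0 : 1;
}
```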

View File

@ -82,10 +82,7 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
version_incremental_(StrDupParam(params, 24)), version_incremental_(StrDupParam(params, 24)),
hardware_(StrDupParam(params, 25)), hardware_(StrDupParam(params, 25)),
is_at_least_t_(GetIntParam(params, 26)), is_at_least_t_(GetIntParam(params, 26)),
is_automotive_(GetIntParam(params, 27)), is_automotive_(GetIntParam(params, 27)) {}
is_at_least_u_(GetIntParam(params, 28)),
targets_at_least_u_(GetIntParam(params, 29)),
codename_(StrDupParam(params, 30)) {}
// static // static
BuildInfo* BuildInfo::GetInstance() { BuildInfo* BuildInfo::GetInstance() {

View File

@ -146,12 +146,6 @@ class BASE_EXPORT BuildInfo {
bool is_automotive() const { return is_automotive_; } bool is_automotive() const { return is_automotive_; }
bool is_at_least_u() const { return is_at_least_u_; }
bool targets_at_least_u() const { return targets_at_least_u_; }
const char* codename() const { return codename_; }
private: private:
friend struct BuildInfoSingletonTraits; friend struct BuildInfoSingletonTraits;
@ -190,9 +184,6 @@ class BASE_EXPORT BuildInfo {
const char* const hardware_; const char* const hardware_;
const bool is_at_least_t_; const bool is_at_least_t_;
const bool is_automotive_; const bool is_automotive_;
const bool is_at_least_u_;
const bool targets_at_least_u_;
const char* const codename_;
}; };
} // namespace android } // namespace android

View File

@ -60,10 +60,7 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
version_incremental_(""), version_incremental_(""),
hardware_(""), hardware_(""),
is_at_least_t_(false), is_at_least_t_(false),
is_automotive_(false), is_automotive_(false) {}
is_at_least_u_(false),
targets_at_least_u_(false),
codename_("") {}
// static // static
BuildInfo* BuildInfo::GetInstance() { BuildInfo* BuildInfo::GetInstance() {

View File

@ -11,7 +11,6 @@
#include "base/base_jni_headers/FieldTrialList_jni.h" #include "base/base_jni_headers/FieldTrialList_jni.h"
#include "base/lazy_instance.h" #include "base/lazy_instance.h"
#include "base/metrics/field_trial.h" #include "base/metrics/field_trial.h"
#include "base/metrics/field_trial_list_including_low_anonymity.h"
#include "base/metrics/field_trial_params.h" #include "base/metrics/field_trial_params.h"
using base::android::ConvertJavaStringToUTF8; using base::android::ConvertJavaStringToUTF8;
@ -78,39 +77,15 @@ static ScopedJavaLocalRef<jstring> JNI_FieldTrialList_GetVariationParameter(
env, parameters[ConvertJavaStringToUTF8(env, jparameter_key)]); env, parameters[ConvertJavaStringToUTF8(env, jparameter_key)]);
} }
// JNI_FieldTrialList_LogActiveTrials() is a static function, which makes friending
// it a hassle because it must be declared in the file that the friend
// declaration is in, but its declaration can't be included in multiple places
// or things get messy and the linker gets mad. This helper class exists only to
// friend the JNI function and is, in turn, friended by
// FieldTrialListIncludingLowAnonymity which allows for the private
// GetActiveFieldTrialGroups() to be reached.
class AndroidFieldTrialListLogActiveTrialsFriendHelper {
private:
friend void ::JNI_FieldTrialList_LogActiveTrials(JNIEnv* env);
static bool AddObserver(base::FieldTrialList::Observer* observer) {
return base::FieldTrialListIncludingLowAnonymity::AddObserver(observer);
}
static void GetActiveFieldTrialGroups(
base::FieldTrial::ActiveGroups* active_groups) {
base::FieldTrialListIncludingLowAnonymity::GetActiveFieldTrialGroups(
active_groups);
}
};
static void JNI_FieldTrialList_LogActiveTrials(JNIEnv* env) { static void JNI_FieldTrialList_LogActiveTrials(JNIEnv* env) {
DCHECK(!g_trial_logger.IsCreated()); // This need only be called once. DCHECK(!g_trial_logger.IsCreated()); // This need only be called once.
LOG(INFO) << "Logging active field trials..."; LOG(INFO) << "Logging active field trials...";
AndroidFieldTrialListLogActiveTrialsFriendHelper::AddObserver( base::FieldTrialList::AddObserver(&g_trial_logger.Get());
&g_trial_logger.Get());
// Log any trials that were already active before adding the observer. // Log any trials that were already active before adding the observer.
std::vector<base::FieldTrial::ActiveGroup> active_groups; std::vector<base::FieldTrial::ActiveGroup> active_groups;
AndroidFieldTrialListLogActiveTrialsFriendHelper::GetActiveFieldTrialGroups( base::FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
&active_groups);
for (const base::FieldTrial::ActiveGroup& group : active_groups) { for (const base::FieldTrial::ActiveGroup& group : active_groups) {
TrialLogger::Log(group.trial_name, group.group_name); TrialLogger::Log(group.trial_name, group.group_name);
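
The removed comment block above describes a friending workaround: the class with the private API friends one small helper class, and that helper in turn friends the single free function that needs access. A self-contained sketch of the pattern; `RestrictedRegistry`, `LogActiveTrialsFriendHelper`, and `LogActiveTrialsSketch` are illustrative names, not the real base classes:

```cpp
#include <iostream>

// The free function that needs private access, declared up front so it can
// be named in a friend declaration below.
void LogActiveTrialsSketch();

class RestrictedRegistry {
 private:
  friend class LogActiveTrialsFriendHelper;  // Only the helper gets in.
  static int GetActiveCount() { return 3; }
};

class LogActiveTrialsFriendHelper {
 private:
  friend void ::LogActiveTrialsSketch();  // Only this function may use the helper.
  static int GetActiveCount() { return RestrictedRegistry::GetActiveCount(); }
};

void LogActiveTrialsSketch() {
  // Reaches the private API through the helper; any other caller fails to
  // compile at the helper's private section.
  std::cout << "active trials: "
            << LogActiveTrialsFriendHelper::GetActiveCount() << "\n";
}

int main() {
  LogActiveTrialsSketch();
  return 0;
}
```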
} }

Some files were not shown because too many files have changed in this diff Show More