Mirror of https://github.com/klzgrad/naiveproxy.git (synced 2024-12-01 09:46:09 +03:00)

Compare commits: 0efdd5d766 ... 900f316fde

No commits in common. "0efdd5d766d82fc319aed5028eb3fb0c149ea4a9" and "900f316fde0c9b0d6aa08750b955b64bc23da9bf" have entirely different histories.
@@ -1 +1 @@
-112.0.5615.49
+113.0.5672.62
@@ -21,7 +21,7 @@ The Naïve server here works as a forward proxy and a packet length padding layer

 ## Download NaïveProxy

-Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [SagerNet](https://github.com/SagerNet/SagerNet)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).
+Download [here](https://github.com/klzgrad/naiveproxy/releases/latest). Supported platforms include: Windows, Android (with [NekoBox](https://github.com/MatsuriDayo/NekoBoxForAndroid)), Linux, Mac OS, and OpenWrt ([support status](https://github.com/klzgrad/naiveproxy/wiki/OpenWrt-Support)).

 Users should always use the latest version to keep signatures identical to Chrome.
@@ -10,6 +10,7 @@ Standard: Cpp11
 # TODO(crbug.com/1392808): Remove when InsertBraces has been upstreamed into
 # the Chromium style (is implied by BasedOnStyle: Chromium).
 InsertBraces: true
+InsertNewlineAtEOF: true

 # Make sure code like:
 #   IPC_BEGIN_MESSAGE_MAP()
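`InsertBraces` is the clang-format option responsible for the purely mechanical brace-insertion changes that appear in many of the C++ hunks later in this compare (for example in the AddressPoolManager code). A minimal illustration of its effect, using a hypothetical helper rather than any real Chromium code:

```cpp
// Hypothetical helper, only here to illustrate the two options above.
int LookupOrZero(const int* pool) {
  // Without InsertBraces, clang-format leaves a single-statement body
  // unbraced; with InsertBraces: true it rewrites it to the braced form:
  if (!pool) {
    return 0;
  }
  return *pool;
}
// InsertNewlineAtEOF: true additionally guarantees the file ends with '\n'.
```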
src/AUTHORS (16 changed lines)
@ -39,6 +39,7 @@ Aditya Bhargava <heuristicist@gmail.com>
|
||||
Adrian Belgun <adrian.belgun@intel.com>
|
||||
Adrian Ratiu <adrian.ratiu@collabora.corp-partner.google.com>
|
||||
Adrià Vilanova Martínez <me@avm99963.com>
|
||||
Ahmed Elwasefi <a.m.elwasefi@gmail.com>
|
||||
Ahmet Emir Ercin <ahmetemiremir@gmail.com>
|
||||
Ajay Berwal <a.berwal@samsung.com>
|
||||
Ajay Berwal <ajay.berwal@samsung.com>
|
||||
@ -138,6 +139,7 @@ Arnaud Mandy <arnaud.mandy@intel.com>
|
||||
Arnaud Renevier <a.renevier@samsung.com>
|
||||
Arpita Bahuguna <a.bah@samsung.com>
|
||||
Arthur Lussos <developer0420@gmail.com>
|
||||
Artin Lindqvist <artin.lindqvist.chromium@gmail.com>
|
||||
Artur Akerberg <artur.aker@gmail.com>
|
||||
Arun Kulkarni <kulkarni.a@samsung.com>
|
||||
Arun Kumar <arun87.kumar@samsung.com>
|
||||
@ -235,6 +237,7 @@ Cheng Zhao <zcbenz@gmail.com>
|
||||
Cheng Yu <yuzichengcode@gmail.com>
|
||||
Choongwoo Han <cwhan.tunz@gmail.com>
|
||||
Choudhury M. Shamsujjoha <choudhury.s@samsung.com>
|
||||
Chris Dalton <chris@rive.app>
|
||||
Chris Greene <cwgreene@amazon.com>
|
||||
Chris Harrelson <chrishtr@gmail.com>
|
||||
Chris Nardi <hichris123@gmail.com>
|
||||
@ -279,6 +282,7 @@ Dániel Bátyai <dbatyai@inf.u-szeged.hu>
|
||||
Dániel Vince <vinced@inf.u-szeged.hu>
|
||||
Daniil Suvorov <severecloud@gmail.com>
|
||||
Danny Weiss <danny.weiss.fr@gmail.com>
|
||||
Danylo Boiko <danielboyko02@gmail.com>
|
||||
Daoming Qiu <daoming.qiu@intel.com>
|
||||
Darik Harter <darik.harter@gmail.com>
|
||||
Darshan Sen <raisinten@gmail.com>
|
||||
@ -300,6 +304,7 @@ David Sanders <dsanders11@ucsbalum.com>
|
||||
David Spellman <dspell@amazon.com>
|
||||
David Valachovic <adenflorian@gmail.com>
|
||||
Dax Kelson <dkelson@gurulabs.com>
|
||||
Debadree Chatterjee <debadree333@gmail.com>
|
||||
Debashish Samantaray <d.samantaray@samsung.com>
|
||||
Debug Wang <debugwang@tencent.com>
|
||||
Deepak Dilip Borade <deepak.db@samsung.com>
|
||||
@ -464,6 +469,7 @@ Horia Olaru <olaru@adobe.com>
|
||||
Hosung You <hosung.you@samsung.com>
|
||||
Huapeng Li <huapengl@amazon.com>
|
||||
Huayong Xu <huayong.xu@samsung.com>
|
||||
Hung Ngo <ngotienhung195@gmail.com>
|
||||
Hugo Holgersson <hugo.holgersson@sonymobile.com>
|
||||
Hui Wang <wanghui07050707@gmail.com>
|
||||
Hui Wang <wanghui210@huawei.com>
|
||||
@ -503,6 +509,7 @@ Ivan Naydonov <samogot@gmail.com>
|
||||
Ivan Pavlotskiy <ivan.pavlotskiy@lgepartner.com>
|
||||
Ivan Sham <ivansham@amazon.com>
|
||||
Jack Bates <jack@nottheoilrig.com>
|
||||
Jackson Loeffler <j@jloeffler.com>
|
||||
Jacky Hu <flameddd@gmail.com>
|
||||
Jacob Clark <jacob.jh.clark@googlemail.com>
|
||||
Jacob Mandelson <jacob@mandelson.org>
|
||||
@ -570,6 +577,7 @@ Jiangzhen Hou <houjiangzhen@360.cn>
|
||||
Jianjun Zhu <jianjun.zhu@intel.com>
|
||||
Jianneng Zhong <muzuiget@gmail.com>
|
||||
Jiawei Shao <jiawei.shao@intel.com>
|
||||
Jiawei Chen <jiawei.chen@dolby.com>
|
||||
Jiaxun Wei <leuisken@gmail.com>
|
||||
Jiaxun Yang <jiaxun.yang@flygoat.com>
|
||||
Jidong Qin <qinjidong@qianxin.com>
|
||||
@ -602,6 +610,7 @@ Joe Thomas <mhx348@motorola.com>
|
||||
Joel Stanley <joel@jms.id.au>
|
||||
Joey Jiao <joeyjiao0810@gmail.com>
|
||||
Joey Mou <joeymou@amazon.com>
|
||||
Johann <johann@duck.com>
|
||||
Johannes Rudolph <johannes.rudolph@googlemail.com>
|
||||
John Ingve Olsen <johningveolsen@gmail.com>
|
||||
John Kleinschmidt <kleinschmidtorama@gmail.com>
|
||||
@ -743,6 +752,8 @@ Leon Han <leon.han@intel.com>
|
||||
Leung Wing Chung <lwchkg@gmail.com>
|
||||
Li Yanbo <liyanbo.monster@bytedance.com>
|
||||
Li Yin <li.yin@intel.com>
|
||||
Lian Ruilong <lianrl@dingdao.com>
|
||||
Lian Ruilong <lianruilong1108@gmail.com>
|
||||
Lidwine Genevet <lgenevet@cisco.com>
|
||||
Lin Sun <lin.sun@intel.com>
|
||||
Lin Peng <penglin220@gmail.com>
|
||||
@ -764,7 +775,7 @@ Luka Dojcilovic <l.dojcilovic@gmail.com>
|
||||
Lukas Lihotzki <lukas@lihotzki.de>
|
||||
Lukasz Krakowiak <lukasz.krakowiak@mobica.com>
|
||||
Luke Inman-Semerau <luke.semerau@gmail.com>
|
||||
Luke Seunghoe Gu <gulukesh@gmail.com>
|
||||
Luke Gu <gulukesh@gmail.com>
|
||||
Luke Zarko <lukezarko@gmail.com>
|
||||
Luoxi Pan <l.panpax@gmail.com>
|
||||
Lu Yahan <yahan@iscas.ac.cn>
|
||||
@ -786,6 +797,7 @@ Manuel Lagana <manuel.lagana.dev@gmail.com>
|
||||
Mao Yujie <maojie0924@gmail.com>
|
||||
Mao Yujie <yujie.mao@intel.com>
|
||||
Marc des Garets <marc.desgarets@googlemail.com>
|
||||
Marcio Caroso <msscaroso@gmail.com>
|
||||
Marcin Wiacek <marcin@mwiacek.com>
|
||||
Marco Rodrigues <gothicx@gmail.com>
|
||||
Marcos Caceres <marcos@marcosc.com>
|
||||
@ -1290,6 +1302,7 @@ Vinoth Chandar <vinoth@uber.com>
|
||||
Vipul Bhasin <vipul.bhasin@gmail.com>
|
||||
Visa Putkinen <v.putkinen@partner.samsung.com>
|
||||
Vishal Bhatnagar <vishal.b@samsung.com>
|
||||
Vishal Lingam <vishal.reddy@samsung.com>
|
||||
Vitaliy Kharin <kvserr@gmail.com>
|
||||
Vivek Galatage <vivek.vg@samsung.com>
|
||||
Volker Sorge <volker.sorge@gmail.com>
|
||||
@ -1304,6 +1317,7 @@ Wenxiang Qian <leonwxqian@gmail.com>
|
||||
WenSheng He <wensheng.he@samsung.com>
|
||||
Wesley Lancel <wesleylancel@gmail.com>
|
||||
Wei Wang <wei4.wang@intel.com>
|
||||
Wei Wen <wenwei.wenwei@bytedance.com>
|
||||
Wesley Wigham <wwigham@gmail.com>
|
||||
Will Cohen <wwcohen@gmail.com>
|
||||
Will Hirsch <chromium@willhirsch.co.uk>
|
||||
|
src/BUILD.gn (29 changed lines)

@@ -33,37 +33,24 @@ if (is_official_build) {
   assert(!is_component_build)
 }

-# This file defines the following two main targets:
+# The `gn_all` target is used to list all of the main targets in the build, so
+# that we can figure out which BUILD.gn files to process, following the process
+# described at the top of this file.
 #
-# "gn_all" is used to create explicit dependencies from the root BUILD.gn to
-# each top-level component that we wish to include when building everything via
-# "all". This is required since the set of targets built by "all" is determined
-# automatically based on reachability from the root BUILD.gn (for details, see
-# crbug.com/503241). Builders should typically use "all", or list targets
-# explicitly, rather than relying on "gn_all".
-#
-# "gn_visibility": targets that are normally not visible to top-level targets,
-# but are built anyway by "all". Since we don't want any such targets, we have
-# this placeholder to make sure hidden targets that aren't otherwise depended
-# on yet are accounted for.
+# Because of the way GN works (again, as described above), there may be targets
+# built by `all` that aren't built by `gn_all`. We always want `all` to build,
+# so there's really never a reason you'd want to build `gn_all` instead of
+# `all`, and no tooling should depend directly on this target. Tools should
+# should depend on either an explicit list of targets, or `all`.

 group("gn_all") {
   testonly = true

   deps = [
-    ":gn_visibility",
     "//net",
   ]
 }

-group("gn_visibility") {
-  deps = [
-    "//build/config/sanitizers:options_sources",
-    # "//third_party/pdfium:pdfium_embeddertests",  # TODO(GYP): visibility?
-    # "//third_party/pdfium:pdfium_unittests",  # TODO(GYP): visibility?
-  ]
-}
-
 if (is_android) {
   group("optimize_gn_gen") {
     deps = [
@ -19,6 +19,7 @@
|
||||
|
||||
import("//base/allocator/allocator.gni")
|
||||
import("//base/allocator/partition_allocator/partition_alloc.gni")
|
||||
import("//base/debug/debug.gni")
|
||||
import("//base/trace_event/tracing.gni")
|
||||
import("//build/buildflag_header.gni")
|
||||
import("//build/config/arm.gni")
|
||||
@ -120,12 +121,21 @@ if (is_fuchsia) {
|
||||
# TODO(crbug.com/1304707): Drop toolchain_has_rust after we have support for all
|
||||
# our toolchains: Linux x86 is missing in order to build for Android.
|
||||
#
|
||||
# Rust to C++ type conversions.
|
||||
build_rust_base_conversions = toolchain_has_rust && enable_rust_base_conversions
|
||||
|
||||
# The Rust implementation of base::JSONReader.
|
||||
build_rust_json_reader = toolchain_has_rust && enable_rust_json
|
||||
|
||||
buildflag_header("parsing_buildflags") {
|
||||
header = "parsing_buildflags.h"
|
||||
flags = [ "BUILD_RUST_JSON_READER=$build_rust_json_reader" ]
|
||||
assert(build_rust_base_conversions || !build_rust_json_reader,
|
||||
"Cannot enable Rust JSON decoder without also base conversions")
|
||||
|
||||
buildflag_header("rust_buildflags") {
|
||||
header = "rust_buildflags.h"
|
||||
flags = [
|
||||
"BUILD_RUST_JSON_READER=$build_rust_json_reader",
|
||||
"BUILD_RUST_BASE_CONVERSIONS=$build_rust_base_conversions",
|
||||
]
|
||||
}
|
||||
|
||||
if (is_win) {
|
||||
@ -244,7 +254,6 @@ component("base") {
|
||||
"big_endian.h",
|
||||
"bit_cast.h",
|
||||
"bits.h",
|
||||
"build_time.cc",
|
||||
"build_time.h",
|
||||
"callback_list.cc",
|
||||
"callback_list.h",
|
||||
@ -442,6 +451,7 @@ component("base") {
|
||||
"memory/shared_memory_tracker.cc",
|
||||
"memory/shared_memory_tracker.h",
|
||||
"memory/singleton.h",
|
||||
"memory/stack_allocated.h",
|
||||
"memory/unsafe_shared_memory_pool.cc",
|
||||
"memory/unsafe_shared_memory_pool.h",
|
||||
"memory/unsafe_shared_memory_region.cc",
|
||||
@ -470,6 +480,8 @@ component("base") {
|
||||
"metrics/dummy_histogram.h",
|
||||
"metrics/field_trial.cc",
|
||||
"metrics/field_trial.h",
|
||||
"metrics/field_trial_list_including_low_anonymity.cc",
|
||||
"metrics/field_trial_list_including_low_anonymity.h",
|
||||
"metrics/field_trial_param_associator.cc",
|
||||
"metrics/field_trial_param_associator.h",
|
||||
"metrics/field_trial_params.cc",
|
||||
@ -930,6 +942,7 @@ component("base") {
|
||||
"types/pass_key.h",
|
||||
"types/strong_alias.h",
|
||||
"types/token_type.h",
|
||||
"types/variant_util.h",
|
||||
"unguessable_token.cc",
|
||||
"unguessable_token.h",
|
||||
"value_iterators.cc",
|
||||
@ -1064,9 +1077,9 @@ component("base") {
|
||||
":ios_cronet_buildflags",
|
||||
":logging_buildflags",
|
||||
":orderfile_buildflags",
|
||||
":parsing_buildflags",
|
||||
":power_monitor_buildflags",
|
||||
":profiler_buildflags",
|
||||
":rust_buildflags",
|
||||
":sanitizer_buildflags",
|
||||
":synchronization_buildflags",
|
||||
":tracing_buildflags",
|
||||
@ -1074,11 +1087,10 @@ component("base") {
|
||||
"//base/numerics:base_numerics",
|
||||
"//build:chromecast_buildflags",
|
||||
"//build:chromeos_buildflags",
|
||||
"//build/rust:rust_buildflags",
|
||||
"//third_party/abseil-cpp:absl",
|
||||
]
|
||||
|
||||
if (toolchain_has_rust) {
|
||||
if (build_rust_base_conversions) {
|
||||
# Base provides conversions between CXX types and base types (e.g.
|
||||
# StringPiece).
|
||||
public_deps += [ "//build/rust:cxx_cppdeps" ]
|
||||
@ -1204,6 +1216,8 @@ component("base") {
|
||||
"os_compat_android.cc",
|
||||
"os_compat_android.h",
|
||||
"process/process_android.cc",
|
||||
"profiler/native_unwinder_android_map_delegate.h",
|
||||
"profiler/native_unwinder_android_memory_regions_map.h",
|
||||
"profiler/stack_sampler_android.cc",
|
||||
"system/sys_info_android.cc",
|
||||
"threading/platform_thread_android_stub.cc",
|
||||
@ -1264,6 +1278,13 @@ component("base") {
|
||||
libs += [ "android/library_loader/anchor_functions.lds" ]
|
||||
} # is_android
|
||||
|
||||
if (build_allocation_stack_trace_recorder) {
|
||||
sources += [
|
||||
"debug/allocation_trace.cc",
|
||||
"debug/allocation_trace.h",
|
||||
]
|
||||
}
|
||||
|
||||
if (is_robolectric) {
|
||||
# Make jni.h available.
|
||||
configs += [ "//third_party/jdk" ]
|
||||
@ -1335,10 +1356,12 @@ component("base") {
|
||||
"files/memory_mapped_file_posix.cc",
|
||||
"fuchsia/default_job.cc",
|
||||
"fuchsia/default_job.h",
|
||||
"fuchsia/fidl_event_handler.h",
|
||||
"fuchsia/file_utils.cc",
|
||||
"fuchsia/file_utils.h",
|
||||
"fuchsia/filtered_service_directory.cc",
|
||||
"fuchsia/filtered_service_directory.h",
|
||||
"fuchsia/fuchsia_component_connect.h",
|
||||
"fuchsia/fuchsia_logging.cc",
|
||||
"fuchsia/fuchsia_logging.h",
|
||||
"fuchsia/intl_profile_watcher.cc",
|
||||
@ -1407,13 +1430,16 @@ component("base") {
|
||||
public_deps += [
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.component.runner:fuchsia.component.runner_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.intl:fuchsia.intl_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.logger:fuchsia.logger_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.mem:fuchsia.mem_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.process.lifecycle:fuchsia.process.lifecycle_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.process.lifecycle:fuchsia.process.lifecycle_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/async",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/component_incoming_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/fdio",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/fidl_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/fidl_cpp_wire",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/sync",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/syslog_structured_backend",
|
||||
@ -1422,9 +1448,10 @@ component("base") {
|
||||
]
|
||||
|
||||
deps += [
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo:fuchsia.buildinfo_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.hwinfo:fuchsia.hwinfo_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media:fuchsia.media_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.buildinfo:fuchsia.buildinfo_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.hwinfo:fuchsia.hwinfo_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.io:fuchsia.io_cpp_hlcpp_conversion",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.media:fuchsia.media_cpp",
|
||||
"//third_party/fuchsia-sdk/sdk/fidl/fuchsia.sys:fuchsia.sys_hlcpp",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/async-default",
|
||||
"//third_party/fuchsia-sdk/sdk/pkg/async-loop-cpp",
|
||||
@ -1817,7 +1844,7 @@ component("base") {
|
||||
"win/sphelper.h",
|
||||
"win/startup_information.cc",
|
||||
"win/startup_information.h",
|
||||
"win/variant_util.h",
|
||||
"win/variant_conversions.h",
|
||||
"win/variant_vector.cc",
|
||||
"win/variant_vector.h",
|
||||
"win/vector.cc",
|
||||
@ -1900,8 +1927,6 @@ component("base") {
|
||||
"mac/authorization_util.h",
|
||||
"mac/authorization_util.mm",
|
||||
"mac/close_nocancel.cc",
|
||||
"mac/dispatch_source_mach.cc",
|
||||
"mac/dispatch_source_mach.h",
|
||||
"mac/launch_application.h",
|
||||
"mac/launch_application.mm",
|
||||
"mac/launchd.cc",
|
||||
@ -1966,7 +1991,6 @@ component("base") {
|
||||
"CoreFoundation.framework",
|
||||
"IOKit.framework",
|
||||
"OpenDirectory.framework",
|
||||
"Security.framework",
|
||||
]
|
||||
}
|
||||
|
||||
@ -1983,6 +2007,8 @@ component("base") {
|
||||
"mac/call_with_eh_frame.cc",
|
||||
"mac/call_with_eh_frame.h",
|
||||
"mac/call_with_eh_frame_asm.S",
|
||||
"mac/dispatch_source_mach.cc",
|
||||
"mac/dispatch_source_mach.h",
|
||||
"mac/foundation_util.h",
|
||||
"mac/foundation_util.mm",
|
||||
"mac/mac_logging.h",
|
||||
@ -2011,6 +2037,7 @@ component("base") {
|
||||
"threading/platform_thread_mac.mm",
|
||||
"time/time_mac.mm",
|
||||
]
|
||||
frameworks += [ "Security.framework" ]
|
||||
}
|
||||
|
||||
# Linux.
|
||||
@ -2068,9 +2095,7 @@ component("base") {
|
||||
"mac/bridging.h",
|
||||
"native_library_ios.mm",
|
||||
"power_monitor/power_monitor_device_source_ios.mm",
|
||||
"process/launch_ios.cc",
|
||||
"process/process_metrics_ios.cc",
|
||||
"process/process_stubs.cc",
|
||||
"profiler/stack_sampler_ios.cc",
|
||||
"system/sys_info_ios.mm",
|
||||
]
|
||||
@ -2080,13 +2105,27 @@ component("base") {
|
||||
"files/file_path_watcher_kqueue.cc",
|
||||
"files/file_path_watcher_kqueue.h",
|
||||
"files/file_path_watcher_mac.cc",
|
||||
"ios/sim_header_shims.h",
|
||||
"mac/mach_port_rendezvous.cc",
|
||||
"mac/mach_port_rendezvous.h",
|
||||
"process/launch_mac.cc",
|
||||
"process/memory_mac.mm",
|
||||
"process/port_provider_mac.cc",
|
||||
"process/port_provider_mac.h",
|
||||
"process/process_handle_mac.cc",
|
||||
"process/process_iterator_ios.mm",
|
||||
"process/process_mac.cc",
|
||||
"process/process_posix.cc",
|
||||
"sync_socket_posix.cc",
|
||||
"synchronization/waitable_event_watcher_mac.cc",
|
||||
]
|
||||
libs += [ "bsm" ]
|
||||
} else {
|
||||
sources += [ "process/memory_stubs.cc" ]
|
||||
sources += [
|
||||
"process/launch_ios.cc",
|
||||
"process/memory_stubs.cc",
|
||||
"process/process_stubs.cc",
|
||||
]
|
||||
}
|
||||
|
||||
if (is_cronet_build) {
|
||||
@ -2118,6 +2157,8 @@ component("base") {
|
||||
]
|
||||
} else {
|
||||
sources -= [
|
||||
"allocator/dispatcher/dispatcher.cc",
|
||||
"allocator/dispatcher/dispatcher.h",
|
||||
"sampling_heap_profiler/poisson_allocation_sampler.cc",
|
||||
"sampling_heap_profiler/poisson_allocation_sampler.h",
|
||||
"sampling_heap_profiler/sampling_heap_profiler.cc",
|
||||
@ -2376,6 +2417,10 @@ buildflag_header("debugging_buildflags") {
|
||||
enable_lldbinit_warning =
|
||||
is_debug && strip_absolute_paths_from_debug_symbols && is_mac
|
||||
|
||||
# TODO(crbug.com/1420774): Try and enable these checks on Android too.
|
||||
enable_commandline_sequence_checks =
|
||||
(is_debug || dcheck_always_on) && !is_android
|
||||
|
||||
flags = [
|
||||
"DCHECK_IS_CONFIGURABLE=$dcheck_is_configurable",
|
||||
"ENABLE_LOCATION_SOURCE=$enable_location_source",
|
||||
@ -2388,6 +2433,8 @@ buildflag_header("debugging_buildflags") {
|
||||
"ENABLE_LLDBINIT_WARNING=$enable_lldbinit_warning",
|
||||
"EXPENSIVE_DCHECKS_ARE_ON=$enable_expensive_dchecks",
|
||||
"ENABLE_STACK_TRACE_LINE_NUMBERS=$enable_stack_trace_line_numbers",
|
||||
"ENABLE_COMMANDLINE_SEQUENCE_CHECKS=$enable_commandline_sequence_checks",
|
||||
"ENABLE_ALLOCATION_STACK_TRACE_RECORDER=$build_allocation_stack_trace_recorder",
|
||||
]
|
||||
}
|
||||
|
||||
@ -2542,7 +2589,7 @@ if (use_custom_libcxx && enable_safe_libcxx && !is_debug) {
|
||||
}
|
||||
|
||||
action("build_date") {
|
||||
script = "//build/write_build_date_header.py"
|
||||
script = "write_build_date_header.py"
|
||||
|
||||
outputs = [ "$target_gen_dir/generated_build_date.h" ]
|
||||
|
||||
|
@@ -18,6 +18,8 @@ buildflag_header("buildflags") {
     "USE_ALLOCATOR_SHIM=$use_allocator_shim",

     "USE_PARTITION_ALLOC_AS_GWP_ASAN_STORE=$enable_backup_ref_ptr_support",
+
+    "USE_ALLOCATION_EVENT_DISPATCHER=$use_allocation_event_dispatcher",
   ]
 }
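For readers unfamiliar with Chromium's build flags: `buildflag_header()` turns each `NAME=$gn_value` entry above into a generated C++ header, and code then tests the flag with the `BUILDFLAG()` macro. A short sketch of how the newly added `USE_ALLOCATION_EVENT_DISPATCHER` flag would be consumed; the surrounding comments are illustrative, only the includes and macro are the real mechanism:

```cpp
// Sketch: consuming a GN-generated buildflag from C++.
#include "base/allocator/buildflags.h"  // generated by buildflag_header("buildflags")
#include "build/buildflag.h"            // defines the BUILDFLAG() macro

#if BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
// Route allocation events through the new event dispatcher.
#else
// Fall back to the direct PoissonAllocationSampler / allocator-shim hooks.
#endif
```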
@@ -12,6 +12,11 @@ if (is_ios) {
 declare_args() {
   # Causes all the allocations to be routed via allocator_shim.cc.
   use_allocator_shim = use_allocator_shim_default
+
+  # Use the new allocation event dispatcher to distribute events to event observers.
+  # If set to false, PoissonAllocationSampler will hook into PartitionAllocator and
+  # AllocatorShims directly.
+  use_allocation_event_dispatcher = false
 }

 assert(
@@ -29,14 +34,12 @@ if (is_win && use_allocator_shim) {
 # Chromium-specific asserts. External embedders _may_ elect to use these
 # features even without PA-E.
 if (!use_partition_alloc_as_malloc) {
-  # In theory, BackupRefPtr/MTECheckedPtr will work just fine without
-  # PartitionAlloc-Everywhere, but their scope would be limited to partitions
+  # In theory, BackupRefPtr will work just fine without
+  # PartitionAlloc-Everywhere, but its scope would be limited to partitions
   # that are invoked explicitly (not via malloc). These are only Blink
   # partition, where we currently don't even use raw_ptr<T>.
   assert(!enable_backup_ref_ptr_support,
          "Chromium does not use BRP without PA-E")
-  assert(!enable_mte_checked_ptr_support,
-         "Chromium does not use MTECheckedPtr without PA-E")

   # Pointer compression works only if all pointers are guaranteed to be
   # allocated by PA (in one of its core pools, to be precise). In theory,
@@ -18,6 +18,7 @@
 #include <atomic>
 #endif

+#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
 namespace base::allocator::dispatcher::allocator_shim_details {
 namespace {
@@ -223,9 +224,11 @@ void PartitionFreeHook(void* address) {
 }  // namespace
 }  // namespace base::allocator::dispatcher::partition_allocator_details
 #endif  // BUILDFLAG(USE_PARTITION_ALLOC)
+#endif  // !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)

 namespace base::allocator::dispatcher {

+#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
 void InstallStandardAllocatorHooks() {
 #if BUILDFLAG(USE_ALLOCATOR_SHIM)
   allocator_shim::InsertAllocatorDispatch(
@@ -242,10 +245,7 @@ void InstallStandardAllocatorHooks() {
       &partition_allocator_details::PartitionFreeHook);
 #endif  // BUILDFLAG(USE_PARTITION_ALLOC)
 }
-
-}  // namespace base::allocator::dispatcher
-
-namespace base::allocator::dispatcher {
+#endif  // !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)

 // The private implementation of Dispatcher.
 struct Dispatcher::Impl {
@@ -5,6 +5,7 @@
 #ifndef BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_
 #define BASE_ALLOCATOR_DISPATCHER_DISPATCHER_H_

+#include "base/allocator/buildflags.h"
 #include "base/allocator/dispatcher/internal/dispatcher_internal.h"
 #include "base/base_export.h"

@@ -12,7 +13,9 @@

 namespace base::allocator::dispatcher {

+#if !BUILDFLAG(USE_ALLOCATION_EVENT_DISPATCHER)
 void BASE_EXPORT InstallStandardAllocatorHooks();
+#endif

 namespace internal {
 struct DispatchData;
@@ -274,7 +274,7 @@ struct DispatcherImpl {
   static AllocatorDispatch allocator_dispatch_;
 #endif

-  static ALWAYS_INLINE void DoNotifyAllocation(
+  ALWAYS_INLINE static void DoNotifyAllocation(
       void* address,
       size_t size,
       AllocationSubsystem subSystem,
@@ -283,7 +283,7 @@ struct DispatcherImpl {
                                      subSystem, type_name);
   }

-  static ALWAYS_INLINE void DoNotifyFree(void* address) {
+  ALWAYS_INLINE static void DoNotifyFree(void* address) {
     PerformFreeNotification(s_observers, AllObservers{}, address);
   }
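`DoNotifyAllocation()` and `DoNotifyFree()` fan each allocation event out to a compile-time list of observers, which is why they are forced inline on the hot path. A simplified, self-contained sketch of that fan-out pattern; the type and method names below are illustrative stand-ins, not the real `DispatcherImpl` internals:

```cpp
// Sketch: fanning allocation/free notifications out to a statically known
// set of observers with no virtual dispatch, similar in spirit to
// DoNotifyAllocation()/DoNotifyFree() above.
#include <cstddef>
#include <tuple>
#include <utility>

template <typename... Observers>
struct DispatcherSketch {
  explicit DispatcherSketch(Observers... obs) : observers_(obs...) {}

  inline void NotifyAllocation(void* address, std::size_t size) {
    // Fold expression calls every observer in turn.
    std::apply([&](auto&... o) { (o.OnAllocation(address, size), ...); },
               observers_);
  }

  inline void NotifyFree(void* address) {
    std::apply([&](auto&... o) { (o.OnFree(address), ...); }, observers_);
  }

  std::tuple<Observers...> observers_;
};
```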
@@ -32,7 +32,7 @@ const base::FeatureParam<UnretainedDanglingPtrMode>

 BASE_FEATURE(kPartitionAllocDanglingPtr,
              "PartitionAllocDanglingPtr",
-#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS)
+#if BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG)
              FEATURE_ENABLED_BY_DEFAULT
 #else
              FEATURE_DISABLED_BY_DEFAULT
@@ -104,8 +104,8 @@ BASE_FEATURE(kPartitionAllocLargeEmptySlotSpanRing,

 BASE_FEATURE(kPartitionAllocBackupRefPtr,
              "PartitionAllocBackupRefPtr",
-#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) ||              \
-    BUILDFLAG(ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS) || \
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) ||     \
+    BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG) ||  \
     (BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
              FEATURE_ENABLED_BY_DEFAULT
 #else
@@ -124,7 +124,8 @@ constexpr FeatureParam<BackupRefPtrEnabledProcesses>::Option
 const base::FeatureParam<BackupRefPtrEnabledProcesses>
     kBackupRefPtrEnabledProcessesParam {
       &kPartitionAllocBackupRefPtr, "enabled-processes",
-#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || \
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) ||     \
+    BUILDFLAG(ENABLE_BACKUP_REF_PTR_FEATURE_FLAG) ||  \
     (BUILDFLAG(USE_ASAN_BACKUP_REF_PTR) && BUILDFLAG(IS_LINUX))
       BackupRefPtrEnabledProcesses::kNonRenderer,
 #else
@@ -137,8 +138,12 @@ constexpr FeatureParam<BackupRefPtrMode>::Option kBackupRefPtrModeOptions[] = {
     {BackupRefPtrMode::kDisabled, "disabled"},
     {BackupRefPtrMode::kEnabled, "enabled"},
     {BackupRefPtrMode::kEnabledWithoutZapping, "enabled-without-zapping"},
+    {BackupRefPtrMode::kEnabledWithMemoryReclaimer,
+     "enabled-with-memory-reclaimer"},
     {BackupRefPtrMode::kDisabledButSplitPartitions2Way,
     "disabled-but-2-way-split"},
+    {BackupRefPtrMode::kDisabledButSplitPartitions2WayWithMemoryReclaimer,
+     "disabled-but-2-way-split-with-memory-reclaimer"},
     {BackupRefPtrMode::kDisabledButSplitPartitions3Way,
     "disabled-but-3-way-split"},
     {BackupRefPtrMode::kDisabledButAddDummyRefCount,
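The hunks above follow Chromium's standard feature-flag pattern: a `BASE_FEATURE` definition plus `FeatureParam` objects that map Finch parameter strings onto enum values. A condensed sketch of that pattern with a hypothetical feature; only the macros and accessors (`BASE_FEATURE`, `FeatureParam<T>`, `FeatureList::IsEnabled`) are the real APIs shown above:

```cpp
// Sketch: declaring a feature and an enum parameter, then querying them.
// kMyFeature and MyMode are hypothetical names, not part of Chromium.
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

namespace my_features {

BASE_FEATURE(kMyFeature, "MyFeature", base::FEATURE_DISABLED_BY_DEFAULT);

enum class MyMode { kOff, kOn };

constexpr base::FeatureParam<MyMode>::Option kMyModeOptions[] = {
    {MyMode::kOff, "off"},
    {MyMode::kOn, "on"},
};

const base::FeatureParam<MyMode> kMyModeParam{&kMyFeature, "mode",
                                              MyMode::kOff, &kMyModeOptions};

bool ShouldUseMyMode() {
  // Finch/command line decide the feature state and the "mode" param string.
  return base::FeatureList::IsEnabled(kMyFeature) &&
         kMyModeParam.Get() == MyMode::kOn;
}

}  // namespace my_features
```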
@@ -90,10 +90,17 @@ enum class BackupRefPtrMode {
   // Same as kEnabled but without zapping quarantined objects.
   kEnabledWithoutZapping,

+  // Same as kEnabled but registers the main partition to memory reclaimer.
+  kEnabledWithMemoryReclaimer,
+
   // BRP is disabled, but the main partition is split out, as if BRP was enabled
   // in the "previous slot" mode.
   kDisabledButSplitPartitions2Way,

+  // Same as kDisabledButSplitPartitions2Way but registers the main partition to
+  // memory reclaimer.
+  kDisabledButSplitPartitions2WayWithMemoryReclaimer,
+
   // BRP is disabled, but the main partition *and* aligned partition are split
   // out, as if BRP was enabled in the "before allocation" mode.
   kDisabledButSplitPartitions3Way,
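The browser-side configuration code elsewhere in this compare selects the partition setup by switching over this enum, with deliberate fallthroughs from the memory-reclaimer variant down to the plain enabled variants. A condensed, illustrative sketch of that selection; the `BrpChoice` struct is a stand-in, and the real mapping lives in `PartitionAllocSupport::GetBrpConfiguration()`:

```cpp
// Sketch of the enum-to-configuration mapping, assuming BackupRefPtrMode is
// visible via the partition_alloc features header shown in this diff.
struct BrpChoice {
  bool enable_brp = false;
  bool enable_zapping = false;
  bool split_main_partition = false;
  bool use_memory_reclaimer = false;
};

BrpChoice ChooseFor(base::features::BackupRefPtrMode mode) {
  BrpChoice c;
  switch (mode) {
    case base::features::BackupRefPtrMode::kDisabled:
      break;
    case base::features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
      c.use_memory_reclaimer = true;
      [[fallthrough]];
    case base::features::BackupRefPtrMode::kEnabled:
      c.enable_zapping = true;
      [[fallthrough]];
    case base::features::BackupRefPtrMode::kEnabledWithoutZapping:
      c.enable_brp = true;
      c.split_main_partition = true;
      break;
    default:  // the "disabled-but-split" variants only split partitions
      c.split_main_partition = true;
      break;
  }
  return c;
}
```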
@ -336,11 +336,22 @@ std::map<std::string, std::string> ProposeSyntheticFinchTrials() {
|
||||
brp_group_name = "EnabledPrevSlotWithoutZapping";
|
||||
#else
|
||||
brp_group_name = "EnabledBeforeAllocWithoutZapping";
|
||||
#endif
|
||||
break;
|
||||
case features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
|
||||
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
brp_group_name = "EnabledPrevSlotWithMemoryReclaimer";
|
||||
#else
|
||||
brp_group_name = "EnabledBeforeAllocWithMemoryReclaimer";
|
||||
#endif
|
||||
break;
|
||||
case features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
|
||||
brp_group_name = "DisabledBut2WaySplit";
|
||||
break;
|
||||
case features::BackupRefPtrMode::
|
||||
kDisabledButSplitPartitions2WayWithMemoryReclaimer:
|
||||
brp_group_name = "DisabledBut2WaySplitWithMemoryReclaimer";
|
||||
break;
|
||||
case features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
|
||||
brp_group_name = "DisabledBut3WaySplit";
|
||||
break;
|
||||
@ -620,18 +631,19 @@ void DanglingRawPtrReleased(uintptr_t id) {
|
||||
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n"
|
||||
<< dangling_signature << "\n\n"
|
||||
<< "The memory was freed at:\n"
|
||||
<< free_info->stack_trace << free_info->task_trace << "\n"
|
||||
<< free_info->stack_trace << "\n"
|
||||
<< free_info->task_trace << "\n"
|
||||
<< "The dangling raw_ptr was released at:\n"
|
||||
<< stack_trace_release << task_trace_release
|
||||
<< dangling_ptr_footer;
|
||||
<< stack_trace_release << "\n"
|
||||
<< task_trace_release << dangling_ptr_footer;
|
||||
} else {
|
||||
LOG(ERROR) << "Detected dangling raw_ptr with id="
|
||||
<< StringPrintf("0x%016" PRIxPTR, id) << ":\n\n"
|
||||
<< dangling_signature << "\n\n"
|
||||
<< "It was not recorded where the memory was freed.\n\n"
|
||||
<< "The dangling raw_ptr was released at:\n"
|
||||
<< stack_trace_release << task_trace_release
|
||||
<< dangling_ptr_footer;
|
||||
<< stack_trace_release << "\n"
|
||||
<< task_trace_release << dangling_ptr_footer;
|
||||
}
|
||||
|
||||
if constexpr (dangling_pointer_mode == features::DanglingPtrMode::kCrash) {
|
||||
@ -828,10 +840,132 @@ void ReconfigurePartitionForKnownProcess(const std::string& process_type) {
|
||||
|
||||
PartitionAllocSupport::PartitionAllocSupport() = default;
|
||||
|
||||
void PartitionAllocSupport::ReconfigureForTests() {
|
||||
ReconfigureEarlyish("");
|
||||
base::AutoLock scoped_lock(lock_);
|
||||
called_for_tests_ = true;
|
||||
}
|
||||
|
||||
// static
|
||||
PartitionAllocSupport::BrpConfiguration
|
||||
PartitionAllocSupport::GetBrpConfiguration(const std::string& process_type) {
|
||||
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
|
||||
CHECK(base::FeatureList::GetInstance());
|
||||
|
||||
bool enable_brp = false;
|
||||
bool enable_brp_zapping = false;
|
||||
bool split_main_partition = false;
|
||||
bool use_dedicated_aligned_partition = false;
|
||||
bool add_dummy_ref_count = false;
|
||||
bool process_affected_by_brp_flag = false;
|
||||
bool enable_memory_reclaimer = false;
|
||||
|
||||
#if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
|
||||
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
|
||||
BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
|
||||
if (base::FeatureList::IsEnabled(
|
||||
base::features::kPartitionAllocBackupRefPtr)) {
|
||||
// No specified process type means this is the Browser process.
|
||||
switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
|
||||
case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
|
||||
process_affected_by_brp_flag = process_type.empty();
|
||||
break;
|
||||
case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
|
||||
process_affected_by_brp_flag =
|
||||
process_type.empty() ||
|
||||
(process_type == switches::kRendererProcess);
|
||||
break;
|
||||
case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
|
||||
process_affected_by_brp_flag =
|
||||
(process_type != switches::kRendererProcess);
|
||||
break;
|
||||
case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
|
||||
process_affected_by_brp_flag = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
|
||||
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
|
||||
// BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
|
||||
|
||||
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
|
||||
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
if (process_affected_by_brp_flag) {
|
||||
switch (base::features::kBackupRefPtrModeParam.Get()) {
|
||||
case base::features::BackupRefPtrMode::kDisabled:
|
||||
// Do nothing. Equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::kEnabledWithMemoryReclaimer:
|
||||
enable_memory_reclaimer = true;
|
||||
ABSL_FALLTHROUGH_INTENDED;
|
||||
case base::features::BackupRefPtrMode::kEnabled:
|
||||
enable_brp_zapping = true;
|
||||
ABSL_FALLTHROUGH_INTENDED;
|
||||
case base::features::BackupRefPtrMode::kEnabledWithoutZapping:
|
||||
enable_brp = true;
|
||||
split_main_partition = true;
|
||||
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
// AlignedAlloc relies on natural alignment offered by the allocator
|
||||
// (see the comment inside PartitionRoot::AlignedAllocFlags). Any extras
|
||||
// in front of the allocation will mess up that alignment. Such extras
|
||||
// are used when BackupRefPtr is on, in which case, we need a separate
|
||||
// partition, dedicated to handle only aligned allocations, where those
|
||||
// extras are disabled. However, if the "previous slot" variant is used,
|
||||
// no dedicated partition is needed, as the extras won't interfere with
|
||||
// the alignment requirements.
|
||||
use_dedicated_aligned_partition = true;
|
||||
#endif
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
|
||||
split_main_partition = true;
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::
|
||||
kDisabledButSplitPartitions2WayWithMemoryReclaimer:
|
||||
split_main_partition = true;
|
||||
enable_memory_reclaimer = true;
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
|
||||
split_main_partition = true;
|
||||
use_dedicated_aligned_partition = true;
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
|
||||
split_main_partition = true;
|
||||
add_dummy_ref_count = true;
|
||||
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
use_dedicated_aligned_partition = true;
|
||||
#endif
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
|
||||
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
|
||||
return {enable_brp,
|
||||
enable_brp_zapping,
|
||||
enable_memory_reclaimer,
|
||||
split_main_partition,
|
||||
use_dedicated_aligned_partition,
|
||||
add_dummy_ref_count,
|
||||
process_affected_by_brp_flag};
|
||||
}
|
||||
|
||||
void PartitionAllocSupport::ReconfigureEarlyish(
|
||||
const std::string& process_type) {
|
||||
{
|
||||
base::AutoLock scoped_lock(lock_);
|
||||
|
||||
// In tests, ReconfigureEarlyish() is called by ReconfigureForTest(), which
|
||||
// is earlier than ContentMain().
|
||||
if (called_for_tests_) {
|
||||
DCHECK(called_earlyish_);
|
||||
return;
|
||||
}
|
||||
|
||||
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
|
||||
CHECK(!called_earlyish_)
|
||||
<< "ReconfigureEarlyish was already called for process '"
|
||||
@ -882,8 +1016,11 @@ void PartitionAllocSupport::ReconfigureAfterZygoteFork(
|
||||
}
|
||||
|
||||
void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
|
||||
const std::string& process_type) {
|
||||
base::allocator::InstallDanglingRawPtrChecks();
|
||||
const std::string& process_type,
|
||||
bool configure_dangling_pointer_detector) {
|
||||
if (configure_dangling_pointer_detector) {
|
||||
base::allocator::InstallDanglingRawPtrChecks();
|
||||
}
|
||||
base::allocator::InstallUnretainedDanglingRawPtrChecks();
|
||||
{
|
||||
base::AutoLock scoped_lock(lock_);
|
||||
@ -914,46 +1051,11 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
|
||||
}
|
||||
|
||||
DCHECK_NE(process_type, switches::kZygoteProcess);
|
||||
// TODO(bartekn): Switch to DCHECK once confirmed there are no issues.
|
||||
CHECK(base::FeatureList::GetInstance());
|
||||
|
||||
[[maybe_unused]] bool enable_brp = false;
|
||||
[[maybe_unused]] bool enable_brp_zapping = false;
|
||||
[[maybe_unused]] bool split_main_partition = false;
|
||||
[[maybe_unused]] bool use_dedicated_aligned_partition = false;
|
||||
[[maybe_unused]] bool add_dummy_ref_count = false;
|
||||
[[maybe_unused]] bool process_affected_by_brp_flag = false;
|
||||
|
||||
#if (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
|
||||
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) || \
|
||||
BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
|
||||
if (base::FeatureList::IsEnabled(
|
||||
base::features::kPartitionAllocBackupRefPtr)) {
|
||||
// No specified process type means this is the Browser process.
|
||||
switch (base::features::kBackupRefPtrEnabledProcessesParam.Get()) {
|
||||
case base::features::BackupRefPtrEnabledProcesses::kBrowserOnly:
|
||||
process_affected_by_brp_flag = process_type.empty();
|
||||
break;
|
||||
case base::features::BackupRefPtrEnabledProcesses::kBrowserAndRenderer:
|
||||
process_affected_by_brp_flag =
|
||||
process_type.empty() ||
|
||||
(process_type == switches::kRendererProcess);
|
||||
break;
|
||||
case base::features::BackupRefPtrEnabledProcesses::kNonRenderer:
|
||||
process_affected_by_brp_flag =
|
||||
(process_type != switches::kRendererProcess);
|
||||
break;
|
||||
case base::features::BackupRefPtrEnabledProcesses::kAllProcesses:
|
||||
process_affected_by_brp_flag = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif // (BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
|
||||
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)) ||
|
||||
// BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
|
||||
[[maybe_unused]] BrpConfiguration brp_config =
|
||||
GetBrpConfiguration(process_type);
|
||||
|
||||
#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
|
||||
if (process_affected_by_brp_flag) {
|
||||
if (brp_config.process_affected_by_brp_flag) {
|
||||
base::RawPtrAsanService::GetInstance().Configure(
|
||||
base::EnableDereferenceCheck(
|
||||
base::features::kBackupRefPtrAsanEnableDereferenceCheckParam.Get()),
|
||||
@ -969,62 +1071,16 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
|
||||
}
|
||||
#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
|
||||
|
||||
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && \
|
||||
BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
if (process_affected_by_brp_flag) {
|
||||
switch (base::features::kBackupRefPtrModeParam.Get()) {
|
||||
case base::features::BackupRefPtrMode::kDisabled:
|
||||
// Do nothing. Equivalent to !IsEnabled(kPartitionAllocBackupRefPtr).
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::kEnabled:
|
||||
enable_brp_zapping = true;
|
||||
ABSL_FALLTHROUGH_INTENDED;
|
||||
case base::features::BackupRefPtrMode::kEnabledWithoutZapping:
|
||||
enable_brp = true;
|
||||
split_main_partition = true;
|
||||
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
// AlignedAlloc relies on natural alignment offered by the allocator
|
||||
// (see the comment inside PartitionRoot::AlignedAllocFlags). Any extras
|
||||
// in front of the allocation will mess up that alignment. Such extras
|
||||
// are used when BackupRefPtr is on, in which case, we need a separate
|
||||
// partition, dedicated to handle only aligned allocations, where those
|
||||
// extras are disabled. However, if the "previous slot" variant is used,
|
||||
// no dedicated partition is needed, as the extras won't interfere with
|
||||
// the alignment requirements.
|
||||
use_dedicated_aligned_partition = true;
|
||||
#endif
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions2Way:
|
||||
split_main_partition = true;
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::kDisabledButSplitPartitions3Way:
|
||||
split_main_partition = true;
|
||||
use_dedicated_aligned_partition = true;
|
||||
break;
|
||||
|
||||
case base::features::BackupRefPtrMode::kDisabledButAddDummyRefCount:
|
||||
split_main_partition = true;
|
||||
add_dummy_ref_count = true;
|
||||
#if !BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
use_dedicated_aligned_partition = true;
|
||||
#endif
|
||||
break;
|
||||
}
|
||||
}
|
||||
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
|
||||
// BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
|
||||
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
|
||||
allocator_shim::ConfigurePartitions(
|
||||
allocator_shim::EnableBrp(enable_brp),
|
||||
allocator_shim::EnableBrpZapping(enable_brp_zapping),
|
||||
allocator_shim::SplitMainPartition(split_main_partition),
|
||||
allocator_shim::EnableBrp(brp_config.enable_brp),
|
||||
allocator_shim::EnableBrpZapping(brp_config.enable_brp_zapping),
|
||||
allocator_shim::EnableBrpPartitionMemoryReclaimer(
|
||||
brp_config.enable_brp_partition_memory_reclaimer),
|
||||
allocator_shim::SplitMainPartition(brp_config.split_main_partition),
|
||||
allocator_shim::UseDedicatedAlignedPartition(
|
||||
use_dedicated_aligned_partition),
|
||||
allocator_shim::AddDummyRefCount(add_dummy_ref_count),
|
||||
brp_config.use_dedicated_aligned_partition),
|
||||
allocator_shim::AddDummyRefCount(brp_config.add_dummy_ref_count),
|
||||
allocator_shim::AlternateBucketDistribution(
|
||||
base::features::kPartitionAllocAlternateBucketDistributionParam
|
||||
.Get()));
|
||||
@ -1033,7 +1089,7 @@ void PartitionAllocSupport::ReconfigureAfterFeatureListInit(
|
||||
// If BRP is not enabled, check if any of PCScan flags is enabled.
|
||||
[[maybe_unused]] bool scan_enabled = false;
|
||||
#if BUILDFLAG(USE_STARSCAN)
|
||||
if (!enable_brp) {
|
||||
if (!brp_config.enable_brp) {
|
||||
scan_enabled = EnablePCScanForMallocPartitionsIfNeeded();
|
||||
// No specified process type means this is the Browser process.
|
||||
if (process_type.empty()) {
|
||||
|
@@ -43,6 +43,15 @@ BASE_EXPORT void InstallUnretainedDanglingRawPtrChecks();
 // Allows to re-configure PartitionAlloc at run-time.
 class BASE_EXPORT PartitionAllocSupport {
  public:
+  struct BrpConfiguration {
+    bool enable_brp = false;
+    bool enable_brp_zapping = false;
+    bool enable_brp_partition_memory_reclaimer = false;
+    bool split_main_partition = false;
+    bool use_dedicated_aligned_partition = false;
+    bool add_dummy_ref_count = false;
+    bool process_affected_by_brp_flag = false;
+  };
   // Reconfigure* functions re-configure PartitionAlloc. It is impossible to
   // configure PartitionAlloc before/at its initialization using information not
   // known at compile-time (e.g. process type, Finch), because by the time this
@@ -66,9 +75,12 @@ class BASE_EXPORT PartitionAllocSupport {
   // re-configuration steps exactly once.
   //
   // *AfterTaskRunnerInit() may be called more than once.
+  void ReconfigureForTests();
   void ReconfigureEarlyish(const std::string& process_type);
   void ReconfigureAfterZygoteFork(const std::string& process_type);
-  void ReconfigureAfterFeatureListInit(const std::string& process_type);
+  void ReconfigureAfterFeatureListInit(
+      const std::string& process_type,
+      bool configure_dangling_pointer_detector = true);
   void ReconfigureAfterTaskRunnerInit(const std::string& process_type);

   // |has_main_frame| tells us if the renderer contains a main frame.
@@ -85,10 +97,13 @@ class BASE_EXPORT PartitionAllocSupport {
     return singleton;
   }

+  static BrpConfiguration GetBrpConfiguration(const std::string& process_type);
+
  private:
   PartitionAllocSupport();

   base::Lock lock_;
+  bool called_for_tests_ GUARDED_BY(lock_) = false;
   bool called_earlyish_ GUARDED_BY(lock_) = false;
   bool called_after_zygote_fork_ GUARDED_BY(lock_) = false;
   bool called_after_feature_list_init_ GUARDED_BY(lock_) = false;
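The comments above prescribe a strict ordering for the `Reconfigure*` entry points. An illustrative sketch of that startup sequence, assuming the `base::allocator` namespace used in this diff; the wrapper function, its arguments, and the way the support object is obtained are placeholders, not real Chromium code:

```cpp
// Sketch of the documented call order during process startup.
#include <string>
#include "base/allocator/partition_alloc_support.h"  // declares the class above

void StartupSketch(base::allocator::PartitionAllocSupport& pa_support,
                   const std::string& process_type,
                   bool forked_from_zygote) {
  pa_support.ReconfigureEarlyish(process_type);  // before FeatureList exists
  if (forked_from_zygote) {
    pa_support.ReconfigureAfterZygoteFork(process_type);
  }
  // Once Finch/FeatureList is initialized, BRP and friends can be decided.
  pa_support.ReconfigureAfterFeatureListInit(process_type);
  // Per the comment above, this one may be called more than once.
  pa_support.ReconfigureAfterTaskRunnerInit(process_type);
}
```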
@@ -92,6 +92,7 @@ component("partition_alloc") {
     "partition_alloc_base/cpu.cc",
     "partition_alloc_base/cpu.h",
     "partition_alloc_base/cxx17_backports.h",
+    "partition_alloc_base/cxx20_is_constant_evaluated.h",
     "partition_alloc_base/debug/alias.cc",
     "partition_alloc_base/debug/alias.h",
     "partition_alloc_base/gtest_prod_util.h",
@@ -102,7 +103,6 @@ component("partition_alloc") {
     "partition_alloc_base/memory/ref_counted.h",
     "partition_alloc_base/memory/scoped_policy.h",
     "partition_alloc_base/memory/scoped_refptr.h",
-    "partition_alloc_base/migration_adapter.h",
     "partition_alloc_base/no_destructor.h",
     "partition_alloc_base/numerics/checked_math.h",
     "partition_alloc_base/numerics/checked_math_impl.h",
@@ -160,9 +160,6 @@ component("partition_alloc") {
     "partition_root.h",
     "partition_stats.cc",
     "partition_stats.h",
-    "partition_tag.h",
-    "partition_tag_bitmap.h",
-    "partition_tag_types.h",
     "partition_tls.h",
     "pkey.cc",
     "pkey.h",
@@ -429,9 +426,11 @@ buildflag_header("partition_alloc_buildflags") {

     "ENABLE_BACKUP_REF_PTR_SUPPORT=$enable_backup_ref_ptr_support",
     "ENABLE_BACKUP_REF_PTR_SLOW_CHECKS=$enable_backup_ref_ptr_slow_checks",
+    "ENABLE_BACKUP_REF_PTR_FEATURE_FLAG=$enable_backup_ref_ptr_feature_flag",
     "ENABLE_DANGLING_RAW_PTR_CHECKS=$enable_dangling_raw_ptr_checks",
-    "ENABLE_DANGLING_RAW_PTR_FEATURE_FLAGS_FOR_BOTS=$enable_dangling_raw_ptr_feature_flags_for_bots",
+    "ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
+    "ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
     "ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
     "BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
     "PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
     "USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
@@ -441,10 +440,6 @@ buildflag_header("partition_alloc_buildflags") {

     "FORCE_ENABLE_RAW_PTR_EXCLUSION=$force_enable_raw_ptr_exclusion",

-    # Not to be used directly - instead use
-    # PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
-    "ENABLE_MTE_CHECKED_PTR_SUPPORT=$enable_mte_checked_ptr_support",
-
     "RECORD_ALLOC_INFO=$_record_alloc_info",
     "USE_FREESLOT_BITMAP=$use_freeslot_bitmap",
     "GLUE_CORE_POOLS=$glue_core_pools",
@@ -139,6 +139,9 @@ include_rules = [
 specific_include_rules = {
   ".*_(perf|unit)test\.cc$": [
     "+base/allocator/allocator_shim_default_dispatch_to_partition_alloc.h",
+    "+base/allocator/dispatcher/dispatcher.h",
+    "+base/debug/allocation_trace.h",
+    "+base/debug/debugging_buildflags.h",
     "+base/debug/proc_maps_linux.h",
     "+base/system/sys_info.h",
     "+base/test/gtest_util.h",
@@ -62,16 +62,18 @@ void AddressPoolManager::GetPoolUsedSuperPages(
     pool_handle handle,
     std::bitset<kMaxSuperPagesInPool>& used) {
   Pool* pool = GetPool(handle);
-  if (!pool)
+  if (!pool) {
     return;
+  }

   pool->GetUsedSuperPages(used);
 }

 uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
   Pool* pool = GetPool(handle);
-  if (!pool)
+  if (!pool) {
     return 0;
+  }

   return pool->GetBaseAddress();
 }
@@ -92,11 +94,13 @@ uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                       uintptr_t requested_address,
                                       size_t length) {
   Pool* pool = GetPool(handle);
-  if (!requested_address)
+  if (!requested_address) {
     return pool->FindChunk(length);
+  }
   const bool is_available = pool->TryReserveChunk(requested_address, length);
-  if (is_available)
+  if (is_available) {
     return requested_address;
+  }
   return pool->FindChunk(length);
 }

@@ -163,8 +167,9 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
   // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
   // |total_bits_|, return |nullptr| to signal no free chunk was found.
   size_t end_bit = beg_bit + need_bits;
-  if (end_bit > total_bits_)
+  if (end_bit > total_bits_) {
     return 0;
+  }

   bool found = true;
   for (; curr_bit < end_bit; ++curr_bit) {
@@ -176,8 +181,9 @@ uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
       // next outer loop pass from checking the same bits.
       beg_bit = curr_bit + 1;
       found = false;
-      if (bit_hint_ == curr_bit)
+      if (bit_hint_ == curr_bit) {
         ++bit_hint_;
+      }
     }
   }

@@ -212,12 +218,14 @@ bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
   const size_t need_bits = requested_size / kSuperPageSize;
   const size_t end_bit = begin_bit + need_bits;
   // Check that requested address is not too high.
-  if (end_bit > total_bits_)
+  if (end_bit > total_bits_) {
     return false;
+  }
   // Check if any bit of the requested region is set already.
   for (size_t i = begin_bit; i < end_bit; ++i) {
-    if (alloc_bitset_.test(i))
+    if (alloc_bitset_.test(i)) {
       return false;
+    }
   }
   // Otherwise, set the bits.
   for (size_t i = begin_bit; i < end_bit; ++i) {
@@ -520,8 +528,9 @@ bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
   // Get blocklist size.
   for (const auto& blocked :
        AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
-    if (blocked.load(std::memory_order_relaxed))
+    if (blocked.load(std::memory_order_relaxed)) {
       stats->blocklist_size += 1;
+    }
   }

   // Count failures in finding non-blocklisted addresses.
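`Pool::FindChunk()` and `Pool::TryReserveChunk()` treat the pool as a bitset of super pages and do a first-fit scan for a run of clear bits. A self-contained sketch of that scan, with illustrative sizes and names rather than the real PartitionAlloc constants:

```cpp
// Sketch: first-fit search for `need` consecutive free super pages in an
// allocation bitset, then claiming them, mirroring FindChunk/TryReserveChunk.
#include <bitset>
#include <cstddef>

constexpr std::size_t kMaxSuperPages = 4096;  // illustrative pool size

long FindFreeRun(std::bitset<kMaxSuperPages>& bits, std::size_t need) {
  std::size_t beg = 0;
  while (beg + need <= kMaxSuperPages) {
    bool found = true;
    for (std::size_t i = beg; i < beg + need; ++i) {
      if (bits.test(i)) {   // hit an allocated super page:
        found = false;      // restart the search just past it,
        beg = i + 1;        // like FindChunk advancing beg_bit
        break;
      }
    }
    if (found) {
      for (std::size_t j = beg; j < beg + need; ++j) {
        bits.set(j);        // claim the run, as TryReserveChunk does
      }
      return static_cast<long>(beg);
    }
  }
  return -1;  // no free chunk of the requested size
}
```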
@@ -33,10 +33,12 @@ uintptr_t GetRandomPageBase() {
   // randomization isn't buying anything. In that case we just skip it.
   // TODO(palmer): Just dump the randomization when HE-ASLR is present.
   static BOOL is_wow64 = -1;
-  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
+  if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64)) {
     is_wow64 = FALSE;
-  if (!is_wow64)
+  }
+  if (!is_wow64) {
     return 0;
+  }
 #endif  // BUILDFLAG(IS_WIN)
   random &= internal::ASLRMask();
   random += internal::ASLROffset();
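The core of `GetRandomPageBase()` is the final two statements: clamp the random value with `ASLRMask()` and shift it with `ASLROffset()`. A small worked example with one concrete mask/offset pair (a 30-bit mask and a 512 MiB offset, matching one of the 32-bit configurations described in this compare); the numbers are only there to make the arithmetic concrete:

```cpp
// Worked example of the mask-and-offset step; real code also masks the
// constants down to page-allocation granularity via AslrAddress().
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kAslrMask = (uintptr_t{1} << 30) - 1;  // 0x3FFFFFFF
  const uintptr_t kAslrOffset = 0x20000000;              // 512 MiB

  uintptr_t random = 0x7F4A7C15;  // stand-in for the random value
  random &= kAslrMask;            // now in [0, 1 GiB)
  random += kAslrOffset;          // now in [512 MiB, 1.5 GiB)
  std::printf("hint = 0x%lx\n", static_cast<unsigned long>(random));
  return 0;
}
```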
@ -20,11 +20,11 @@ PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t GetRandomPageBase();
|
||||
|
||||
namespace internal {
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
|
||||
AslrAddress(uintptr_t mask) {
|
||||
return mask & PageAllocationGranularityBaseMask();
|
||||
}
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
|
||||
AslrMask(uintptr_t bits) {
|
||||
return AslrAddress((1ULL << bits) - 1ULL);
|
||||
}
|
||||
@ -45,11 +45,11 @@ AslrMask(uintptr_t bits) {
|
||||
// hard-coded in those tools, bad things happen. This address range is
|
||||
// copied from TSAN source but works with all tools. See
|
||||
// https://crbug.com/539863.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
|
||||
ASLRMask() {
|
||||
return AslrAddress(0x007fffffffffULL);
|
||||
}
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
|
||||
ASLROffset() {
|
||||
return AslrAddress(0x7e8000000000ULL);
|
||||
}
|
||||
@ -59,11 +59,11 @@ AslrMask(uintptr_t bits) {
|
||||
// Windows 8.10 and newer support the full 48 bit address range. Since
|
||||
// ASLROffset() is non-zero and may cause a carry, use 47 bit masks. See
|
||||
// http://www.alex-ionescu.com/?p=246
|
||||
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
||||
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
|
||||
return AslrMask(47);
|
||||
}
|
||||
// Try not to map pages into the range where Windows loads DLLs by default.
|
||||
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
|
||||
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
|
||||
return 0x80000000ULL;
|
||||
}
|
||||
|
||||
@ -82,11 +82,11 @@ AslrMask(uintptr_t bits) {
|
||||
//
|
||||
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
|
||||
// changes.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
|
||||
ASLRMask() {
|
||||
return AslrMask(38);
|
||||
}
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
|
||||
ASLROffset() {
|
||||
// Be careful, there is a zone where macOS will not map memory, at least
|
||||
// on ARM64. From an ARM64 machine running 12.3, the range seems to be
|
||||
@ -104,10 +104,10 @@ AslrMask(uintptr_t bits) {
|
||||
|
||||
// Linux (and macOS) support the full 47-bit user space of x64 processors.
|
||||
// Use only 46 to allow the kernel a chance to fulfill the request.
|
||||
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
|
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(46);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0);
}
@@ -117,10 +117,10 @@ AslrMask(uintptr_t bits) {

// Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
@@ -130,11 +130,11 @@ AslrMask(uintptr_t bits) {
// page size and number of levels of translation pages used. We use
// 39-bit as base as all setups should support this, lowered to 38-bit
// as ASLROffset() could cause a carry.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
ASLRMask() {
return AslrMask(38);
}
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
@@ -143,10 +143,10 @@ AslrMask(uintptr_t bits) {

// ARM64 on Linux has 39-bit user space. Use 38 bits since ASLROffset()
// could cause a carry.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(38);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0x1000000000ULL);
}
@@ -159,30 +159,30 @@ AslrMask(uintptr_t bits) {
// AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
// extra address space to isolate the mmap regions.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0x400000000000ULL);
}

#elif defined(ARCH_CPU_BIG_ENDIAN)

// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(42);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0);
}

#else // !BUILDFLAG(IS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)

// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(46);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0);
}
@@ -193,10 +193,10 @@ AslrMask(uintptr_t bits) {
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
// chance to fulfill the request.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(40);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0);
}
@@ -204,10 +204,10 @@ AslrMask(uintptr_t bits) {

// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(29);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0);
}
@@ -215,7 +215,7 @@ AslrMask(uintptr_t bits) {
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)

// For all other POSIX variants, use 30 bits.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(30);
}
@@ -231,7 +231,7 @@ AslrMask(uintptr_t bits) {
// fails allocate as if there were no hint at all. The high hint
// prevents the break from getting hemmed in at low values, ceding half
// of the address space to the system heap.
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0x80000000ULL);
}
@@ -239,7 +239,7 @@ AslrMask(uintptr_t bits) {

// The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range.
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0x90000000ULL);
}
@@ -248,7 +248,7 @@ AslrMask(uintptr_t bits) {
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7.
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
@@ -264,10 +264,10 @@ AslrMask(uintptr_t bits) {
// This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
// is no issue with carries here.
constexpr PA_ALWAYS_INLINE uintptr_t ASLRMask() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLRMask() {
return AslrMask(30);
}
constexpr PA_ALWAYS_INLINE uintptr_t ASLROffset() {
PA_ALWAYS_INLINE constexpr uintptr_t ASLROffset() {
return AslrAddress(0x20000000ULL);
}
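The per-platform `ASLRMask()` / `ASLROffset()` pairs above bound where randomized mapping hints may land. A minimal sketch of how such a mask and offset are typically combined into a hint address (illustrative only; `RandomValue` and `GetRandomPageHint` are stand-ins for this example, not the library's real API):

```cpp
#include <cstdint>
#include <random>

// Illustrative stand-in for a CSPRNG; the real allocator uses its own source.
uint64_t RandomValue() {
  static std::mt19937_64 rng{std::random_device{}()};
  return rng();
}

// Hypothetical helper: clamp a random value into the platform's ASLR window.
// With ASLRMask() == AslrMask(30) and ASLROffset() == 0x20000000, hints fall
// in the 0.5 GiB - 1.5 GiB range, matching the 32-bit comment above.
uintptr_t GetRandomPageHint(uintptr_t aslr_mask, uintptr_t aslr_offset) {
  uintptr_t random = static_cast<uintptr_t>(RandomValue());
  random &= aslr_mask;    // Keep only the randomizable bits.
  random += aslr_offset;  // Shift the window to the platform-specific base.
  return random;          // Caller still aligns this to page granularity.
}
```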
@@ -15,8 +15,9 @@ thread_local bool g_disallow_allocations;
} // namespace

ScopedDisallowAllocations::ScopedDisallowAllocations() {
if (g_disallow_allocations)
if (g_disallow_allocations) {
PA_IMMEDIATE_CRASH();
}

g_disallow_allocations = true;
}
@@ -32,8 +32,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ScopedAllowAllocations {

#else

struct [[maybe_unused]] ScopedDisallowAllocations{};
struct [[maybe_unused]] ScopedAllowAllocations{};
struct [[maybe_unused]] ScopedDisallowAllocations {};
struct [[maybe_unused]] ScopedAllowAllocations {};

#endif // PA_CONFIG(HAS_ALLOCATION_GUARD)
@@ -13,7 +13,6 @@ import("//build_overrides/build.gni")
use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
enable_mte_checked_ptr_support_default = false
put_ref_count_in_previous_slot_default = true
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
@@ -227,7 +227,7 @@ class PA_TRIVIAL_ABI CompressedPointer final {
internal::CompressedPointerBaseGlobal::kBitsToShift +
kBitsForSignExtension;

static PA_ALWAYS_INLINE UnderlyingType Compress(T* ptr) {
PA_ALWAYS_INLINE static UnderlyingType Compress(T* ptr) {
static constexpr size_t kMinimalRequiredAlignment = 8;
static_assert((1 << kOverallBitsToShift) == kMinimalRequiredAlignment);
@@ -252,13 +252,14 @@ class PA_TRIVIAL_ABI CompressedPointer final {
// frequent operation, we let more work here in favor of faster
// decompression.
// TODO(1376980): Avoid this by overreserving the heap.
if (compressed)
if (compressed) {
compressed |= (1u << (sizeof(uint32_t) * CHAR_BIT - 1));
}

return compressed;
}

static PA_ALWAYS_INLINE T* Decompress(UnderlyingType ptr) {
PA_ALWAYS_INLINE static T* Decompress(UnderlyingType ptr) {
PA_DCHECK(internal::CompressedPointerBaseGlobal::IsSet());
const uintptr_t base = internal::CompressedPointerBaseGlobal::Get();
// Treat compressed pointer as signed and cast it to uint64_t, which will
@@ -460,13 +461,13 @@ class PA_TRIVIAL_ABI UncompressedPointer final {

template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr UncompressedPointer(
PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
const UncompressedPointer<U>& other)
: ptr_(other.ptr_) {}

template <typename U,
std::enable_if_t<std::is_convertible_v<U*, T*>>* = nullptr>
PA_ALWAYS_INLINE constexpr UncompressedPointer(
PA_ALWAYS_INLINE constexpr explicit UncompressedPointer(
UncompressedPointer<U>&& other) noexcept
: ptr_(std::move(other.ptr_)) {}
@@ -18,8 +18,9 @@ namespace {
void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {
// Some platforms don't have a thread cache, or it could already have been
// disabled.
if (!root || !root->flags.with_thread_cache)
if (!root || !root->flags.with_thread_cache) {
return;
}

ThreadCacheRegistry::Instance().PurgeAll();
root->flags.with_thread_cache = false;
@@ -30,8 +31,9 @@ void DisableThreadCacheForRootIfEnabled(ThreadSafePartitionRoot* root) {

void EnablePartitionAllocThreadCacheForRootIfDisabled(
ThreadSafePartitionRoot* root) {
if (!root)
if (!root) {
return;
}
root->flags.with_thread_cache = true;
}
@@ -42,8 +44,9 @@ void DisablePartitionAllocThreadCacheForProcess() {
auto* aligned_allocator =
allocator_shim::internal::PartitionAllocMalloc::AlignedAllocator();
DisableThreadCacheForRootIfEnabled(regular_allocator);
if (aligned_allocator != regular_allocator)
if (aligned_allocator != regular_allocator) {
DisableThreadCacheForRootIfEnabled(aligned_allocator);
}
DisableThreadCacheForRootIfEnabled(
allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator());
}
@@ -53,45 +56,79 @@ void DisablePartitionAllocThreadCacheForProcess() {

#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)

void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
DisablePartitionAllocThreadCacheForProcess();
#else
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

ThreadCache::SwapForTesting(root);
EnablePartitionAllocThreadCacheForRootIfDisabled(root);

#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
}

void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root) {
#if PA_CONFIG(THREAD_CACHE_SUPPORTED)

// First, disable the test thread cache we have.
DisableThreadCacheForRootIfEnabled(root);

#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);

ThreadCache::SwapForTesting(regular_allocator);
#else
ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)
}

ThreadAllocStats GetAllocStatsForCurrentThread() {
ThreadCache* thread_cache = ThreadCache::Get();
if (ThreadCache::IsValid(thread_cache))
if (ThreadCache::IsValid(thread_cache)) {
return thread_cache->thread_alloc_stats();
}
return {};
}

#if PA_CONFIG(THREAD_CACHE_SUPPORTED)
ThreadCacheProcessScopeForTesting::ThreadCacheProcessScopeForTesting(
ThreadSafePartitionRoot* root)
: root_(root) {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
regular_was_enabled_ =
regular_allocator && regular_allocator->flags.with_thread_cache;

if (root_ != regular_allocator) {
// Another |root| is ThreadCache's PartitionRoot. Need to disable
// thread cache for the process.
DisablePartitionAllocThreadCacheForProcess();
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
// Replace ThreadCache's PartitionRoot.
ThreadCache::SwapForTesting(root_);
} else {
if (!regular_was_enabled_) {
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_);
}
}
#else
PA_CHECK(!ThreadCache::IsValid(ThreadCache::Get()));
EnablePartitionAllocThreadCacheForRootIfDisabled(root_);
ThreadCache::SwapForTesting(root_);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

PA_CHECK(ThreadCache::Get());
}

ThreadCacheProcessScopeForTesting::~ThreadCacheProcessScopeForTesting() {
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
auto* regular_allocator =
allocator_shim::internal::PartitionAllocMalloc::Allocator();
bool regular_enabled =
regular_allocator && regular_allocator->flags.with_thread_cache;

if (regular_was_enabled_) {
if (!regular_enabled) {
// Need to re-enable ThreadCache for the process.
EnablePartitionAllocThreadCacheForRootIfDisabled(regular_allocator);
// In the case, |regular_allocator| must be ThreadCache's root.
ThreadCache::SwapForTesting(regular_allocator);
} else {
// ThreadCache is enabled for the process, but we need to be
// careful about ThreadCache's PartitionRoot. If it is different from
// |regular_allocator|, we need to invoke SwapForTesting().
if (regular_allocator != root_) {
ThreadCache::SwapForTesting(regular_allocator);
}
}
} else {
// ThreadCache for all processes was disabled.
DisableThreadCacheForRootIfEnabled(regular_allocator);
ThreadCache::SwapForTesting(nullptr);
}
#else
// First, disable the test thread cache we have.
DisableThreadCacheForRootIfEnabled(root_);

ThreadCache::SwapForTesting(nullptr);
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}
#endif // PA_CONFIG(THREAD_CACHE_SUPPORTED)

} // namespace partition_alloc::internal
@@ -5,25 +5,38 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_

#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/allocator/partition_allocator/partition_stats.h"
#include "base/allocator/partition_allocator/thread_cache.h"

namespace partition_alloc::internal {
// These two functions are unsafe to run if there are multiple threads running
// in the process.
//
// Disables the thread cache for the entire process, and replaces it with a
// thread cache for |root|.
void SwapOutProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);
// Disables the current thread cache, and replaces it with the default for the
// process.
void SwapInProcessThreadCacheForTesting(ThreadSafePartitionRoot* root);

// Get allocation stats for the thread cache partition on the current
// thread. See the documentation of ThreadAllocStats for details.
ThreadAllocStats GetAllocStatsForCurrentThread();

// Creates a scope for testing which:
// - if the given |root| is a default malloc root for the entire process,
//   enables the thread cache for the entire process.
//   (This may happen if UsePartitionAllocAsMalloc is enabled.)
// - otherwise, disables the thread cache for the entire process, and
//   replaces it with a thread cache for |root|.
// This class is unsafe to run if there are multiple threads running
// in the process.
class ThreadCacheProcessScopeForTesting {
public:
explicit ThreadCacheProcessScopeForTesting(ThreadSafePartitionRoot* root);
~ThreadCacheProcessScopeForTesting();

ThreadCacheProcessScopeForTesting() = delete;

private:
ThreadSafePartitionRoot* root_ = nullptr;
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
bool regular_was_enabled_ = false;
#endif
};

} // namespace partition_alloc::internal

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_EXTENDED_API_H_
@@ -12,7 +12,6 @@
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"

namespace partition_alloc::internal {
@@ -27,7 +26,7 @@ constexpr size_t kFreeSlotBitmapOffsetMask = kFreeSlotBitmapBitsPerCell - 1;
constexpr size_t kFreeSlotBitmapSize =
(kSuperPageSize / kSmallestBucket) / CHAR_BIT;

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
ReservedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
return base::bits::AlignUp(kFreeSlotBitmapSize, PartitionPageSize());
@@ -36,7 +35,7 @@ ReservedFreeSlotBitmapSize() {
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
CommittedFreeSlotBitmapSize() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
return base::bits::AlignUp(kFreeSlotBitmapSize, SystemPageSize());
@@ -45,7 +44,7 @@ CommittedFreeSlotBitmapSize() {
#endif
}

PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
NumPartitionPagesPerFreeSlotBitmap() {
return ReservedFreeSlotBitmapSize() / PartitionPageSize();
}
@@ -53,11 +52,10 @@ NumPartitionPagesPerFreeSlotBitmap() {
#if BUILDFLAG(USE_FREESLOT_BITMAP)
PA_ALWAYS_INLINE uintptr_t SuperPageFreeSlotBitmapAddr(uintptr_t super_page) {
PA_DCHECK(!(super_page % kSuperPageAlignment));
return super_page + PartitionPageSize() +
(IsManagedByNormalBuckets(super_page) ? ReservedTagBitmapSize() : 0);
return super_page + PartitionPageSize();
}
#endif

} // namespace partition_alloc::internal

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FREESLOT_BITMAP_CONSTANTS_H_
@@ -65,8 +65,9 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan(
root.get(), super_page_count, 0);

if (!super_page_span_start)
if (!super_page_span_start) {
return nullptr;
}

#if defined(ARCH_CPU_64_BITS)
// Mapping the GWP-ASan region in to the lower 32-bits of address space
@@ -70,7 +70,7 @@ namespace internal {
// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED OnNoMemory(size_t size) {
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED void OnNoMemory(size_t size) {
RunPartitionAllocOomCallback();
TerminateBecauseOutOfMemory(size);
PA_IMMEDIATE_CRASH();
@@ -49,8 +49,8 @@ namespace internal {
// The crash is generated in a PA_NOINLINE function so that we can classify the
// crash as an OOM solely by analyzing the stack trace. It is tagged as
// PA_NOT_TAIL_CALLED to ensure that its parent function stays on the stack.
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) void PA_NOT_TAIL_CALLED
OnNoMemory(size_t size);
[[noreturn]] PA_NOT_TAIL_CALLED PA_COMPONENT_EXPORT(
PARTITION_ALLOC) void OnNoMemory(size_t size);

// OOM_CRASH(size) - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.
@@ -19,8 +19,9 @@ void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {

namespace internal {
void RunPartitionAllocOomCallback() {
if (g_oom_callback)
if (g_oom_callback) {
g_oom_callback();
}
}
} // namespace internal
@@ -113,10 +113,11 @@ uintptr_t NextAlignedWithOffset(uintptr_t address,

uintptr_t actual_offset = address & (alignment - 1);
uintptr_t new_address;
if (actual_offset <= requested_offset)
if (actual_offset <= requested_offset) {
new_address = address + requested_offset - actual_offset;
else
} else {
new_address = address + alignment + requested_offset - actual_offset;
}
PA_DCHECK(new_address >= address);
PA_DCHECK(new_address - address < alignment);
PA_DCHECK(new_address % alignment == requested_offset);
@@ -135,8 +136,9 @@ uintptr_t SystemAllocPages(uintptr_t hint,
PA_DCHECK(!(hint & internal::PageAllocationGranularityOffsetMask()));
uintptr_t ret = internal::SystemAllocPagesInternal(
hint, length, accessibility, page_tag, file_descriptor_for_shared_alloc);
if (ret)
if (ret) {
g_total_mapped_address_space.fetch_add(length, std::memory_order_relaxed);
}

return ret;
}
@@ -210,14 +212,16 @@ uintptr_t AllocPagesWithAlignOffset(
file_descriptor_for_shared_alloc);
if (ret) {
// If the alignment is to our liking, we're done.
if ((ret & align_offset_mask) == align_offset)
if ((ret & align_offset_mask) == align_offset) {
return ret;
}
// Free the memory and try again.
FreePages(ret, length);
} else {
// |ret| is null; if this try was unhinted, we're OOM.
if (internal::kHintIsAdvisory || !address)
if (internal::kHintIsAdvisory || !address) {
return 0;
}
}

#if defined(ARCH_CPU_32_BITS)
@@ -368,8 +372,9 @@ bool ReserveAddressSpace(size_t size) {
bool ReleaseReservation() {
// To avoid deadlock, call only FreePages.
internal::ScopedGuard guard(GetReserveLock());
if (!s_reservation_address)
if (!s_reservation_address) {
return false;
}

FreePages(s_reservation_address, s_reservation_size);
s_reservation_address = 0;
@@ -34,12 +34,12 @@ struct PageAccessibilityConfiguration {
};

#if BUILDFLAG(ENABLE_PKEYS)
explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
constexpr explicit PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions), pkey(0) {}
constexpr PageAccessibilityConfiguration(Permissions permissions, int pkey)
: permissions(permissions), pkey(pkey) {}
#else
explicit constexpr PageAccessibilityConfiguration(Permissions permissions)
constexpr explicit PageAccessibilityConfiguration(Permissions permissions)
: permissions(permissions) {}
#endif // BUILDFLAG(ENABLE_PKEYS)
@@ -300,7 +300,7 @@ void DiscardSystemPages(void* address, size_t length);

// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
RoundUpToSystemPage(uintptr_t address) {
return (address + internal::SystemPageOffsetMask()) &
internal::SystemPageBaseMask();
@@ -308,14 +308,14 @@ RoundUpToSystemPage(uintptr_t address) {

// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
RoundDownToSystemPage(uintptr_t address) {
return address & internal::SystemPageBaseMask();
}

// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) {
return (address + internal::PageAllocationGranularityOffsetMask()) &
internal::PageAllocationGranularityBaseMask();
@@ -323,7 +323,7 @@ RoundUpToPageAllocationGranularity(uintptr_t address) {

// Rounds down |address| to the previous multiple of
// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE uintptr_t
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) {
return address & internal::PageAllocationGranularityBaseMask();
}
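The rounding helpers above are pure mask arithmetic. A small self-contained check of that arithmetic (assuming a 4 KiB system page for the example; the real values come from `SystemPageSize()` and friends):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // Example constants; the allocator derives these from SystemPageSize().
  constexpr uintptr_t kSystemPageSize = 4096;  // 4 KiB assumption for the demo.
  constexpr uintptr_t kOffsetMask = kSystemPageSize - 1;  // SystemPageOffsetMask()
  constexpr uintptr_t kBaseMask = ~kOffsetMask;           // SystemPageBaseMask()

  // Round up: add the offset mask, then clear the low bits.
  assert(((5000 + kOffsetMask) & kBaseMask) == 8192);
  // Round down: just clear the low bits.
  assert((5000 & kBaseMask) == 4096);
  // 0 stays 0, as documented above.
  assert(((0 + kOffsetMask) & kBaseMask) == 0);
  return 0;
}
```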
@ -7,8 +7,8 @@
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
|
||||
@ -66,10 +66,10 @@ extern PageCharacteristics page_characteristics;
|
||||
namespace partition_alloc::internal {
|
||||
|
||||
// Forward declaration, implementation below
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PageAllocationGranularity();
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PageAllocationGranularityShift() {
|
||||
#if BUILDFLAG(IS_WIN) || defined(ARCH_CPU_PPC64)
|
||||
// Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
|
||||
@ -96,7 +96,7 @@ PageAllocationGranularityShift() {
|
||||
#endif
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PageAllocationGranularity() {
|
||||
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
|
||||
// This is literally equivalent to |1 << PageAllocationGranularityShift()|
|
||||
@ -116,17 +116,17 @@ PageAllocationGranularity() {
|
||||
#endif
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PageAllocationGranularityOffsetMask() {
|
||||
return PageAllocationGranularity() - 1;
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PageAllocationGranularityBaseMask() {
|
||||
return ~PageAllocationGranularityOffsetMask();
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
SystemPageShift() {
|
||||
// On Windows allocation granularity is higher than the page size. This comes
|
||||
// into play when reserving address space range (allocation granularity),
|
||||
@ -138,7 +138,7 @@ SystemPageShift() {
|
||||
#endif
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
SystemPageSize() {
|
||||
#if (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
|
||||
(BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
|
||||
@ -151,12 +151,12 @@ SystemPageSize() {
|
||||
#endif
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
SystemPageOffsetMask() {
|
||||
return SystemPageSize() - 1;
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
SystemPageBaseMask() {
|
||||
return ~SystemPageOffsetMask();
|
||||
}
|
||||
|
@ -111,9 +111,8 @@ uintptr_t SystemAllocPagesInternal(
|
||||
}
|
||||
|
||||
uint64_t address;
|
||||
status =
|
||||
zx::vmar::root_self()->map(options, vmar_offset, vmo,
|
||||
/*vmo_offset=*/0, length, &address);
|
||||
status = zx::vmar::root_self()->map(options, vmar_offset, vmo,
|
||||
/*vmo_offset=*/0, length, &address);
|
||||
if (status != ZX_OK) {
|
||||
// map() is expected to fail if |hint| is set to an already-in-use location.
|
||||
if (!hint) {
|
||||
|
@ -130,8 +130,9 @@ bool UseMapJit() {
|
||||
base::ScopedCFTypeRef<CFTypeRef> jit_entitlement(
|
||||
SecTaskCopyValueForEntitlement(
|
||||
task.get(), CFSTR("com.apple.security.cs.allow-jit"), nullptr));
|
||||
if (!jit_entitlement)
|
||||
if (!jit_entitlement) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return base::mac::CFCast<CFBooleanRef>(jit_entitlement.get()) ==
|
||||
kCFBooleanTrue;
|
||||
@ -248,8 +249,9 @@ void SetSystemPagesAccessInternal(
|
||||
//
|
||||
// In this case, we are almost certainly bumping into the sandbox limit, mark
|
||||
// the crash as OOM. See SandboxLinux::LimitAddressSpace() for details.
|
||||
if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE))
|
||||
if (ret == -1 && errno == ENOMEM && (access_flags & PROT_WRITE)) {
|
||||
OOM_CRASH(length);
|
||||
}
|
||||
|
||||
PA_PCHECK(0 == ret);
|
||||
}
|
||||
@ -365,8 +367,9 @@ bool TryRecommitSystemPagesInternal(
|
||||
if (accessibility_disposition ==
|
||||
PageAccessibilityDisposition::kRequireUpdate) {
|
||||
bool ok = TrySetSystemPagesAccess(address, length, accessibility);
|
||||
if (!ok)
|
||||
if (!ok) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
#if BUILDFLAG(IS_APPLE)
|
||||
|
@ -72,8 +72,9 @@ void* VirtualAllocWithRetry(void* address,
|
||||
// Only retry for commit failures. If this is an address space problem
|
||||
// (e.g. caller asked for an address which is not available), this is
|
||||
// unlikely to be resolved by waiting.
|
||||
if (ret || !should_retry || !IsOutOfMemory(GetLastError()))
|
||||
if (ret || !should_retry || !IsOutOfMemory(GetLastError())) {
|
||||
break;
|
||||
}
|
||||
|
||||
Sleep(kDelayMs);
|
||||
}
|
||||
@ -142,8 +143,9 @@ bool TrySetSystemPagesAccessInternal(
|
||||
PageAccessibilityConfiguration accessibility) {
|
||||
void* ptr = reinterpret_cast<void*>(address);
|
||||
if (accessibility.permissions ==
|
||||
PageAccessibilityConfiguration::kInaccessible)
|
||||
PageAccessibilityConfiguration::kInaccessible) {
|
||||
return VirtualFree(ptr, length, MEM_DECOMMIT) != 0;
|
||||
}
|
||||
// Call the retry path even though this function can fail, because callers of
|
||||
// this are likely to crash the process when this function fails, and we don't
|
||||
// want that for transient failures.
|
||||
@ -167,8 +169,9 @@ void SetSystemPagesAccessInternal(
|
||||
if (!VirtualAllocWithRetry(ptr, length, MEM_COMMIT,
|
||||
GetAccessFlags(accessibility))) {
|
||||
int32_t error = GetLastError();
|
||||
if (error == ERROR_COMMITMENT_LIMIT)
|
||||
if (error == ERROR_COMMITMENT_LIMIT) {
|
||||
OOM_CRASH(length);
|
||||
}
|
||||
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
|
||||
// report we get the error number.
|
||||
PA_CHECK(ERROR_SUCCESS == error);
|
||||
|
@ -37,16 +37,16 @@ namespace internal {
|
||||
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
|
||||
public:
|
||||
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
static PA_ALWAYS_INLINE uintptr_t RegularPoolBaseMask() {
|
||||
PA_ALWAYS_INLINE static uintptr_t RegularPoolBaseMask() {
|
||||
return setup_.regular_pool_base_mask_;
|
||||
}
|
||||
#else
|
||||
static PA_ALWAYS_INLINE constexpr uintptr_t RegularPoolBaseMask() {
|
||||
PA_ALWAYS_INLINE static constexpr uintptr_t RegularPoolBaseMask() {
|
||||
return kRegularPoolBaseMask;
|
||||
}
|
||||
#endif
|
||||
|
||||
static PA_ALWAYS_INLINE std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
|
||||
PA_ALWAYS_INLINE static std::pair<pool_handle, uintptr_t> GetPoolAndOffset(
|
||||
uintptr_t address) {
|
||||
// When USE_BACKUP_REF_PTR is off, BRP pool isn't used.
|
||||
#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
@ -76,10 +76,10 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
|
||||
}
|
||||
return std::make_pair(pool, address - base);
|
||||
}
|
||||
static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMaxSize() {
|
||||
PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMaxSize() {
|
||||
return kConfigurablePoolMaxSize;
|
||||
}
|
||||
static PA_ALWAYS_INLINE constexpr size_t ConfigurablePoolMinSize() {
|
||||
PA_ALWAYS_INLINE static constexpr size_t ConfigurablePoolMinSize() {
|
||||
return kConfigurablePoolMinSize;
|
||||
}
|
||||
|
||||
@ -100,7 +100,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
|
||||
static void UninitForTesting();
|
||||
static void UninitConfigurablePoolForTesting();
|
||||
|
||||
static PA_ALWAYS_INLINE bool IsInitialized() {
|
||||
PA_ALWAYS_INLINE static bool IsInitialized() {
|
||||
// Either neither or both regular and BRP pool are initialized. The
|
||||
// configurable and pkey pool are initialized separately.
|
||||
if (setup_.regular_pool_base_address_ != kUninitializedPoolBaseAddress) {
|
||||
@ -112,19 +112,19 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
|
||||
return false;
|
||||
}
|
||||
|
||||
static PA_ALWAYS_INLINE bool IsConfigurablePoolInitialized() {
|
||||
PA_ALWAYS_INLINE static bool IsConfigurablePoolInitialized() {
|
||||
return setup_.configurable_pool_base_address_ !=
|
||||
kUninitializedPoolBaseAddress;
|
||||
}
|
||||
|
||||
#if BUILDFLAG(ENABLE_PKEYS)
|
||||
static PA_ALWAYS_INLINE bool IsPkeyPoolInitialized() {
|
||||
PA_ALWAYS_INLINE static bool IsPkeyPoolInitialized() {
|
||||
return setup_.pkey_pool_base_address_ != kUninitializedPoolBaseAddress;
|
||||
}
|
||||
#endif
|
||||
|
||||
// Returns false for nullptr.
|
||||
static PA_ALWAYS_INLINE bool IsInRegularPool(uintptr_t address) {
|
||||
PA_ALWAYS_INLINE static bool IsInRegularPool(uintptr_t address) {
|
||||
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
const uintptr_t regular_pool_base_mask = setup_.regular_pool_base_mask_;
|
||||
#else
|
||||
@ -134,12 +134,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
|
||||
setup_.regular_pool_base_address_;
|
||||
}
|
||||
|
||||
static PA_ALWAYS_INLINE uintptr_t RegularPoolBase() {
|
||||
PA_ALWAYS_INLINE static uintptr_t RegularPoolBase() {
|
||||
return setup_.regular_pool_base_address_;
|
||||
}
|
||||
|
||||
// Returns false for nullptr.
|
||||
static PA_ALWAYS_INLINE bool IsInBRPPool(uintptr_t address) {
|
||||
PA_ALWAYS_INLINE static bool IsInBRPPool(uintptr_t address) {
|
||||
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
const uintptr_t brp_pool_base_mask = setup_.brp_pool_base_mask_;
|
||||
#else
|
||||
@ -151,7 +151,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
|
||||
#if PA_CONFIG(GLUE_CORE_POOLS)
|
||||
// Checks whether the address belongs to either regular or BRP pool.
|
||||
// Returns false for nullptr.
|
||||
static PA_ALWAYS_INLINE bool IsInCorePools(uintptr_t address) {
|
||||
PA_ALWAYS_INLINE static bool IsInCorePools(uintptr_t address) {
|
||||
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
const uintptr_t core_pools_base_mask = setup_.core_pools_base_mask_;
|
||||
#else
|
||||
@ -166,40 +166,40 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
|
||||
return ret;
|
||||
}
|
||||
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
static PA_ALWAYS_INLINE size_t CorePoolsSize() {
|
||||
PA_ALWAYS_INLINE static size_t CorePoolsSize() {
|
||||
return RegularPoolSize() * 2;
|
||||
}
|
||||
#else
|
||||
static PA_ALWAYS_INLINE constexpr size_t CorePoolsSize() {
|
||||
PA_ALWAYS_INLINE static constexpr size_t CorePoolsSize() {
|
||||
return RegularPoolSize() * 2;
|
||||
}
|
||||
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
#endif // PA_CONFIG(GLUE_CORE_POOLS)
|
||||
|
||||
static PA_ALWAYS_INLINE uintptr_t OffsetInBRPPool(uintptr_t address) {
|
||||
PA_ALWAYS_INLINE static uintptr_t OffsetInBRPPool(uintptr_t address) {
|
||||
PA_DCHECK(IsInBRPPool(address));
|
||||
return address - setup_.brp_pool_base_address_;
|
||||
}
|
||||
|
||||
// Returns false for nullptr.
|
||||
static PA_ALWAYS_INLINE bool IsInConfigurablePool(uintptr_t address) {
|
||||
PA_ALWAYS_INLINE static bool IsInConfigurablePool(uintptr_t address) {
|
||||
return (address & setup_.configurable_pool_base_mask_) ==
|
||||
setup_.configurable_pool_base_address_;
|
||||
}
|
||||
|
||||
static PA_ALWAYS_INLINE uintptr_t ConfigurablePoolBase() {
|
||||
PA_ALWAYS_INLINE static uintptr_t ConfigurablePoolBase() {
|
||||
return setup_.configurable_pool_base_address_;
|
||||
}
|
||||
|
||||
#if BUILDFLAG(ENABLE_PKEYS)
|
||||
// Returns false for nullptr.
|
||||
static PA_ALWAYS_INLINE bool IsInPkeyPool(uintptr_t address) {
|
||||
PA_ALWAYS_INLINE static bool IsInPkeyPool(uintptr_t address) {
|
||||
return (address & kPkeyPoolBaseMask) == setup_.pkey_pool_base_address_;
|
||||
}
|
||||
#endif
|
||||
|
||||
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
|
||||
static PA_ALWAYS_INLINE std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
|
||||
PA_ALWAYS_INLINE static std::ptrdiff_t ShadowPoolOffset(pool_handle pool) {
|
||||
if (pool == kRegularPoolHandle) {
|
||||
return regular_pool_shadow_offset_;
|
||||
} else if (pool == kBRPPoolHandle) {
|
||||
@ -222,20 +222,20 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAddressSpace {
|
||||
|
||||
private:
|
||||
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
static PA_ALWAYS_INLINE size_t RegularPoolSize();
|
||||
static PA_ALWAYS_INLINE size_t BRPPoolSize();
|
||||
PA_ALWAYS_INLINE static size_t RegularPoolSize();
|
||||
PA_ALWAYS_INLINE static size_t BRPPoolSize();
|
||||
#else
|
||||
// The pool sizes should be as large as maximum whenever possible.
|
||||
constexpr static PA_ALWAYS_INLINE size_t RegularPoolSize() {
|
||||
PA_ALWAYS_INLINE static constexpr size_t RegularPoolSize() {
|
||||
return kRegularPoolSize;
|
||||
}
|
||||
constexpr static PA_ALWAYS_INLINE size_t BRPPoolSize() {
|
||||
PA_ALWAYS_INLINE static constexpr size_t BRPPoolSize() {
|
||||
return kBRPPoolSize;
|
||||
}
|
||||
#endif // PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
|
||||
#if BUILDFLAG(ENABLE_PKEYS)
|
||||
constexpr static PA_ALWAYS_INLINE size_t PkeyPoolSize() {
|
||||
PA_ALWAYS_INLINE static constexpr size_t PkeyPoolSize() {
|
||||
return kPkeyPoolSize;
|
||||
}
|
||||
#endif
|
||||
|
@ -68,12 +68,15 @@ declare_args() {
|
||||
# recommended to enable PA-E above, but isn't strictly necessary. Embedders
|
||||
# can create and use PA partitions explicitly.
|
||||
enable_pointer_compression_support = false
|
||||
|
||||
# Enables a bounds check when two pointers (at least one being raw_ptr) are
|
||||
# subtracted (if supported by the underlying implementation).
|
||||
enable_pointer_subtraction_check = false
|
||||
}
|
||||
|
||||
declare_args() {
|
||||
# Build support for Use-after-Free protection via BackupRefPtr (BRP) or
|
||||
# MTECheckedPtr, and switch the raw_ptr<T> implementation to RawPtrBackupRefImpl
|
||||
# and MTECheckedPtrImp, respectively. They're mutually exclusive.
|
||||
# Build support for Use-after-Free protection via BackupRefPtr (BRP),
|
||||
# making the raw_ptr<T> implementation to RawPtrBackupRefImpl if active.
|
||||
#
|
||||
# These are effective only for memory allocated from PartitionAlloc, so it is
|
||||
# recommended to enable PA-E above, but isn't strictly necessary. Embedders
|
||||
@ -84,17 +87,12 @@ declare_args() {
|
||||
# partition_alloc::PartitionOptions::BackupRefPtr::kEnabled.
|
||||
enable_backup_ref_ptr_support =
|
||||
use_partition_alloc && enable_backup_ref_ptr_support_default
|
||||
enable_mte_checked_ptr_support =
|
||||
use_partition_alloc && enable_mte_checked_ptr_support_default
|
||||
|
||||
# RAW_PTR_EXCLUSION macro is disabled on official builds because it increased
|
||||
# binary size. This flag can be used to enable it for official builds too.
|
||||
force_enable_raw_ptr_exclusion = false
|
||||
}
|
||||
|
||||
assert(!(enable_backup_ref_ptr_support && enable_mte_checked_ptr_support),
|
||||
"MTECheckedPtrSupport conflicts with BRPSupport.")
|
||||
|
||||
assert(!enable_pointer_compression_support || glue_core_pools,
|
||||
"Pointer compression relies on core pools being contiguous.")
|
||||
|
||||
@ -133,16 +131,24 @@ declare_args() {
|
||||
enable_backup_ref_ptr_slow_checks =
|
||||
enable_backup_ref_ptr_slow_checks_default && enable_backup_ref_ptr_support
|
||||
|
||||
# Enable the feature flag required to activate backup ref pointers. That is to
|
||||
# say `PartitionAllocBackupRefPtr`.
|
||||
#
|
||||
# This is meant to be used primarily on bots. It is much easier to override
|
||||
# the feature flags using a binary flag instead of updating multiple bots's
|
||||
# scripts to pass command line arguments.
|
||||
enable_backup_ref_ptr_feature_flag = false
|
||||
|
||||
enable_dangling_raw_ptr_checks =
|
||||
enable_dangling_raw_ptr_checks_default && enable_backup_ref_ptr_support
|
||||
|
||||
# Enable the feature flags required to check for dangling pointers. That is to
|
||||
# say `PartitionAllocBackupRefPtr` and `PartitionAllocDanglingPtr`.
|
||||
# Enable the feature flag required to check for dangling pointers. That is to
|
||||
# say `PartitionAllocDanglingPtr`.
|
||||
#
|
||||
# This is meant to be used on bots only. It is much easier to override the
|
||||
# feature flags using a binary flag instead of updating multiple bots's
|
||||
# This is meant to be used primarily on bots. It is much easier to override
|
||||
# the feature flags using a binary flag instead of updating multiple bots's
|
||||
# scripts to pass command line arguments.
|
||||
enable_dangling_raw_ptr_feature_flags_for_bots = false
|
||||
enable_dangling_raw_ptr_feature_flag = false
|
||||
|
||||
# Enables the dangling raw_ptr checks feature for the performance experiment.
|
||||
# Not every dangling pointers have been fixed or annotated yet. To avoid
|
||||
@ -154,6 +160,7 @@ declare_args() {
|
||||
# to go through build_overrides
|
||||
enable_dangling_raw_ptr_perf_experiment = false
|
||||
|
||||
# Set to `enable_backup_ref_ptr_support && has_64_bit_pointers` when enabling.
|
||||
backup_ref_ptr_poison_oob_ptr = false
|
||||
}
|
||||
|
||||
@ -187,7 +194,6 @@ if (is_nacl) {
|
||||
if (!use_partition_alloc) {
|
||||
use_partition_alloc_as_malloc = false
|
||||
enable_backup_ref_ptr_support = false
|
||||
enable_mte_checked_ptr_support = false
|
||||
use_asan_backup_ref_ptr = false
|
||||
use_asan_unowned_ptr = false
|
||||
use_hookable_raw_ptr = false
|
||||
@ -195,6 +201,7 @@ if (!use_partition_alloc) {
|
||||
enable_backup_ref_ptr_slow_checks = false
|
||||
enable_dangling_raw_ptr_checks = false
|
||||
enable_dangling_raw_ptr_perf_experiment = false
|
||||
enable_pointer_subtraction_check = false
|
||||
backup_ref_ptr_poison_oob_ptr = false
|
||||
use_starscan = false
|
||||
}
|
||||
@ -227,6 +234,8 @@ assert(
|
||||
assert(
|
||||
enable_backup_ref_ptr_support || !backup_ref_ptr_poison_oob_ptr,
|
||||
"Can't enable poisoning for OOB pointers if BackupRefPtr isn't enabled at all")
|
||||
assert(has_64_bit_pointers || !backup_ref_ptr_poison_oob_ptr,
|
||||
"Can't enable poisoning for OOB pointers if pointers are only 32-bit")
|
||||
|
||||
# AsanBackupRefPtr and AsanUnownedPtr are mutually exclusive variants of raw_ptr.
|
||||
assert(
|
||||
|
@ -13,7 +13,6 @@
|
||||
#include <type_traits>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
|
@ -7,19 +7,6 @@
|
||||
|
||||
#include "build/build_config.h"
|
||||
|
||||
// This is a wrapper around `__has_cpp_attribute`, which can be used to test for
|
||||
// the presence of an attribute. In case the compiler does not support this
|
||||
// macro it will simply evaluate to 0.
|
||||
//
|
||||
// References:
|
||||
// https://wg21.link/sd6#testing-for-the-presence-of-an-attribute-__has_cpp_attribute
|
||||
// https://wg21.link/cpp.cond#:__has_cpp_attribute
|
||||
#if defined(__has_cpp_attribute)
|
||||
#define PA_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
|
||||
#else
|
||||
#define PA_HAS_CPP_ATTRIBUTE(x) 0
|
||||
#endif
|
||||
|
||||
// A wrapper around `__has_attribute`, similar to HAS_CPP_ATTRIBUTE.
|
||||
#if defined(__has_attribute)
|
||||
#define PA_HAS_ATTRIBUTE(x) __has_attribute(x)
|
||||
@ -37,7 +24,9 @@
|
||||
// Annotate a function indicating it should not be inlined.
|
||||
// Use like:
|
||||
// NOINLINE void DoStuff() { ... }
|
||||
#if defined(COMPILER_GCC) || defined(__clang__)
|
||||
#if defined(__clang__) && PA_HAS_ATTRIBUTE(noinline)
|
||||
#define PA_NOINLINE [[clang::noinline]]
|
||||
#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(noinline)
|
||||
#define PA_NOINLINE __attribute__((noinline))
|
||||
#elif defined(COMPILER_MSVC)
|
||||
#define PA_NOINLINE __declspec(noinline)
|
||||
@ -45,7 +34,10 @@
|
||||
#define PA_NOINLINE
|
||||
#endif
|
||||
|
||||
#if defined(COMPILER_GCC) && defined(NDEBUG)
|
||||
#if defined(__clang__) && defined(NDEBUG) && PA_HAS_ATTRIBUTE(always_inline)
|
||||
#define PA_ALWAYS_INLINE [[clang::always_inline]] inline
|
||||
#elif defined(COMPILER_GCC) && defined(NDEBUG) && \
|
||||
PA_HAS_ATTRIBUTE(always_inline)
|
||||
#define PA_ALWAYS_INLINE inline __attribute__((__always_inline__))
|
||||
#elif defined(COMPILER_MSVC) && defined(NDEBUG)
|
||||
#define PA_ALWAYS_INLINE __forceinline
|
||||
@ -62,72 +54,42 @@
|
||||
// Use like:
|
||||
// void NOT_TAIL_CALLED FooBar();
|
||||
#if defined(__clang__) && PA_HAS_ATTRIBUTE(not_tail_called)
|
||||
#define PA_NOT_TAIL_CALLED __attribute__((not_tail_called))
|
||||
#define PA_NOT_TAIL_CALLED [[clang::not_tail_called]]
|
||||
#else
|
||||
#define PA_NOT_TAIL_CALLED
|
||||
#endif
|
||||
|
||||
// Specify memory alignment for structs, classes, etc.
|
||||
// Use like:
|
||||
// class ALIGNAS(16) MyClass { ... }
|
||||
// ALIGNAS(16) int array[4];
|
||||
// class PA_ALIGNAS(16) MyClass { ... }
|
||||
// PA_ALIGNAS(16) int array[4];
|
||||
//
|
||||
// In most places you can use the C++11 keyword "alignas", which is preferred.
|
||||
//
|
||||
// But compilers have trouble mixing __attribute__((...)) syntax with
|
||||
// alignas(...) syntax.
|
||||
//
|
||||
// Doesn't work in clang or gcc:
|
||||
// struct alignas(16) __attribute__((packed)) S { char c; };
|
||||
// Works in clang but not gcc:
|
||||
// struct __attribute__((packed)) alignas(16) S2 { char c; };
|
||||
// Works in clang and gcc:
|
||||
// struct alignas(16) S3 { char c; } __attribute__((packed));
|
||||
//
|
||||
// There are also some attributes that must be specified *before* a class
|
||||
// definition: visibility (used for exporting functions/classes) is one of
|
||||
// these attributes. This means that it is not possible to use alignas() with a
|
||||
// class that is marked as exported.
|
||||
#if defined(COMPILER_MSVC)
|
||||
// Historically, compilers had trouble mixing __attribute__((...)) syntax with
|
||||
// alignas(...) syntax. However, at least Clang is very accepting nowadays. It
|
||||
// may be that this macro can be removed entirely.
|
||||
#if defined(__clang__)
|
||||
#define PA_ALIGNAS(byte_alignment) alignas(byte_alignment)
|
||||
#elif defined(COMPILER_MSVC)
|
||||
#define PA_ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
|
||||
#elif defined(COMPILER_GCC)
|
||||
#elif defined(COMPILER_GCC) && PA_HAS_ATTRIBUTE(aligned)
|
||||
#define PA_ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
|
||||
#endif
|
||||
|
||||
// In case the compiler supports it NO_UNIQUE_ADDRESS evaluates to the C++20
|
||||
// attribute [[no_unique_address]]. This allows annotating data members so that
|
||||
// they need not have an address distinct from all other non-static data members
|
||||
// of its class.
|
||||
//
|
||||
// References:
|
||||
// * https://en.cppreference.com/w/cpp/language/attributes/no_unique_address
|
||||
// * https://wg21.link/dcl.attr.nouniqueaddr
|
||||
#if PA_HAS_CPP_ATTRIBUTE(no_unique_address)
|
||||
#define PA_NO_UNIQUE_ADDRESS [[no_unique_address]]
|
||||
#else
|
||||
#define PA_NO_UNIQUE_ADDRESS
|
||||
#endif
|
||||
|
||||
// Tell the compiler a function is using a printf-style format string.
|
||||
// Tells the compiler a function is using a printf-style format string.
|
||||
// |format_param| is the one-based index of the format string parameter;
|
||||
// |dots_param| is the one-based index of the "..." parameter.
|
||||
// For v*printf functions (which take a va_list), pass 0 for dots_param.
|
||||
// (This is undocumented but matches what the system C headers do.)
|
||||
// For member functions, the implicit this parameter counts as index 1.
|
||||
#if defined(COMPILER_GCC) || defined(__clang__)
|
||||
#if (defined(COMPILER_GCC) || defined(__clang__)) && PA_HAS_ATTRIBUTE(format)
|
||||
#define PA_PRINTF_FORMAT(format_param, dots_param) \
|
||||
__attribute__((format(printf, format_param, dots_param)))
|
||||
#else
|
||||
#define PA_PRINTF_FORMAT(format_param, dots_param)
|
||||
#endif
|
||||
|
||||
// WPRINTF_FORMAT is the same, but for wide format strings.
|
||||
// This doesn't appear to yet be implemented in any compiler.
|
||||
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
|
||||
#define PA_WPRINTF_FORMAT(format_param, dots_param)
|
||||
// If available, it would look like:
|
||||
// __attribute__((format(wprintf, format_param, dots_param)))
|
||||
|
||||
// Sanitizers annotations.
|
||||
#if PA_HAS_ATTRIBUTE(no_sanitize)
|
||||
#define PA_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
|
||||
@ -144,27 +106,10 @@
|
||||
// Use this to annotate code that deliberately reads uninitialized data, for
|
||||
// example a GC scavenging root set pointers from the stack.
|
||||
#define PA_MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
|
||||
|
||||
// Check a memory region for initializedness, as if it was being used here.
|
||||
// If any bits are uninitialized, crash with an MSan report.
|
||||
// Use this to sanitize data which MSan won't be able to track, e.g. before
|
||||
// passing data to another process via shared memory.
|
||||
#define PA_MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
|
||||
__msan_check_mem_is_initialized(p, size)
|
||||
#else // MEMORY_SANITIZER
|
||||
#define PA_MSAN_UNPOISON(p, size)
|
||||
#define PA_MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
|
||||
#endif // MEMORY_SANITIZER
|
||||
|
||||
// Macro useful for writing cross-platform function pointers.
|
||||
#if !defined(PA_CDECL)
|
||||
#if BUILDFLAG(IS_WIN)
|
||||
#define PA_CDECL __cdecl
|
||||
#else // BUILDFLAG(IS_WIN)
|
||||
#define PA_CDECL
|
||||
#endif // BUILDFLAG(IS_WIN)
|
||||
#endif // !defined(PA_CDECL)
|
||||
|
||||
// Macro for hinting that an expression is likely to be false.
|
||||
#if !defined(PA_UNLIKELY)
|
||||
#if defined(COMPILER_GCC) || defined(__clang__)
|
||||
@ -182,23 +127,6 @@
|
||||
#endif // defined(COMPILER_GCC)
|
||||
#endif // !defined(PA_LIKELY)
|
||||
|
||||
// Compiler feature-detection.
|
||||
// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
|
||||
#if defined(__has_feature)
|
||||
#define PA_HAS_FEATURE(FEATURE) __has_feature(FEATURE)
|
||||
#else
|
||||
#define PA_HAS_FEATURE(FEATURE) 0
|
||||
#endif
|
||||
|
||||
#if defined(COMPILER_GCC)
|
||||
#define PA_PRETTY_FUNCTION __PRETTY_FUNCTION__
|
||||
#elif defined(COMPILER_MSVC)
|
||||
#define PA_PRETTY_FUNCTION __FUNCSIG__
|
||||
#else
|
||||
// See https://en.cppreference.com/w/c/language/function_definition#func
|
||||
#define PA_PRETTY_FUNCTION __func__
|
||||
#endif
|
||||
|
||||
#if !defined(PA_CPU_ARM_NEON)
|
||||
#if defined(__arm__)
|
||||
#if !defined(__ARMEB__) && !defined(__ARM_EABI__) && !defined(__EABI__) && \
|
||||
@ -217,63 +145,6 @@
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(__clang__) && PA_HAS_ATTRIBUTE(uninitialized)
|
||||
// Attribute "uninitialized" disables -ftrivial-auto-var-init=pattern for
|
||||
// the specified variable.
|
||||
// Library-wide alternative is
|
||||
// 'configs -= [ "//build/config/compiler:default_init_stack_vars" ]' in .gn
|
||||
// file.
|
||||
//
|
||||
// See "init_stack_vars" in build/config/compiler/BUILD.gn and
|
||||
// http://crbug.com/977230
|
||||
// "init_stack_vars" is enabled for non-official builds and we hope to enable it
|
||||
// in official build in 2020 as well. The flag writes fixed pattern into
|
||||
// uninitialized parts of all local variables. In rare cases such initialization
|
||||
// is undesirable and attribute can be used:
|
||||
// 1. Degraded performance
|
||||
// In most cases compiler is able to remove additional stores. E.g. if memory is
|
||||
// never accessed or properly initialized later. Preserved stores mostly will
|
||||
// not affect program performance. However if compiler failed on some
|
||||
// performance critical code we can get a visible regression in a benchmark.
|
||||
// 2. memset, memcpy calls
|
||||
// Compiler may replaces some memory writes with memset or memcpy calls. This is
|
||||
// not -ftrivial-auto-var-init specific, but it can happen more likely with the
|
||||
// flag. It can be a problem if code is not linked with C run-time library.
|
||||
//
|
||||
// Note: The flag is security risk mitigation feature. So in future the
|
||||
// attribute uses should be avoided when possible. However to enable this
|
||||
// mitigation on the most of the code we need to be less strict now and minimize
|
||||
// number of exceptions later. So if in doubt feel free to use attribute, but
|
||||
// please document the problem for someone who is going to cleanup it later.
|
||||
// E.g. platform, bot, benchmark or test name in patch description or next to
|
||||
// the attribute.
|
||||
#define PA_STACK_UNINITIALIZED __attribute__((uninitialized))
|
||||
#else
|
||||
#define PA_STACK_UNINITIALIZED
|
||||
#endif
|
||||
|
||||
// Attribute "no_stack_protector" disables -fstack-protector for the specified
|
||||
// function.
|
||||
//
|
||||
// "stack_protector" is enabled on most POSIX builds. The flag adds a canary
|
||||
// to each stack frame, which on function return is checked against a reference
|
||||
// canary. If the canaries do not match, it's likely that a stack buffer
|
||||
// overflow has occurred, so immediately crashing will prevent exploitation in
|
||||
// many cases.
|
||||
//
|
||||
// In some cases it's desirable to remove this, e.g. on hot functions, or if
|
||||
// we have purposely changed the reference canary.
|
||||
#if defined(COMPILER_GCC) || defined(__clang__)
|
||||
#if PA_HAS_ATTRIBUTE(__no_stack_protector__)
|
||||
#define PA_NO_STACK_PROTECTOR __attribute__((__no_stack_protector__))
|
||||
#else
|
||||
#define PA_NO_STACK_PROTECTOR \
|
||||
__attribute__((__optimize__("-fno-stack-protector")))
|
||||
#endif
|
||||
#else
|
||||
#define PA_NO_STACK_PROTECTOR
|
||||
#endif
|
||||
|
||||
// The ANALYZER_ASSUME_TRUE(bool arg) macro adds compiler-specific hints
|
||||
// to Clang which control what code paths are statically analyzed,
|
||||
// and is meant to be used in conjunction with assert & assert-like functions.
|
||||
@ -343,15 +214,6 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
|
||||
#define PA_TRIVIAL_ABI
|
||||
#endif
|
||||
|
||||
// Marks a member function as reinitializing a moved-from variable.
|
||||
// See also
|
||||
// https://clang.llvm.org/extra/clang-tidy/checks/bugprone-use-after-move.html#reinitialization
|
||||
#if defined(__clang__) && PA_HAS_ATTRIBUTE(reinitializes)
|
||||
#define PA_REINITIALIZES_AFTER_MOVE [[clang::reinitializes]]
|
||||
#else
|
||||
#define PA_REINITIALIZES_AFTER_MOVE
|
||||
#endif
|
||||
|
||||
// Requires constant initialization. See constinit in C++20. Allows to rely on a
|
||||
// variable being initialized before execution, and not requiring a global
|
||||
// constructor.
|
||||
@ -363,10 +225,8 @@ inline constexpr bool AnalyzerAssumeTrue(bool arg) {
|
||||
#endif
|
||||
|
||||
#if defined(__clang__)
|
||||
#define PA_GSL_OWNER [[gsl::Owner]]
|
||||
#define PA_GSL_POINTER [[gsl::Pointer]]
|
||||
#else
|
||||
#define PA_GSL_OWNER
|
||||
#define PA_GSL_POINTER
|
||||
#endif
|
||||
|
||||
|
@ -6,7 +6,6 @@
|
||||
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CPU_H_
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace partition_alloc::internal::base {
|
||||
|
@ -0,0 +1,33 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_

namespace partition_alloc::internal::base {

// std::is_constant_evaluated was introduced in C++20. PartitionAlloc's minimum
// supported C++ version is C++17.
#if defined(__cpp_lib_is_constant_evaluated) && \
__cpp_lib_is_constant_evaluated >= 201811L

#include <type_traits>
using std::is_constant_evaluated;

#else

// Implementation of C++20's std::is_constant_evaluated.
//
// References:
// - https://en.cppreference.com/w/cpp/types/is_constant_evaluated
// - https://wg21.link/meta.const.eval
constexpr bool is_constant_evaluated() noexcept {
return __builtin_is_constant_evaluated();
}

#endif

} // namespace partition_alloc::internal::base

#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
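
// Editor's note: illustrative usage sketch, not part of the upstream diff. It
// shows the typical use of the is_constant_evaluated() shim above: selecting
// a compile-time-friendly code path inside a constexpr function while keeping
// a bit-twiddling path for run time. The function is hypothetical and assumes
// <cstddef> for size_t.
constexpr size_t AlignUpExample(size_t value, size_t alignment) {
  if (partition_alloc::internal::base::is_constant_evaluated()) {
    // Constant evaluation: stick to plain arithmetic.
    return (value + alignment - 1) / alignment * alignment;
  }
  // Run time: for power-of-two alignments the mask form is equivalent and
  // typically cheaper.
  return (value + alignment - 1) & ~(alignment - 1);
}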
|
@ -8,7 +8,6 @@
|
||||
#include <stddef.h>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
|
||||
namespace partition_alloc::internal::base::debug {
|
||||
|
||||
|
@ -107,7 +107,6 @@
|
||||
#include <string>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
// Windows-style drive letter support and pathname separator characters can be
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
|
@ -1,22 +0,0 @@
|
||||
// Copyright 2022 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_
|
||||
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_
|
||||
|
||||
namespace base {
|
||||
|
||||
class LapTimer;
|
||||
|
||||
} // namespace base
|
||||
|
||||
namespace partition_alloc::internal::base {
|
||||
|
||||
// TODO(https://crbug.com/1288247): Remove these 'using' declarations once
|
||||
// the migration to the new namespaces gets done.
|
||||
using ::base::LapTimer;
|
||||
|
||||
} // namespace partition_alloc::internal::base
|
||||
|
||||
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_MIGRATION_ADAPTER_H_
|
@ -12,7 +12,6 @@
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/files/file_path.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
#if BUILDFLAG(IS_WIN)
|
||||
|
@ -10,7 +10,6 @@
|
||||
#include <string>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
|
||||
namespace partition_alloc::internal::base {
|
||||
|
||||
|
@ -10,7 +10,6 @@
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/gtest_prod_util.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace partition_alloc {
|
||||
|
@ -8,7 +8,6 @@
|
||||
#include <errno.h>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace partition_alloc::internal::base {
|
||||
|
@ -14,7 +14,6 @@
|
||||
#include <iosfwd>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread_ref.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
|
||||
#include "build/build_config.h"
|
||||
|
@ -13,7 +13,6 @@
|
||||
|
||||
#include <iosfwd>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/threading/platform_thread.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
|
@ -13,7 +13,6 @@
|
||||
#include <iosfwd>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
#if BUILDFLAG(IS_WIN)
|
||||
|
@ -70,7 +70,6 @@
|
||||
|
||||
#include "base/allocator/partition_allocator/chromeos_buildflags.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/migration_adapter.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/numerics/clamped_math.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_check.h"
|
||||
#include "build/build_config.h"
|
||||
|
@ -134,8 +134,9 @@ struct PA_DEBUGKV_ALIGN DebugKv {
|
||||
|
||||
for (int index = 0; index < 8; index++) {
|
||||
k[index] = key[index];
|
||||
if (key[index] == '\0')
|
||||
if (key[index] == '\0') {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -168,27 +168,8 @@ static_assert(sizeof(void*) != 8, "");
|
||||
static_assert(sizeof(void*) == 8);
|
||||
#endif
|
||||
|
||||
#if BUILDFLAG(HAS_64_BIT_POINTERS) && BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
#define PA_CONFIG_USE_OOB_POISON() 1
|
||||
#else
|
||||
#define PA_CONFIG_USE_OOB_POISON() 0
|
||||
#endif
|
||||
|
||||
// Build MTECheckedPtr code.
//
// Only applicable to code with 64-bit pointers. Currently conflicts with true
// hardware MTE.
#if BUILDFLAG(ENABLE_MTE_CHECKED_PTR_SUPPORT) && \
BUILDFLAG(HAS_64_BIT_POINTERS) && !PA_CONFIG(HAS_MEMORY_TAGGING)
static_assert(sizeof(void*) == 8);
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 1
#else
#define PA_CONFIG_ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS() 0
#endif
|
||||
|
||||
// Specifies whether allocation extras need to be added.
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
|
||||
PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
#define PA_CONFIG_EXTRAS_REQUIRED() 1
|
||||
#else
|
||||
#define PA_CONFIG_EXTRAS_REQUIRED() 0
|
||||
@ -333,8 +314,7 @@ constexpr bool kUseLazyCommit = false;
|
||||
#if PA_CONFIG(DYNAMICALLY_SELECT_POOL_SIZE)
|
||||
#error "Dynamically selected pool size is currently not supported"
|
||||
#endif
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) || \
|
||||
PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
#if PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
// TODO(1376980): Address MTE once it's enabled.
|
||||
#error "Compressed pointers don't support tag in the upper bits"
|
||||
#endif
|
||||
|
@ -82,36 +82,36 @@ constexpr size_t kPartitionCachelineSize = 64;
|
||||
// up against the end of a system page.
|
||||
|
||||
#if defined(_MIPS_ARCH_LOONGSON) || defined(ARCH_CPU_LOONG64)
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PartitionPageShift() {
|
||||
return 16; // 64 KiB
|
||||
}
|
||||
#elif defined(ARCH_CPU_PPC64)
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PartitionPageShift() {
|
||||
return 18; // 256 KiB
|
||||
}
|
||||
#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
|
||||
(BUILDFLAG(IS_LINUX) && defined(ARCH_CPU_ARM64))
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PartitionPageShift() {
|
||||
return PageAllocationGranularityShift() + 2;
|
||||
}
|
||||
#else
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PartitionPageShift() {
|
||||
return 14; // 16 KiB
|
||||
}
|
||||
#endif
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PartitionPageSize() {
|
||||
return 1 << PartitionPageShift();
|
||||
}
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PartitionPageOffsetMask() {
|
||||
return PartitionPageSize() - 1;
|
||||
}
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
PartitionPageBaseMask() {
|
||||
return ~PartitionPageOffsetMask();
|
||||
}
|
||||
@ -131,18 +131,18 @@ constexpr size_t kMaxPartitionPagesPerRegularSlotSpan = 4;
|
||||
// dirty a private page, which is very wasteful if we never actually store
|
||||
// objects there.
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
NumSystemPagesPerPartitionPage() {
|
||||
return PartitionPageSize() >> SystemPageShift();
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
MaxSystemPagesPerRegularSlotSpan() {
|
||||
return NumSystemPagesPerPartitionPage() *
|
||||
kMaxPartitionPagesPerRegularSlotSpan;
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
MaxRegularSlotSpanSize() {
|
||||
return kMaxPartitionPagesPerRegularSlotSpan << PartitionPageShift();
|
||||
}
|
||||
@ -179,7 +179,6 @@ constexpr size_t kHighThresholdForAlternateDistribution =
// | Guard page (4 KiB) |
// | Metadata page (4 KiB) |
// | Guard pages (8 KiB) |
// | TagBitmap |
// | Free Slot Bitmap |
// | *Scan State Bitmap |
// | Slot span |
@ -189,8 +188,6 @@ constexpr size_t kHighThresholdForAlternateDistribution =
// | Guard pages (16 KiB) |
// +-----------------------+
//
// TagBitmap is only present when
// PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS) is true.
// Free Slot Bitmap is only present when USE_FREESLOT_BITMAP is true. State
// Bitmap is inserted for partitions that may have quarantine enabled.
//
@ -332,23 +329,23 @@ PA_ALWAYS_INLINE bool HasOverflowTag(void* object) {
|
||||
}
|
||||
#endif // PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
NumPartitionPagesPerSuperPage() {
|
||||
return kSuperPageSize >> PartitionPageShift();
|
||||
}
|
||||
|
||||
constexpr PA_ALWAYS_INLINE size_t MaxSuperPagesInPool() {
|
||||
PA_ALWAYS_INLINE constexpr size_t MaxSuperPagesInPool() {
|
||||
return kMaxSuperPagesInPool;
|
||||
}
|
||||
|
||||
#if BUILDFLAG(HAS_64_BIT_POINTERS)
|
||||
// In 64-bit mode, the direct map allocation granularity is super page size,
|
||||
// because this is the reservation granularity of the pools.
|
||||
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularity() {
|
||||
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularity() {
|
||||
return kSuperPageSize;
|
||||
}
|
||||
|
||||
constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
|
||||
PA_ALWAYS_INLINE constexpr size_t DirectMapAllocationGranularityShift() {
|
||||
return kSuperPageShift;
|
||||
}
|
||||
#else // BUILDFLAG(HAS_64_BIT_POINTERS)
|
||||
@ -356,18 +353,18 @@ constexpr PA_ALWAYS_INLINE size_t DirectMapAllocationGranularityShift() {
|
||||
// allocation granularity, which is the lowest possible address space allocation
|
||||
// unit. However, don't go below partition page size, so that pool bitmaps
|
||||
// don't get too large. See kBytesPer1BitOfBRPPoolBitmap.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
DirectMapAllocationGranularity() {
|
||||
return std::max(PageAllocationGranularity(), PartitionPageSize());
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
DirectMapAllocationGranularityShift() {
|
||||
return std::max(PageAllocationGranularityShift(), PartitionPageShift());
|
||||
}
|
||||
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
DirectMapAllocationGranularityOffsetMask() {
|
||||
return DirectMapAllocationGranularity() - 1;
|
||||
}
|
||||
@ -415,7 +412,7 @@ constexpr size_t kMinDirectMappedDownsize = kMaxBucketed + 1;
|
||||
// The definition of MaxDirectMapped does only depend on constants that are
|
||||
// unconditionally constexpr. Therefore it is not necessary to use
|
||||
// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR here.
|
||||
constexpr PA_ALWAYS_INLINE size_t MaxDirectMapped() {
|
||||
PA_ALWAYS_INLINE constexpr size_t MaxDirectMapped() {
|
||||
// Subtract kSuperPageSize to accommodate for granularity inside
|
||||
// PartitionRoot::GetDirectMapReservationSize.
|
||||
return (1UL << 31) - kSuperPageSize;
|
||||
@ -503,18 +500,6 @@ using ::partition_alloc::internal::kSuperPageSize;
|
||||
using ::partition_alloc::internal::MaxDirectMapped;
|
||||
using ::partition_alloc::internal::PartitionPageSize;
|
||||
|
||||
// Return values to indicate where a pointer is pointing relative to the bounds
// of an allocation.
enum class PtrPosWithinAlloc {
// When PA_USE_OOB_POISON is disabled, end-of-allocation pointers are also
// considered in-bounds.
kInBounds,
#if PA_CONFIG(USE_OOB_POISON)
kAllocEnd,
#endif
kFarOOB
};
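
// Editor's note: illustrative usage sketch, not part of the upstream diff. It
// shows how a caller might interpret the PtrPosWithinAlloc values defined
// above; the function and its policy are hypothetical.
inline bool IsSafeToDereferenceExample(PtrPosWithinAlloc pos) {
  switch (pos) {
    case PtrPosWithinAlloc::kInBounds:
      return true;  // Points at the object (or just past it, without poison).
#if PA_CONFIG(USE_OOB_POISON)
    case PtrPosWithinAlloc::kAllocEnd:
      return false;  // One-past-the-end: valid to hold, not to dereference.
#endif
    case PtrPosWithinAlloc::kFarOOB:
      return false;  // Outside the allocation entirely.
  }
  return false;
}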
|
||||
|
||||
} // namespace partition_alloc
|
||||
|
||||
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
|
||||
|
@ -51,27 +51,9 @@ void CheckThatSlotOffsetIsZero(uintptr_t address);
|
||||
// We support pointer offsets in signed (ptrdiff_t) or unsigned (size_t) values.
|
||||
// Smaller types are also allowed.
|
||||
template <typename Z>
|
||||
static constexpr bool offset_type =
|
||||
static constexpr bool is_offset_type =
|
||||
std::is_integral_v<Z> && sizeof(Z) <= sizeof(ptrdiff_t);
|
||||
|
||||
template <typename Z, typename = std::enable_if_t<offset_type<Z>, void>>
|
||||
struct PtrDelta {
|
||||
Z delta_in_bytes;
|
||||
#if PA_CONFIG(USE_OOB_POISON)
|
||||
// Size of the element type referenced by the pointer
|
||||
size_t type_size;
|
||||
#endif
|
||||
|
||||
constexpr PtrDelta(Z delta_in_bytes, size_t type_size)
|
||||
: delta_in_bytes(delta_in_bytes)
|
||||
#if PA_CONFIG(USE_OOB_POISON)
|
||||
,
|
||||
type_size(type_size)
|
||||
#endif
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
|
||||
class PartitionStatsDumper;
|
||||
|
@ -71,8 +71,9 @@ void PartitionAllocHooks::AllocationObserverHookIfEnabled(
|
||||
void* address,
|
||||
size_t size,
|
||||
const char* type_name) {
|
||||
if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
|
||||
if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed)) {
|
||||
hook(address, size, type_name);
|
||||
}
|
||||
}
|
||||
|
||||
bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
|
||||
@ -80,19 +81,22 @@ bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
|
||||
unsigned int flags,
|
||||
size_t size,
|
||||
const char* type_name) {
|
||||
if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
|
||||
if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed)) {
|
||||
return hook(out, flags, size, type_name);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
|
||||
if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
|
||||
if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed)) {
|
||||
hook(address);
|
||||
}
|
||||
}
|
||||
|
||||
bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
|
||||
if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
|
||||
if (auto* hook = free_override_hook_.load(std::memory_order_relaxed)) {
|
||||
return hook(address);
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -31,8 +31,6 @@
|
||||
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
|
||||
#include "base/allocator/partition_allocator/partition_oom.h"
|
||||
#include "base/allocator/partition_allocator/partition_page.h"
|
||||
#include "base/allocator/partition_allocator/partition_tag.h"
|
||||
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
|
||||
#include "base/allocator/partition_allocator/reservation_offset_table.h"
|
||||
#include "base/allocator/partition_allocator/tagging.h"
|
||||
#include "build/build_config.h"
|
||||
@ -237,10 +235,6 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
|
||||
PartitionDirectMapExtent<thread_safe>* map_extent = nullptr;
|
||||
PartitionPage<thread_safe>* page = nullptr;
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
const PartitionTag tag = root->GetNewPartitionTag();
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
{
|
||||
// Getting memory for direct-mapped allocations doesn't interact with the
|
||||
// rest of the allocator, but takes a long time, as it involves several
|
||||
@ -455,10 +449,6 @@ SlotSpanMetadata<thread_safe>* PartitionDirectMap(
|
||||
map_extent->reservation_size = reservation_size;
|
||||
map_extent->padding_for_alignment = padding_for_alignment;
|
||||
map_extent->bucket = &metadata->bucket;
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
DirectMapPartitionTagSetValue(slot_start, tag);
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
}
|
||||
|
||||
root->lock_.AssertAcquired();
|
||||
@ -702,28 +692,6 @@ PartitionBucket<thread_safe>::AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
|
||||
// span.
|
||||
PA_DCHECK(root->next_partition_page <= root->next_partition_page_end);
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
PA_DCHECK(root->next_tag_bitmap_page);
|
||||
uintptr_t next_tag_bitmap_page =
|
||||
base::bits::AlignUp(reinterpret_cast<uintptr_t>(
|
||||
PartitionTagPointer(root->next_partition_page)),
|
||||
SystemPageSize());
|
||||
if (root->next_tag_bitmap_page < next_tag_bitmap_page) {
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||
uintptr_t super_page =
|
||||
reinterpret_cast<uintptr_t>(slot_span) & kSuperPageBaseMask;
|
||||
uintptr_t tag_bitmap = super_page + PartitionPageSize();
|
||||
PA_DCHECK(next_tag_bitmap_page <= tag_bitmap + ActualTagBitmapSize());
|
||||
PA_DCHECK(next_tag_bitmap_page > tag_bitmap);
|
||||
#endif
|
||||
SetSystemPagesAccess(root->next_tag_bitmap_page,
|
||||
next_tag_bitmap_page - root->next_tag_bitmap_page,
|
||||
PageAccessibilityConfiguration(
|
||||
PageAccessibilityConfiguration::kReadWrite));
|
||||
root->next_tag_bitmap_page = next_tag_bitmap_page;
|
||||
}
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
return slot_span;
|
||||
}
|
||||
|
||||
@ -789,9 +757,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
|
||||
root->next_super_page = super_page + kSuperPageSize;
|
||||
uintptr_t state_bitmap =
|
||||
super_page + PartitionPageSize() +
|
||||
(is_direct_mapped()
|
||||
? 0
|
||||
: ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize());
|
||||
(is_direct_mapped() ? 0 : ReservedFreeSlotBitmapSize());
|
||||
#if BUILDFLAG(USE_STARSCAN)
|
||||
PA_DCHECK(SuperPageStateBitmapAddr(super_page) == state_bitmap);
|
||||
const size_t state_bitmap_reservation_size =
|
||||
@ -896,19 +862,6 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
|
||||
payload < SuperPagesEndFromExtent(current_extent));
|
||||
}
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
// `root->next_partition_page` currently points at the start of the
|
||||
// super page payload. We point `root->next_tag_bitmap_page` to the
|
||||
// corresponding point in the tag bitmap and let the caller
|
||||
// (slot span allocation) take care of the rest.
|
||||
root->next_tag_bitmap_page =
|
||||
base::bits::AlignDown(reinterpret_cast<uintptr_t>(
|
||||
PartitionTagPointer(root->next_partition_page)),
|
||||
SystemPageSize());
|
||||
PA_DCHECK(root->next_tag_bitmap_page >= super_page + PartitionPageSize())
|
||||
<< "tag bitmap can never intrude on metadata partition page";
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
// If PCScan is used, commit the state bitmap. Otherwise, leave it uncommitted
|
||||
// and let PartitionRoot::RegisterScannableRoot() commit it when needed. Make
|
||||
// sure to register the super-page after it has been fully initialized.
|
||||
@ -930,8 +883,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionBucket<thread_safe>::InitializeSuperPage(
|
||||
#if BUILDFLAG(USE_FREESLOT_BITMAP)
|
||||
// Commit the pages for freeslot bitmap.
|
||||
if (!is_direct_mapped()) {
|
||||
uintptr_t freeslot_bitmap_addr =
|
||||
super_page + PartitionPageSize() + ReservedTagBitmapSize();
|
||||
uintptr_t freeslot_bitmap_addr = super_page + PartitionPageSize();
|
||||
PA_DCHECK(SuperPageFreeSlotBitmapAddr(super_page) == freeslot_bitmap_addr);
|
||||
ScopedSyscallTimer timer{root};
|
||||
RecommitSystemPages(freeslot_bitmap_addr, CommittedFreeSlotBitmapSize(),
|
||||
@ -1017,14 +969,10 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
|
||||
}
|
||||
|
||||
if (PA_LIKELY(slot_size <= kMaxMemoryTaggingSize &&
|
||||
root->IsMemoryTaggingEnabled())) {
|
||||
root->memory_tagging_enabled())) {
|
||||
// Ensure the MTE-tag of the memory pointed by |return_slot| is unguessable.
|
||||
TagMemoryRangeRandomly(return_slot, slot_size);
|
||||
}
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
NormalBucketPartitionTagSetValue(return_slot, slot_size,
|
||||
root->GetNewPartitionTag());
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
// Add all slots that fit within so far committed pages to the free list.
|
||||
PartitionFreelistEntry* prev_entry = nullptr;
|
||||
@ -1041,10 +989,6 @@ PartitionBucket<thread_safe>::ProvisionMoreSlotsAndAllocOne(
|
||||
// No MTE-tagging for larger slots, just cast.
|
||||
next_slot_ptr = reinterpret_cast<void*>(next_slot);
|
||||
}
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
NormalBucketPartitionTagSetValue(next_slot, slot_size,
|
||||
root->GetNewPartitionTag());
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(next_slot_ptr);
|
||||
if (!slot_span->get_freelist_head()) {
|
||||
PA_DCHECK(!prev_entry);
|
||||
|
@ -73,13 +73,13 @@ struct PartitionBucket {
|
||||
// |PartitionRoot::AllocFromBucket|.)
|
||||
//
|
||||
// Note the matching Free() functions are in SlotSpanMetadata.
|
||||
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||
PA_NOINLINE uintptr_t SlowPathAlloc(PartitionRoot<thread_safe>* root,
|
||||
unsigned int flags,
|
||||
size_t raw_size,
|
||||
size_t slot_span_alignment,
|
||||
bool* is_already_zeroed)
|
||||
PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
|
||||
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t
|
||||
SlowPathAlloc(PartitionRoot<thread_safe>* root,
|
||||
unsigned int flags,
|
||||
size_t raw_size,
|
||||
size_t slot_span_alignment,
|
||||
bool* is_already_zeroed)
|
||||
PA_EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
|
||||
|
||||
PA_ALWAYS_INLINE bool CanStoreRawSize() const {
|
||||
// For direct-map as well as single-slot slot spans (recognized by checking
|
||||
@ -87,8 +87,9 @@ struct PartitionBucket {
|
||||
// subsequent PartitionPage to store the raw size. It isn't only metadata
|
||||
// space though, slot spans that have more than one slot can't have raw size
|
||||
// stored, because we wouldn't know which slot it applies to.
|
||||
if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize()))
|
||||
if (PA_LIKELY(slot_size <= MaxRegularSlotSpanSize())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
PA_DCHECK((slot_size % SystemPageSize()) == 0);
|
||||
PA_DCHECK(is_direct_mapped() || get_slots_per_span() == 1);
|
||||
|
@ -25,15 +25,17 @@ namespace partition_alloc::internal {
|
||||
// sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01
|
||||
// for the sub_order_index).
|
||||
constexpr uint8_t OrderIndexShift(uint8_t order) {
|
||||
if (order < kNumBucketsPerOrderBits + 1)
|
||||
if (order < kNumBucketsPerOrderBits + 1) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return order - (kNumBucketsPerOrderBits + 1);
|
||||
}
|
||||
|
||||
constexpr size_t OrderSubIndexMask(uint8_t order) {
|
||||
if (order == kBitsPerSizeT)
|
||||
if (order == kBitsPerSizeT) {
|
||||
return static_cast<size_t>(-1) >> (kNumBucketsPerOrderBits + 1);
|
||||
}
|
||||
|
||||
return ((static_cast<size_t>(1) << order) - 1) >>
|
||||
(kNumBucketsPerOrderBits + 1);
|
||||
@ -104,10 +106,10 @@ inline constexpr size_t kOrderSubIndexMask[PA_BITS_PER_SIZE_T + 1] = {
|
||||
// The class used to generate the bucket lookup table at compile-time.
|
||||
class BucketIndexLookup final {
|
||||
public:
|
||||
PA_ALWAYS_INLINE constexpr static uint16_t GetIndexForDenserBuckets(
|
||||
PA_ALWAYS_INLINE static constexpr uint16_t GetIndexForDenserBuckets(
|
||||
size_t size);
|
||||
PA_ALWAYS_INLINE constexpr static uint16_t GetIndexFor8Buckets(size_t size);
|
||||
PA_ALWAYS_INLINE constexpr static uint16_t GetIndex(size_t size);
|
||||
PA_ALWAYS_INLINE static constexpr uint16_t GetIndexFor8Buckets(size_t size);
|
||||
PA_ALWAYS_INLINE static constexpr uint16_t GetIndex(size_t size);
|
||||
|
||||
constexpr BucketIndexLookup() {
|
||||
constexpr uint16_t sentinel_bucket_index = kNumBuckets;
|
||||
@ -262,10 +264,11 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndexForDenserBuckets(
|
||||
//
|
||||
// We also do not want to go about the index for the max bucketed size.
|
||||
if (size > kAlignment * kNumBucketsPerOrder &&
|
||||
index < GetIndexFor8Buckets(kMaxBucketed))
|
||||
index < GetIndexFor8Buckets(kMaxBucketed)) {
|
||||
return RoundUpToOdd(index);
|
||||
else
|
||||
} else {
|
||||
return index;
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
@ -288,8 +291,9 @@ PA_ALWAYS_INLINE constexpr uint16_t BucketIndexLookup::GetIndex(size_t size) {
//
// So, an allocation of size 1.4*2^10 would go into the 1.5*2^10 bucket under
// Distribution A, but to the 2^11 bucket under Distribution B.
if (1 << 8 < size && size < kHighThresholdForAlternateDistribution)
if (1 << 8 < size && size < kHighThresholdForAlternateDistribution) {
return BucketIndexLookup::GetIndexForDenserBuckets(RoundUpSize(size));
}
return BucketIndexLookup::GetIndexForDenserBuckets(size);
}
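
// Editor's note: illustrative sketch, not part of the upstream diff and not
// the real BucketIndexLookup logic. It only demonstrates the arithmetic from
// the comment above: with 8 buckets per power-of-two order, a request of
// roughly 1.4 * 2^10 bytes lands in the 1.5 * 2^10 bucket, whereas a pure
// power-of-two distribution rounds it all the way up to 2^11.
namespace bucket_rounding_example {

constexpr size_t RoundUpToNextPowerOfTwo(size_t size) {
  size_t result = 1;
  while (result < size) {
    result <<= 1;
  }
  return result;
}

constexpr size_t RoundUpWithEightBucketsPerOrder(size_t size) {
  // The bucket step is 1/8 of the power-of-two order the size falls into.
  size_t order_base = RoundUpToNextPowerOfTwo(size) / 2;
  size_t step = order_base / 8;
  return (size + step - 1) / step * step;
}

// 1434 is roughly 1.4 * 1024.
static_assert(RoundUpToNextPowerOfTwo(1434) == 2048,
              "coarse distribution rounds to 2^11");
static_assert(RoundUpWithEightBucketsPerOrder(1434) == 1536,
              "denser distribution rounds to 1.5 * 2^10");

}  // namespace bucket_rounding_example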
|
||||
|
||||
|
@ -23,13 +23,15 @@ inline constexpr unsigned char kCookieValue[kCookieSize] = {
|
||||
constexpr size_t kPartitionCookieSizeAdjustment = kCookieSize;
|
||||
|
||||
PA_ALWAYS_INLINE void PartitionCookieCheckValue(unsigned char* cookie_ptr) {
|
||||
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
|
||||
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
|
||||
PA_DCHECK(*cookie_ptr == kCookieValue[i]);
|
||||
}
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE void PartitionCookieWriteValue(unsigned char* cookie_ptr) {
|
||||
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
|
||||
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) {
|
||||
*cookie_ptr = kCookieValue[i];
|
||||
}
|
||||
}
|
||||
|
||||
#else
|
||||
|
@ -41,10 +41,10 @@ class PartitionFreelistEntry;
|
||||
|
||||
class EncodedPartitionFreelistEntryPtr {
|
||||
private:
|
||||
explicit PA_ALWAYS_INLINE constexpr EncodedPartitionFreelistEntryPtr(
|
||||
PA_ALWAYS_INLINE constexpr explicit EncodedPartitionFreelistEntryPtr(
|
||||
std::nullptr_t)
|
||||
: encoded_(Transform(0)) {}
|
||||
explicit PA_ALWAYS_INLINE EncodedPartitionFreelistEntryPtr(void* ptr)
|
||||
PA_ALWAYS_INLINE explicit EncodedPartitionFreelistEntryPtr(void* ptr)
|
||||
// The encoded pointer stays MTE-tagged.
|
||||
: encoded_(Transform(reinterpret_cast<uintptr_t>(ptr))) {}
|
||||
|
||||
@ -58,7 +58,7 @@ class EncodedPartitionFreelistEntryPtr {
|
||||
encoded_ = encoded;
|
||||
}
|
||||
|
||||
explicit PA_ALWAYS_INLINE constexpr operator bool() const { return encoded_; }
|
||||
PA_ALWAYS_INLINE constexpr explicit operator bool() const { return encoded_; }
|
||||
|
||||
// Transform() works the same in both directions, so can be used for
|
||||
// encoding and decoding.
|
||||
@ -90,7 +90,7 @@ class EncodedPartitionFreelistEntryPtr {
|
||||
// the rationale and mechanism, respectively.
|
||||
class PartitionFreelistEntry {
|
||||
private:
|
||||
explicit constexpr PartitionFreelistEntry(std::nullptr_t)
|
||||
constexpr explicit PartitionFreelistEntry(std::nullptr_t)
|
||||
: encoded_next_(EncodedPartitionFreelistEntryPtr(nullptr))
|
||||
#if PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)
|
||||
,
|
||||
@ -121,13 +121,13 @@ class PartitionFreelistEntry {
|
||||
|
||||
// Emplaces the freelist entry at the beginning of the given slot span, and
|
||||
// initializes it as null-terminated.
|
||||
static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
|
||||
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
|
||||
void* slot_start_tagged) {
|
||||
// |slot_start_tagged| is MTE-tagged.
|
||||
auto* entry = new (slot_start_tagged) PartitionFreelistEntry(nullptr);
|
||||
return entry;
|
||||
}
|
||||
static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitNull(
|
||||
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitNull(
|
||||
uintptr_t slot_start) {
|
||||
return EmplaceAndInitNull(SlotStartAddr2Ptr(slot_start));
|
||||
}
|
||||
@ -138,7 +138,7 @@ class PartitionFreelistEntry {
|
||||
// This freelist is built for the purpose of thread-cache. This means that we
|
||||
// can't perform a check that this and the next pointer belong to the same
|
||||
// super page, as thread-cache spans may chain slots across super pages.
|
||||
static PA_ALWAYS_INLINE PartitionFreelistEntry* EmplaceAndInitForThreadCache(
|
||||
PA_ALWAYS_INLINE static PartitionFreelistEntry* EmplaceAndInitForThreadCache(
|
||||
uintptr_t slot_start,
|
||||
PartitionFreelistEntry* next) {
|
||||
auto* entry =
|
||||
@ -151,7 +151,7 @@ class PartitionFreelistEntry {
|
||||
//
|
||||
// This is for testing purposes only! |make_shadow_match| allows you to choose
|
||||
// if the shadow matches the next pointer properly or is trash.
|
||||
static PA_ALWAYS_INLINE void EmplaceAndInitForTest(uintptr_t slot_start,
|
||||
PA_ALWAYS_INLINE static void EmplaceAndInitForTest(uintptr_t slot_start,
|
||||
void* next,
|
||||
bool make_shadow_match) {
|
||||
new (SlotStartAddr2Ptr(slot_start))
|
||||
@ -225,7 +225,7 @@ class PartitionFreelistEntry {
|
||||
size_t extra,
|
||||
bool for_thread_cache) const;
|
||||
|
||||
static PA_ALWAYS_INLINE bool IsSane(const PartitionFreelistEntry* here,
|
||||
PA_ALWAYS_INLINE static bool IsSane(const PartitionFreelistEntry* here,
|
||||
const PartitionFreelistEntry* next,
|
||||
bool for_thread_cache) {
|
||||
// Don't allow the freelist to be blindly followed to any location.
|
||||
@ -260,11 +260,12 @@ class PartitionFreelistEntry {
|
||||
bool not_in_metadata =
|
||||
(next_address & kSuperPageOffsetMask) >= PartitionPageSize();
|
||||
|
||||
if (for_thread_cache)
|
||||
if (for_thread_cache) {
|
||||
return shadow_ptr_ok & not_in_metadata;
|
||||
else
|
||||
} else {
|
||||
return shadow_ptr_ok & same_superpage & marked_as_free_in_bitmap &
|
||||
not_in_metadata;
|
||||
}
|
||||
}
|
||||
|
||||
EncodedPartitionFreelistEntryPtr encoded_next_;
|
||||
@ -297,8 +298,9 @@ PartitionFreelistEntry::GetNextInternal(size_t extra,
|
||||
bool for_thread_cache) const {
|
||||
// GetNext() can be called on discarded memory, in which case |encoded_next_|
|
||||
// is 0, and none of the checks apply. Don't prefetch nullptr either.
|
||||
if (IsEncodedNextPtrZero())
|
||||
if (IsEncodedNextPtrZero()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
auto* ret = encoded_next_.Decode();
|
||||
// We rely on constant propagation to remove the branches coming from
|
||||
|
@ -13,20 +13,20 @@ namespace partition_alloc::internal {
|
||||
|
||||
OomFunction g_oom_handling_function = nullptr;
|
||||
|
||||
PA_NOINLINE void PA_NOT_TAIL_CALLED
|
||||
PartitionExcessiveAllocationSize(size_t size) {
|
||||
PA_NOINLINE PA_NOT_TAIL_CALLED void PartitionExcessiveAllocationSize(
|
||||
size_t size) {
|
||||
PA_NO_CODE_FOLDING();
|
||||
OOM_CRASH(size);
|
||||
}
|
||||
|
||||
#if !defined(ARCH_CPU_64_BITS)
|
||||
PA_NOINLINE void PA_NOT_TAIL_CALLED
|
||||
PA_NOINLINE PA_NOT_TAIL_CALLED void
|
||||
PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
|
||||
PA_NO_CODE_FOLDING();
|
||||
OOM_CRASH(size);
|
||||
}
|
||||
|
||||
[[noreturn]] PA_NOINLINE void PA_NOT_TAIL_CALLED
|
||||
[[noreturn]] PA_NOT_TAIL_CALLED PA_NOINLINE void
|
||||
PartitionOutOfMemoryWithLargeVirtualSize(size_t virtual_size) {
|
||||
PA_NO_CODE_FOLDING();
|
||||
OOM_CRASH(virtual_size);
|
||||
|
@ -23,8 +23,8 @@ namespace internal {
|
||||
// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
|
||||
extern OomFunction g_oom_handling_function;
|
||||
|
||||
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE
|
||||
void PartitionExcessiveAllocationSize(size_t size);
|
||||
[[noreturn]] PA_NOINLINE PA_COMPONENT_EXPORT(
|
||||
PARTITION_ALLOC) void PartitionExcessiveAllocationSize(size_t size);
|
||||
|
||||
#if !defined(ARCH_CPU_64_BITS)
|
||||
[[noreturn]] PA_NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(
|
||||
|
@ -105,8 +105,9 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
|
||||
root->global_empty_slot_span_ring[current_index];
|
||||
// The slot span might well have been re-activated, filled up, etc. before we
|
||||
// get around to looking at it here.
|
||||
if (slot_span_to_decommit)
|
||||
if (slot_span_to_decommit) {
|
||||
slot_span_to_decommit->DecommitIfPossible(root);
|
||||
}
|
||||
|
||||
// We put the empty slot span on our global list of "slot spans that were once
|
||||
// empty", thus providing it a bit of breathing room to get re-used before we
|
||||
@ -116,8 +117,9 @@ PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::RegisterEmpty() {
|
||||
empty_cache_index_ = current_index;
|
||||
in_empty_cache_ = 1;
|
||||
++current_index;
|
||||
if (current_index == root->global_empty_slot_span_ring_size)
|
||||
if (current_index == root->global_empty_slot_span_ring_size) {
|
||||
current_index = 0;
|
||||
}
|
||||
root->global_empty_slot_span_ring_index = current_index;
|
||||
|
||||
// Avoid wasting too much memory on empty slot spans. Note that we only divide
|
||||
@ -185,8 +187,9 @@ void SlotSpanMetadata<thread_safe>::FreeSlowPath(size_t number_of_freed) {
|
||||
// chances of it being filled up again. The old current slot span will be
|
||||
// the next slot span.
|
||||
PA_DCHECK(!next_slot_span);
|
||||
if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span()))
|
||||
if (PA_LIKELY(bucket->active_slot_spans_head != get_sentinel_slot_span())) {
|
||||
next_slot_span = bucket->active_slot_spans_head;
|
||||
}
|
||||
bucket->active_slot_spans_head = this;
|
||||
PA_CHECK(bucket->num_full_slot_spans); // Underflow.
|
||||
--bucket->num_full_slot_spans;
|
||||
@ -203,12 +206,14 @@ void SlotSpanMetadata<thread_safe>::FreeSlowPath(size_t number_of_freed) {
|
||||
#endif
|
||||
// If it's the current active slot span, change it. We bounce the slot span
|
||||
// to the empty list as a force towards defragmentation.
|
||||
if (PA_LIKELY(this == bucket->active_slot_spans_head))
|
||||
if (PA_LIKELY(this == bucket->active_slot_spans_head)) {
|
||||
bucket->SetNewActiveSlotSpan();
|
||||
}
|
||||
PA_DCHECK(bucket->active_slot_spans_head != this);
|
||||
|
||||
if (CanStoreRawSize())
|
||||
if (CanStoreRawSize()) {
|
||||
SetRawSize(0);
|
||||
}
|
||||
|
||||
RegisterEmpty();
|
||||
}
|
||||
@ -259,8 +264,9 @@ void SlotSpanMetadata<thread_safe>::DecommitIfPossible(
|
||||
PA_DCHECK(empty_cache_index_ < kMaxFreeableSpans);
|
||||
PA_DCHECK(this == root->global_empty_slot_span_ring[empty_cache_index_]);
|
||||
in_empty_cache_ = 0;
|
||||
if (is_empty())
|
||||
if (is_empty()) {
|
||||
Decommit(root);
|
||||
}
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
@ -295,10 +301,11 @@ void SlotSpanMetadata<thread_safe>::SortFreelist() {
|
||||
uintptr_t slot_start = slot_span_start + (slot_size * slot_number);
|
||||
auto* entry = PartitionFreelistEntry::EmplaceAndInitNull(slot_start);
|
||||
|
||||
if (!head)
|
||||
if (!head) {
|
||||
head = entry;
|
||||
else
|
||||
} else {
|
||||
back->SetNext(entry);
|
||||
}
|
||||
|
||||
back = entry;
|
||||
}
|
||||
|
@ -25,8 +25,6 @@
|
||||
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
|
||||
#include "base/allocator/partition_allocator/partition_bucket.h"
|
||||
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
|
||||
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
|
||||
#include "base/allocator/partition_allocator/partition_tag_types.h"
|
||||
#include "base/allocator/partition_allocator/reservation_offset_table.h"
|
||||
#include "base/allocator/partition_allocator/tagging.h"
|
||||
#include "build/build_config.h"
|
||||
@ -185,8 +183,8 @@ struct SlotSpanMetadata {
|
||||
|
||||
// Public API
|
||||
// Note the matching Alloc() functions are in PartitionPage.
|
||||
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
|
||||
PA_NOINLINE void FreeSlowPath(size_t number_of_freed);
|
||||
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) void FreeSlowPath(
|
||||
size_t number_of_freed);
|
||||
PA_ALWAYS_INLINE PartitionFreelistEntry* PopForAlloc(size_t size);
|
||||
PA_ALWAYS_INLINE void Free(uintptr_t ptr);
|
||||
// Appends the passed freelist to the slot-span's freelist. Please note that
|
||||
@ -228,10 +226,6 @@ struct SlotSpanMetadata {
|
||||
PA_ALWAYS_INLINE void SetRawSize(size_t raw_size);
|
||||
PA_ALWAYS_INLINE size_t GetRawSize() const;
|
||||
|
||||
// Only meaningful when `this` refers to a slot span in a direct map
|
||||
// bucket.
|
||||
PA_ALWAYS_INLINE PartitionTag* DirectMapMTETag();
|
||||
|
||||
PA_ALWAYS_INLINE PartitionFreelistEntry* get_freelist_head() const {
|
||||
return freelist_head;
|
||||
}
|
||||
@ -351,13 +345,6 @@ struct SubsequentPageMetadata {
|
||||
// the first one is used to store slot information, but the second one is
|
||||
// available for extra information)
|
||||
size_t raw_size;
|
||||
|
||||
// Specific to when `this` is used in a direct map bucket. Since direct
|
||||
// maps don't have as many tags as the typical normal bucket slot span,
|
||||
// we can get away with just hiding the sole tag in here.
|
||||
//
|
||||
// See `//base/memory/mtecheckedptr.md` for details.
|
||||
PartitionTag direct_map_tag;
|
||||
};
|
||||
|
||||
// Each partition page has metadata associated with it. The metadata of the
|
||||
@ -454,14 +441,14 @@ PartitionSuperPageToExtent(uintptr_t super_page) {
|
||||
// Size that should be reserved for state bitmap (if present) inside a super
|
||||
// page. Elements of a super page are partition-page-aligned, hence the returned
|
||||
// size is a multiple of partition page size.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
ReservedStateBitmapSize() {
|
||||
return base::bits::AlignUp(sizeof(AllocationStateMap), PartitionPageSize());
|
||||
}
|
||||
|
||||
// Size that should be committed for state bitmap (if present) inside a super
|
||||
// page. It is a multiple of system page size.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
CommittedStateBitmapSize() {
|
||||
return base::bits::AlignUp(sizeof(AllocationStateMap), SystemPageSize());
|
||||
}
|
||||
@ -471,9 +458,8 @@ CommittedStateBitmapSize() {
|
||||
PA_ALWAYS_INLINE uintptr_t SuperPageStateBitmapAddr(uintptr_t super_page) {
|
||||
PA_DCHECK(!(super_page % kSuperPageAlignment));
|
||||
return super_page + PartitionPageSize() +
|
||||
(IsManagedByNormalBuckets(super_page)
|
||||
? ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize()
|
||||
: 0);
|
||||
(IsManagedByNormalBuckets(super_page) ? ReservedFreeSlotBitmapSize()
|
||||
: 0);
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
|
||||
@ -484,28 +470,18 @@ PA_ALWAYS_INLINE AllocationStateMap* SuperPageStateBitmap(
|
||||
|
||||
#else // BUILDFLAG(USE_STARSCAN)
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
ReservedStateBitmapSize() {
|
||||
return 0ull;
|
||||
}
|
||||
|
||||
#endif // BUILDFLAG(USE_STARSCAN)
|
||||
|
||||
// Returns the address of the tag bitmap of the `super_page`. Caller must ensure
|
||||
// that bitmap exists.
|
||||
PA_ALWAYS_INLINE uintptr_t SuperPageTagBitmapAddr(uintptr_t super_page) {
|
||||
PA_DCHECK(IsReservationStart(super_page));
|
||||
// Skip over the guard pages / metadata.
|
||||
return super_page + PartitionPageSize();
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE uintptr_t
|
||||
SuperPagePayloadStartOffset(bool is_managed_by_normal_buckets,
|
||||
bool with_quarantine) {
|
||||
return PartitionPageSize() +
|
||||
(is_managed_by_normal_buckets
|
||||
? (ReservedTagBitmapSize() + ReservedFreeSlotBitmapSize())
|
||||
: 0) +
|
||||
(is_managed_by_normal_buckets ? ReservedFreeSlotBitmapSize() : 0) +
|
||||
(with_quarantine ? ReservedStateBitmapSize() : 0);
|
||||
}
|
||||
|
||||
@ -741,15 +717,6 @@ PA_ALWAYS_INLINE size_t SlotSpanMetadata<thread_safe>::GetRawSize() const {
|
||||
return subsequent_page_metadata->raw_size;
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
PA_ALWAYS_INLINE PartitionTag*
|
||||
SlotSpanMetadata<thread_safe>::DirectMapMTETag() {
|
||||
PA_DCHECK(bucket->is_direct_mapped());
|
||||
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
|
||||
reinterpret_cast<PartitionPage<thread_safe>*>(this));
|
||||
return &subsequent_page_metadata->direct_map_tag;
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
PA_ALWAYS_INLINE void SlotSpanMetadata<thread_safe>::SetFreelistHead(
|
||||
PartitionFreelistEntry* new_head) {
|
||||
@ -960,8 +927,9 @@ void IterateSlotSpans(uintptr_t super_page,
|
||||
break;
|
||||
}
|
||||
slot_span = &page->slot_span_metadata;
|
||||
if (callback(slot_span))
|
||||
if (callback(slot_span)) {
|
||||
return;
|
||||
}
|
||||
page += slot_span->bucket->get_pages_per_slot_span();
|
||||
}
|
||||
// Each super page must have at least one valid slot span.
|
||||
|
@ -93,7 +93,8 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
|
||||
static constexpr CountType kPtrInc = 0x0000'0002;
|
||||
#endif
|
||||
|
||||
explicit PartitionRefCount(bool needs_mac11_malloc_size_hack);
|
||||
PA_ALWAYS_INLINE explicit PartitionRefCount(
|
||||
bool needs_mac11_malloc_size_hack);
|
||||
|
||||
// Incrementing the counter doesn't imply any visibility about modified
|
||||
// memory, hence relaxed atomics. For decrement, visibility is required before
|
||||
@ -190,8 +191,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
|
||||
CountType old_count =
|
||||
count_.fetch_and(~kMemoryHeldByAllocatorBit, std::memory_order_release);
|
||||
|
||||
if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit)))
|
||||
if (PA_UNLIKELY(!(old_count & kMemoryHeldByAllocatorBit))) {
|
||||
DoubleFreeOrCorruptionDetected(old_count);
|
||||
}
|
||||
|
||||
if (PA_LIKELY((old_count & ~kNeedsMac11MallocSizeHackBit) ==
|
||||
kMemoryHeldByAllocatorBit)) {
|
||||
@ -226,8 +228,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
|
||||
PA_ALWAYS_INLINE bool IsAlive() {
|
||||
bool alive =
|
||||
count_.load(std::memory_order_relaxed) & kMemoryHeldByAllocatorBit;
|
||||
if (alive)
|
||||
if (alive) {
|
||||
CheckCookieIfSupported();
|
||||
}
|
||||
return alive;
|
||||
}
|
||||
|
||||
@ -348,9 +351,10 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRefCount {
|
||||
#endif
|
||||
};
|
||||
|
||||
PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(bool use_mac11_hack)
|
||||
PA_ALWAYS_INLINE PartitionRefCount::PartitionRefCount(
|
||||
bool needs_mac11_malloc_size_hack)
|
||||
: count_(kMemoryHeldByAllocatorBit |
|
||||
(use_mac11_hack ? kNeedsMac11MallocSizeHackBit : 0))
|
||||
(needs_mac11_malloc_size_hack ? kNeedsMac11MallocSizeHackBit : 0))
|
||||
#if PA_CONFIG(REF_COUNT_CHECK_COOKIE)
|
||||
,
|
||||
brp_cookie_(CalculateCookie())
|
||||
@ -403,7 +407,7 @@ static_assert((1 << kPartitionRefCountSizeShift) == sizeof(PartitionRefCount));
|
||||
// SystemPageSize() isn't always a constrexpr, in which case the compiler
|
||||
// wouldn't know it's a power of two. The equivalence of these calculations is
|
||||
// checked in PartitionAllocGlobalInit().
|
||||
static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
GetPartitionRefCountIndexMultiplierShift() {
|
||||
return SystemPageShift() * 2 - kSuperPageShift - kPartitionRefCountSizeShift;
|
||||
}
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include "base/allocator/partition_allocator/partition_cookie.h"
|
||||
#include "base/allocator/partition_allocator/partition_oom.h"
|
||||
#include "base/allocator/partition_allocator/partition_page.h"
|
||||
#include "base/allocator/partition_allocator/partition_ref_count.h"
|
||||
#include "base/allocator/partition_allocator/pkey.h"
|
||||
#include "base/allocator/partition_allocator/reservation_offset_table.h"
|
||||
#include "base/allocator/partition_allocator/tagging.h"
|
||||
@ -46,9 +47,9 @@
|
||||
#include <pthread.h>
|
||||
#endif
|
||||
|
||||
#if BUILDFLAG(RECORD_ALLOC_INFO)
|
||||
namespace partition_alloc::internal {
|
||||
|
||||
#if BUILDFLAG(RECORD_ALLOC_INFO)
|
||||
// Even if this is not hidden behind a BUILDFLAG, it should not use any memory
|
||||
// when recording is disabled, since it ends up in the .bss section.
|
||||
AllocInfo g_allocs = {};
|
||||
@ -57,9 +58,47 @@ void RecordAllocOrFree(uintptr_t addr, size_t size) {
|
||||
g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) %
|
||||
kAllocInfoSize] = {addr, size};
|
||||
}
|
||||
#endif // BUILDFLAG(RECORD_ALLOC_INFO)
|
||||
|
||||
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
|
||||
uintptr_t test_address,
|
||||
size_t type_size) {
|
||||
// Required for pointers right past an allocation. See
|
||||
// |PartitionAllocGetSlotStartInBRPPool()|.
|
||||
uintptr_t adjusted_address =
|
||||
orig_address - kPartitionPastAllocationAdjustment;
|
||||
PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
|
||||
DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);
|
||||
|
||||
uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
|
||||
// Don't use |adjusted_address| beyond this point at all. It was needed to
|
||||
// pick the right slot, but now we're dealing with very concrete addresses.
|
||||
// Zero it just in case, to catch errors.
|
||||
adjusted_address = 0;
|
||||
|
||||
auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
|
||||
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
|
||||
// Double check that ref-count is indeed present.
|
||||
PA_DCHECK(root->brp_enabled());
|
||||
|
||||
uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
|
||||
uintptr_t object_end = object_addr + slot_span->GetUsableSize(root);
|
||||
if (test_address < object_addr || object_end < test_address) {
|
||||
return PtrPosWithinAlloc::kFarOOB;
|
||||
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
} else if (object_end - type_size < test_address) {
|
||||
// Not even a single element of the type referenced by the pointer can fit
|
||||
// between the pointer and the end of the object.
|
||||
return PtrPosWithinAlloc::kAllocEnd;
|
||||
#endif
|
||||
} else {
|
||||
return PtrPosWithinAlloc::kInBounds;
|
||||
}
|
||||
}
|
||||
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
|
||||
} // namespace partition_alloc::internal
|
||||
#endif // BUILDFLAG(RECORD_ALLOC_INFO)
|
||||
|
||||
namespace partition_alloc {
|
||||
|
||||
@ -305,7 +344,7 @@ namespace {
|
||||
// more work and larger |slot_usage| array. Lower value would probably decrease
|
||||
// chances of purging. Not empirically tested.
|
||||
constexpr size_t kMaxPurgeableSlotsPerSystemPage = 64;
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
MinPurgeableSlotSize() {
|
||||
return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage;
|
||||
}
|
||||
@ -866,6 +905,18 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
|
||||
PartitionOptions::UseConfigurablePool::kIfAvailable) &&
|
||||
IsConfigurablePoolAvailable();
|
||||
PA_DCHECK(!flags.use_configurable_pool || IsConfigurablePoolAvailable());
|
||||
#if PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
TagViolationReportingMode memory_tagging_mode =
|
||||
internal::GetMemoryTaggingModeForCurrentThread();
|
||||
// Memory tagging is not supported in the configurable pool because MTE
|
||||
// stores tagging information in the high bits of the pointer, it causes
|
||||
// issues with components like V8's ArrayBuffers which use custom pointer
|
||||
// representations. All custom representations encountered so far rely on an
|
||||
// "is in configurable pool?" check, so we use that as a proxy.
|
||||
flags.memory_tagging_enabled_ =
|
||||
!flags.use_configurable_pool &&
|
||||
memory_tagging_mode != TagViolationReportingMode::kUndefined;
|
||||
#endif
|
||||
|
||||
// brp_enabled() is not supported in the configurable pool because
|
||||
// BRP requires objects to be in a different Pool.
|
||||
@ -909,11 +960,6 @@ void PartitionRoot<thread_safe>::Init(PartitionOptions opts) {
|
||||
PA_CHECK(!brp_enabled());
|
||||
flags.extras_size += internal::kPartitionRefCountSizeAdjustment;
|
||||
}
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
// Add one extra byte to each slot's end to allow beyond-the-end
|
||||
// pointers (crbug.com/1364476).
|
||||
flags.extras_size += 1;
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
#endif // PA_CONFIG(EXTRAS_REQUIRED)
|
||||
|
||||
// Re-confirm the above PA_CHECKs, by making sure there are no
|
||||
@ -1641,4 +1687,5 @@ static_assert(offsetof(PartitionRoot<internal::ThreadSafe>, sentinel_bucket) ==
|
||||
static_assert(
|
||||
offsetof(PartitionRoot<internal::ThreadSafe>, lock_) >= 64,
|
||||
"The lock should not be on the same cacheline as the read-mostly flags");
|
||||
|
||||
} // namespace partition_alloc
|
||||
|
@ -40,7 +40,6 @@
|
||||
#include "base/allocator/partition_allocator/chromecast_buildflags.h"
|
||||
#include "base/allocator/partition_allocator/freeslot_bitmap.h"
|
||||
#include "base/allocator/partition_allocator/page_allocator.h"
|
||||
#include "base/allocator/partition_allocator/page_allocator_constants.h"
|
||||
#include "base/allocator/partition_allocator/partition_address_space.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc-inl.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/bits.h"
|
||||
@ -64,8 +63,6 @@
|
||||
#include "base/allocator/partition_allocator/partition_oom.h"
|
||||
#include "base/allocator/partition_allocator/partition_page.h"
|
||||
#include "base/allocator/partition_allocator/partition_ref_count.h"
|
||||
#include "base/allocator/partition_allocator/partition_tag.h"
|
||||
#include "base/allocator/partition_allocator/partition_tag_types.h"
|
||||
#include "base/allocator/partition_allocator/pkey.h"
|
||||
#include "base/allocator/partition_allocator/reservation_offset_table.h"
|
||||
#include "base/allocator/partition_allocator/tagging.h"
|
||||
@ -293,6 +290,9 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
#endif // PA_CONFIG(ENABLE_MAC11_MALLOC_SIZE_HACK)
|
||||
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
|
||||
bool use_configurable_pool;
|
||||
#if PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
bool memory_tagging_enabled_;
|
||||
#endif
|
||||
|
||||
#if BUILDFLAG(ENABLE_PKEYS)
|
||||
int pkey;
|
||||
@ -396,12 +396,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
|
||||
bool quarantine_always_for_testing = false;
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
partition_alloc::PartitionTag current_partition_tag = 0;
|
||||
// Points to the end of the committed tag bitmap region.
|
||||
uintptr_t next_tag_bitmap_page = 0;
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
PartitionRoot()
|
||||
: flags{QuarantineMode::kAlwaysDisabled, ScanMode::kDisabled} {}
|
||||
explicit PartitionRoot(PartitionOptions opts) : flags() { Init(opts); }
|
||||
@ -563,8 +557,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
PA_ALWAYS_INLINE size_t
|
||||
AllocationCapacityFromRequestedSize(size_t size) const;
|
||||
|
||||
PA_ALWAYS_INLINE bool IsMemoryTaggingEnabled() const;
|
||||
|
||||
// Frees memory from this partition, if possible, by decommitting pages or
|
||||
// even entire slot spans. |flags| is an OR of base::PartitionPurgeFlags.
|
||||
void PurgeMemory(int flags);
|
||||
@ -693,7 +685,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
// If quarantine is enabled and the tag overflows, move the containing slot
|
||||
// to quarantine, to prevent the attacker from exploiting a pointer that has
|
||||
// an old tag.
|
||||
if (PA_LIKELY(IsMemoryTaggingEnabled())) {
|
||||
if (PA_LIKELY(memory_tagging_enabled())) {
|
||||
return internal::HasOverflowTag(object);
|
||||
}
|
||||
// Default behaviour if MTE is not enabled for this PartitionRoot.
|
||||
@ -713,7 +705,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
return flags.scan_mode == ScanMode::kEnabled;
|
||||
}
|
||||
|
||||
static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
GetDirectMapMetadataAndGuardPagesSize() {
|
||||
// Because we need to fake a direct-map region to look like a super page, we
|
||||
// need to allocate more pages around the payload:
|
||||
@ -726,7 +718,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
return 2 * internal::PartitionPageSize();
|
||||
}
|
||||
|
||||
static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
PA_ALWAYS_INLINE static PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
|
||||
GetDirectMapSlotSize(size_t raw_size) {
|
||||
// Caller must check that the size is not above the MaxDirectMapped()
|
||||
// limit before calling. This also guards against integer overflow in the
|
||||
@ -736,8 +728,8 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
raw_size, internal::SystemPageSize());
|
||||
}
|
||||
|
||||
static PA_ALWAYS_INLINE size_t
|
||||
GetDirectMapReservationSize(size_t padded_raw_size) {
|
||||
PA_ALWAYS_INLINE static size_t GetDirectMapReservationSize(
|
||||
size_t padded_raw_size) {
|
||||
// Caller must check that the size is not above the MaxDirectMapped()
|
||||
// limit before calling. This also guards against integer overflow in the
|
||||
// calculation here.
|
||||
@ -827,6 +819,19 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
return flags.use_configurable_pool;
|
||||
}
|
||||
|
||||
// Returns whether MTE is supported for this partition root. Because MTE
|
||||
// stores tagging information in the high bits of the pointer, it causes
|
||||
// issues with components like V8's ArrayBuffers which use custom pointer
|
||||
// representations. All custom representations encountered so far rely on an
|
||||
// "is in configurable pool?" check, so we use that as a proxy.
|
||||
bool memory_tagging_enabled() const {
|
||||
#if PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
return flags.memory_tagging_enabled_;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
// To make tests deterministic, it is necessary to uncap the amount of memory
|
||||
// waste incurred by empty slot spans. Otherwise, the size of various
|
||||
// freelists, and committed memory becomes harder to reason about (and
|
||||
@ -835,17 +840,6 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
max_empty_slot_spans_dirty_bytes_shift = 0;
|
||||
}
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
PA_ALWAYS_INLINE partition_alloc::PartitionTag GetNewPartitionTag() {
|
||||
// TODO(crbug.com/1298696): performance is not an issue. We can use
|
||||
// random tags in lieu of sequential ones.
|
||||
auto tag = ++current_partition_tag;
|
||||
tag += !tag; // Avoid 0.
|
||||
current_partition_tag = tag;
|
||||
return tag;
|
||||
}
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
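The `tag += !tag` trick above keeps 0 reserved; a tiny self-contained demo (illustrative only) of how an 8-bit tag wraps from 0xFF to 0x01 instead of 0x00:

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t tag = 0xFE;
  for (int i = 0; i < 4; ++i) {
    ++tag;
    tag += !tag;  // If the increment wrapped to 0, bump to 1 instead.
    std::printf("%#x\n", tag);  // Prints 0xff, 0x1, 0x2, 0x3.
  }
  return 0;
}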
|
||||
// Enables the sorting of active slot spans in PurgeMemory().
|
||||
static void EnableSortActiveSlotSpans();
|
||||
|
||||
@ -922,6 +916,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
|
||||
|
||||
// May return an invalid thread cache.
|
||||
PA_ALWAYS_INLINE ThreadCache* GetOrCreateThreadCache();
|
||||
PA_ALWAYS_INLINE ThreadCache* GetThreadCache();
|
||||
|
||||
#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)
|
||||
static internal::Lock& GetEnumeratorLock();
|
||||
@ -946,9 +941,11 @@ class ScopedSyscallTimer {
|
||||
~ScopedSyscallTimer() {
|
||||
root_->syscall_count.fetch_add(1, std::memory_order_relaxed);
|
||||
|
||||
uint64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
|
||||
root_->syscall_total_time_ns.fetch_add(elapsed_nanos,
|
||||
std::memory_order_relaxed);
|
||||
int64_t elapsed_nanos = (base::TimeTicks::Now() - tick_).InNanoseconds();
|
||||
if (elapsed_nanos > 0) {
|
||||
root_->syscall_total_time_ns.fetch_add(
|
||||
static_cast<uint64_t>(elapsed_nanos), std::memory_order_relaxed);
|
||||
}
|
||||
}
|
||||
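A simplified sketch of the timer pattern above, using std::chrono and assumed global counters rather than the PartitionRoot members: the point of the change is that the signed elapsed time is only accumulated when it is positive, guarding against a clock that reports a non-positive delta.

#include <atomic>
#include <chrono>
#include <cstdint>

std::atomic<uint64_t> g_syscall_count{0};
std::atomic<uint64_t> g_syscall_total_ns{0};

class ScopedSyscallTimerSketch {
 public:
  ScopedSyscallTimerSketch() : start_(std::chrono::steady_clock::now()) {}
  ~ScopedSyscallTimerSketch() {
    g_syscall_count.fetch_add(1, std::memory_order_relaxed);
    int64_t elapsed_ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
                             std::chrono::steady_clock::now() - start_)
                             .count();
    if (elapsed_ns > 0) {  // Only accumulate a meaningful, positive delta.
      g_syscall_total_ns.fetch_add(static_cast<uint64_t>(elapsed_ns),
                                   std::memory_order_relaxed);
    }
  }

 private:
  std::chrono::steady_clock::time_point start_;
};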
|
||||
private:
|
||||
@ -1044,47 +1041,34 @@ PartitionAllocGetSlotStartInBRPPool(uintptr_t address) {
|
||||
bucket->slot_size * bucket->GetSlotNumber(offset_in_slot_span);
|
||||
}
|
||||
|
||||
// Checks whether a given address stays within the same allocation slot after
|
||||
// modification.
|
||||
// Return values to indicate where a pointer is pointing relative to the bounds
|
||||
// of an allocation.
|
||||
enum class PtrPosWithinAlloc {
|
||||
// When BACKUP_REF_PTR_POISON_OOB_PTR is disabled, end-of-allocation pointers
|
||||
// are also considered in-bounds.
|
||||
kInBounds,
|
||||
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
kAllocEnd,
|
||||
#endif
|
||||
kFarOOB
|
||||
};
|
||||
|
||||
// Checks whether `test_address` is in the same allocation slot as
|
||||
// `orig_address`.
|
||||
//
|
||||
// This can be called after adding or subtracting from the `orig_address`
|
||||
// to produce a different pointer which must still stay in the same allocation.
|
||||
//
|
||||
// The `type_size` is the size of the type that the raw_ptr is pointing to,
|
||||
// which may be the type the allocation is holding or a compatible pointer type
|
||||
// such as a base class or char*. It is used to detect pointers near the end of
|
||||
// the allocation but not strictly beyond it.
|
||||
//
|
||||
// This isn't a general purpose function. The caller is responsible for ensuring
|
||||
// that the ref-count is in place for this allocation.
|
||||
template <typename Z>
|
||||
PA_ALWAYS_INLINE PtrPosWithinAlloc
|
||||
PartitionAllocIsValidPtrDelta(uintptr_t address, PtrDelta<Z> delta) {
|
||||
// Required for pointers right past an allocation. See
|
||||
// |PartitionAllocGetSlotStartInBRPPool()|.
|
||||
uintptr_t adjusted_address = address - kPartitionPastAllocationAdjustment;
|
||||
PA_DCHECK(IsManagedByNormalBucketsOrDirectMap(adjusted_address));
|
||||
DCheckIfManagedByPartitionAllocBRPPool(adjusted_address);
|
||||
|
||||
uintptr_t slot_start = PartitionAllocGetSlotStartInBRPPool(adjusted_address);
|
||||
// Don't use |adjusted_address| beyond this point at all. It was needed to
|
||||
// pick the right slot, but now we're dealing with very concrete addresses.
|
||||
// Zero it just in case, to catch errors.
|
||||
adjusted_address = 0;
|
||||
|
||||
auto* slot_span = SlotSpanMetadata<ThreadSafe>::FromSlotStart(slot_start);
|
||||
auto* root = PartitionRoot<ThreadSafe>::FromSlotSpan(slot_span);
|
||||
// Double check that ref-count is indeed present.
|
||||
PA_DCHECK(root->brp_enabled());
|
||||
|
||||
uintptr_t object_addr = root->SlotStartToObjectAddr(slot_start);
|
||||
uintptr_t new_address =
|
||||
address + static_cast<uintptr_t>(delta.delta_in_bytes);
|
||||
uintptr_t object_end = object_addr + slot_span->GetUsableSize(root);
|
||||
if (new_address < object_addr || object_end < new_address) {
|
||||
return PtrPosWithinAlloc::kFarOOB;
|
||||
#if PA_CONFIG(USE_OOB_POISON)
|
||||
} else if (object_end - delta.type_size < new_address) {
|
||||
// Not even a single element of the type referenced by the pointer can fit
|
||||
// between the pointer and the end of the object.
|
||||
return PtrPosWithinAlloc::kAllocEnd;
|
||||
#endif
|
||||
} else {
|
||||
return PtrPosWithinAlloc::kInBounds;
|
||||
}
|
||||
}
|
||||
PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
|
||||
uintptr_t test_address,
|
||||
size_t type_size);
|
||||
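A standalone sketch of the three-way classification that IsPtrWithinSameAlloc() is documented to return, written against a plain [begin, end) range (hypothetical ClassifyPointer helper, not the BRP-pool implementation):

#include <cstddef>
#include <cstdint>

enum class PtrPos { kInBounds, kAllocEnd, kFarOOB };

PtrPos ClassifyPointer(uintptr_t object_begin,
                       uintptr_t object_end,  // one past the last usable byte
                       uintptr_t test_addr,
                       size_t type_size) {
  if (test_addr < object_begin || test_addr > object_end) {
    return PtrPos::kFarOOB;
  }
  if (object_end - test_addr < type_size) {
    // Not even one element of the pointee type fits before the end.
    return PtrPos::kAllocEnd;
  }
  return PtrPos::kInBounds;
}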
|
||||
PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
|
||||
PA_DCHECK(!PartitionRefCountPointer(slot_start)->IsAlive());
|
||||
@ -1221,21 +1205,6 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeWithFlags(
|
||||
FreeNoHooks(object);
|
||||
}
|
||||
|
||||
// Returns whether MTE is supported for this partition root. Because MTE stores
|
||||
// tagging information in the high bits of the pointer, it causes issues with
|
||||
// components like V8's ArrayBuffers which use custom pointer representations.
|
||||
// All custom representations encountered so far rely on an "is in configurable
|
||||
// pool?" check, so we use that as a proxy.
|
||||
template <bool thread_safe>
|
||||
PA_ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsMemoryTaggingEnabled()
|
||||
const {
|
||||
#if PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
return !flags.use_configurable_pool;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
// static
|
||||
template <bool thread_safe>
|
||||
PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
|
||||
@ -1280,7 +1249,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
|
||||
PA_DCHECK(slot_span == SlotSpan::FromSlotStart(slot_start));
|
||||
|
||||
#if PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
if (PA_LIKELY(root->IsMemoryTaggingEnabled())) {
|
||||
if (PA_LIKELY(root->memory_tagging_enabled())) {
|
||||
const size_t slot_size = slot_span->bucket->slot_size;
|
||||
if (PA_LIKELY(slot_size <= internal::kMaxMemoryTaggingSize)) {
|
||||
// slot_span is untagged at this point, so we have to recover its tag
|
||||
@ -1307,13 +1276,6 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::FreeNoHooks(void* object) {
|
||||
PA_PREFETCH(slot_span);
|
||||
#endif // PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
if (!root->IsDirectMappedBucket(slot_span->bucket)) {
|
||||
partition_alloc::internal::PartitionTagIncrementValue(
|
||||
slot_start, slot_span->bucket->slot_size);
|
||||
}
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
#if BUILDFLAG(USE_STARSCAN)
|
||||
// TODO(bikineev): Change the condition to PA_LIKELY once PCScan is enabled by
|
||||
// default.
|
||||
@ -1547,7 +1509,7 @@ PA_ALWAYS_INLINE void PartitionRoot<thread_safe>::RawFreeWithThreadCache(
|
||||
SlotSpan* slot_span) {
|
||||
// PA_LIKELY: performance-sensitive partitions have a thread cache,
|
||||
// direct-mapped allocations are uncommon.
|
||||
ThreadCache* thread_cache = GetOrCreateThreadCache();
|
||||
ThreadCache* thread_cache = GetThreadCache();
|
||||
if (PA_LIKELY(ThreadCache::IsValid(thread_cache) &&
|
||||
!IsDirectMappedBucket(slot_span->bucket))) {
|
||||
size_t bucket_index =
|
||||
@ -1804,7 +1766,7 @@ PartitionRoot<thread_safe>::GetPageAccessibility() const {
|
||||
PageAccessibilityConfiguration::Permissions permissions =
|
||||
PageAccessibilityConfiguration::kReadWrite;
|
||||
#if PA_CONFIG(HAS_MEMORY_TAGGING)
|
||||
if (IsMemoryTaggingEnabled()) {
|
||||
if (memory_tagging_enabled()) {
|
||||
permissions = PageAccessibilityConfiguration::kReadWriteTagged;
|
||||
}
|
||||
#endif
|
||||
@ -2274,6 +2236,11 @@ ThreadCache* PartitionRoot<thread_safe>::GetOrCreateThreadCache() {
|
||||
return thread_cache;
|
||||
}
|
||||
|
||||
template <bool thread_safe>
|
||||
ThreadCache* PartitionRoot<thread_safe>::GetThreadCache() {
|
||||
return PA_LIKELY(flags.with_thread_cache) ? ThreadCache::Get() : nullptr;
|
||||
}
|
||||
|
||||
using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
|
||||
|
||||
static_assert(offsetof(ThreadSafePartitionRoot, lock_) ==
|
||||
|
@ -1,144 +0,0 @@
|
||||
// Copyright 2020 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
|
||||
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
|
||||
|
||||
// This file defines types and functions for `MTECheckedPtr<T>` (cf.
|
||||
// `tagging.h`, which deals with real ARM MTE).
|
||||
|
||||
#include <string.h>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_notreached.h"
|
||||
#include "base/allocator/partition_allocator/partition_cookie.h"
|
||||
#include "base/allocator/partition_allocator/partition_page.h"
|
||||
#include "base/allocator/partition_allocator/partition_tag_bitmap.h"
|
||||
#include "base/allocator/partition_allocator/partition_tag_types.h"
|
||||
#include "base/allocator/partition_allocator/reservation_offset_table.h"
|
||||
#include "base/allocator/partition_allocator/tagging.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace partition_alloc {
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
static_assert(
|
||||
sizeof(PartitionTag) == internal::tag_bitmap::kPartitionTagSize,
|
||||
"sizeof(PartitionTag) must be equal to bitmap::kPartitionTagSize.");
|
||||
|
||||
PA_ALWAYS_INLINE PartitionTag* NormalBucketPartitionTagPointer(uintptr_t addr) {
|
||||
uintptr_t bitmap_base =
|
||||
internal::SuperPageTagBitmapAddr(addr & internal::kSuperPageBaseMask);
|
||||
const size_t bitmap_end_offset =
|
||||
internal::PartitionPageSize() + internal::ReservedTagBitmapSize();
|
||||
PA_DCHECK((addr & internal::kSuperPageOffsetMask) >= bitmap_end_offset);
|
||||
uintptr_t offset_in_super_page =
|
||||
(addr & internal::kSuperPageOffsetMask) - bitmap_end_offset;
|
||||
size_t offset_in_bitmap = offset_in_super_page >>
|
||||
internal::tag_bitmap::kBytesPerPartitionTagShift
|
||||
<< internal::tag_bitmap::kPartitionTagSizeShift;
|
||||
// No need to tag, as the tag bitmap region isn't protected by MTE.
|
||||
return reinterpret_cast<PartitionTag*>(bitmap_base + offset_in_bitmap);
|
||||
}
|
||||
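Worked sketch of the offset-to-bitmap-index computation above, using the shift constants defined in partition_tag_bitmap.h (one 1-byte tag per 16 bytes of slot-span data); the helper name is illustrative only.

#include <cstddef>

constexpr size_t kBytesPerPartitionTagShift = 4;  // 16 bytes per tag.
constexpr size_t kPartitionTagSizeShift = 0;      // 1-byte tags.

// The tag for a byte at `offset_past_bitmap` is stored at this offset within
// the tag bitmap.
constexpr size_t TagOffsetInBitmap(size_t offset_past_bitmap) {
  return (offset_past_bitmap >> kBytesPerPartitionTagShift)
         << kPartitionTagSizeShift;
}

static_assert(TagOffsetInBitmap(0) == 0);
static_assert(TagOffsetInBitmap(15) == 0);  // Same 16-byte granule.
static_assert(TagOffsetInBitmap(16) == 1);  // Next granule, next tag byte.
static_assert(TagOffsetInBitmap(160) == 10);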
|
||||
PA_ALWAYS_INLINE PartitionTag* DirectMapPartitionTagPointer(uintptr_t addr) {
|
||||
uintptr_t first_super_page = internal::GetDirectMapReservationStart(addr);
|
||||
PA_DCHECK(first_super_page) << "not managed by a direct map: " << addr;
|
||||
auto* subsequent_page_metadata = GetSubsequentPageMetadata(
|
||||
internal::PartitionSuperPageToMetadataArea<internal::ThreadSafe>(
|
||||
first_super_page));
|
||||
return &subsequent_page_metadata->direct_map_tag;
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(uintptr_t addr) {
|
||||
// UNLIKELY because direct maps are far less common than normal buckets.
|
||||
if (PA_UNLIKELY(internal::IsManagedByDirectMap(addr))) {
|
||||
return DirectMapPartitionTagPointer(addr);
|
||||
}
|
||||
return NormalBucketPartitionTagPointer(addr);
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(const void* ptr) {
|
||||
// Disambiguation: UntagPtr relates to hardware MTE, and it strips the tag
|
||||
// from the pointer. Whereas, PartitionTagPointer relates to software MTE
|
||||
// (i.e. MTECheckedPtr) and it returns a pointer to the tag in memory.
|
||||
return PartitionTagPointer(UntagPtr(ptr));
|
||||
}
|
||||
|
||||
namespace internal {
|
||||
|
||||
PA_ALWAYS_INLINE void DirectMapPartitionTagSetValue(uintptr_t addr,
|
||||
PartitionTag value) {
|
||||
*DirectMapPartitionTagPointer(addr) = value;
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE void NormalBucketPartitionTagSetValue(uintptr_t slot_start,
|
||||
size_t size,
|
||||
PartitionTag value) {
|
||||
PA_DCHECK((size % tag_bitmap::kBytesPerPartitionTag) == 0);
|
||||
PA_DCHECK((slot_start % tag_bitmap::kBytesPerPartitionTag) == 0);
|
||||
size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
|
||||
PartitionTag* tag_ptr = NormalBucketPartitionTagPointer(slot_start);
|
||||
if (sizeof(PartitionTag) == 1) {
|
||||
memset(tag_ptr, value, tag_count);
|
||||
} else {
|
||||
while (tag_count-- > 0)
|
||||
*tag_ptr++ = value;
|
||||
}
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void* ptr) {
|
||||
return *PartitionTagPointer(ptr);
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE void PartitionTagIncrementValue(uintptr_t slot_start,
|
||||
size_t size) {
|
||||
PartitionTag tag = *PartitionTagPointer(slot_start);
|
||||
PartitionTag new_tag = tag;
|
||||
++new_tag;
|
||||
new_tag += !new_tag; // Avoid 0.
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||
PA_DCHECK(internal::IsManagedByNormalBuckets(slot_start));
|
||||
// This verifies that tags for the entire slot have the same value and that
|
||||
// |size| doesn't exceed the slot size.
|
||||
size_t tag_count = size >> tag_bitmap::kBytesPerPartitionTagShift;
|
||||
PartitionTag* tag_ptr = PartitionTagPointer(slot_start);
|
||||
while (tag_count-- > 0) {
|
||||
PA_DCHECK(tag == *tag_ptr);
|
||||
tag_ptr++;
|
||||
}
|
||||
#endif
|
||||
NormalBucketPartitionTagSetValue(slot_start, size, new_tag);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
|
||||
#else // No-op versions
|
||||
|
||||
PA_ALWAYS_INLINE PartitionTag* PartitionTagPointer(void* ptr) {
|
||||
PA_NOTREACHED();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
namespace internal {
|
||||
|
||||
PA_ALWAYS_INLINE PartitionTag PartitionTagGetValue(void*) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE void PartitionTagIncrementValue(uintptr_t slot_start,
|
||||
size_t size) {}
|
||||
|
||||
} // namespace internal
|
||||
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
} // namespace partition_alloc
|
||||
|
||||
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_H_
|
@ -1,147 +0,0 @@
|
||||
// Copyright 2020 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
|
||||
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
|
||||
|
||||
#include "base/allocator/partition_allocator/page_allocator_constants.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||
|
||||
namespace partition_alloc::internal {
|
||||
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
namespace tag_bitmap {
|
||||
// kPartitionTagSize should be equal to sizeof(PartitionTag).
|
||||
// PartitionTag is defined in partition_tag.h and static_assert there
|
||||
// checks the condition.
|
||||
static constexpr size_t kPartitionTagSizeShift = 0;
|
||||
static constexpr size_t kPartitionTagSize = 1U << kPartitionTagSizeShift;
|
||||
|
||||
static constexpr size_t kBytesPerPartitionTagShift = 4;
|
||||
// One partition tag is assigned per |kBytesPerPartitionTag| bytes in the slot
|
||||
// spans.
|
||||
// +-----------+ 0
|
||||
// | | ====> 1 partition tag
|
||||
// +-----------+ kBytesPerPartitionTag
|
||||
// | | ====> 1 partition tag
|
||||
// +-----------+ 2*kBytesPerPartitionTag
|
||||
// ...
|
||||
// +-----------+ slot_size
|
||||
static constexpr size_t kBytesPerPartitionTag = 1U
|
||||
<< kBytesPerPartitionTagShift;
|
||||
static_assert(
|
||||
kMinBucketedOrder >= kBytesPerPartitionTagShift + 1,
|
||||
"MTECheckedPtr requires kBytesPerPartitionTagShift-bytes alignment.");
|
||||
|
||||
static constexpr size_t kBytesPerPartitionTagRatio =
|
||||
kBytesPerPartitionTag / kPartitionTagSize;
|
||||
|
||||
static_assert(kBytesPerPartitionTag > 0,
|
||||
"kBytesPerPartitionTag should be larger than 0");
|
||||
static_assert(
|
||||
kBytesPerPartitionTag % kPartitionTagSize == 0,
|
||||
"kBytesPerPartitionTag should be multiples of sizeof(PartitionTag).");
|
||||
|
||||
constexpr size_t CeilCountOfUnits(size_t size, size_t unit_size) {
|
||||
return (size + unit_size - 1) / unit_size;
|
||||
}
|
||||
|
||||
} // namespace tag_bitmap
|
||||
|
||||
// kTagBitmapSize is calculated in the following way:
|
||||
// (1) kSuperPageSize - 2 * PartitionPageSize() = kTagBitmapSize +
|
||||
// SlotSpanSize()
|
||||
// (2) kTagBitmapSize >= SlotSpanSize() / kBytesPerPartitionTag *
|
||||
// sizeof(PartitionTag)
|
||||
//--
|
||||
// (1)' SlotSpanSize() = kSuperPageSize - 2 * PartitionPageSize() -
|
||||
// kTagBitmapSize
|
||||
// (2)' SlotSpanSize() <= kTagBitmapSize * Y
|
||||
// (3)' Y = kBytesPerPartitionTag / sizeof(PartitionTag) =
|
||||
// kBytesPerPartitionTagRatio
|
||||
//
|
||||
// kTagBitmapSize * Y >= kSuperPageSize - 2 * PartitionPageSize() -
//     kTagBitmapSize
// (1 + Y) * kTagBitmapSize >= kSuperPageSize - 2 * PartitionPageSize()
|
||||
// Finally,
|
||||
// kTagBitmapSize >= (kSuperPageSize - 2 * PartitionPageSize()) / (1 + Y)
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
NumPartitionPagesPerTagBitmap() {
|
||||
return tag_bitmap::CeilCountOfUnits(
|
||||
kSuperPageSize / PartitionPageSize() - 2,
|
||||
tag_bitmap::kBytesPerPartitionTagRatio + 1);
|
||||
}
|
||||
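A worked instance of that formula under assumed typical values (2 MiB super pages, 16 KiB partition pages, Y = kBytesPerPartitionTagRatio = 16): the bitmap reservation comes out to ceil((128 - 2) / 17) = 8 partition pages. The constants below are assumptions for illustration, not the build's configured values.

#include <cstddef>

constexpr size_t CeilDiv(size_t a, size_t b) { return (a + b - 1) / b; }

constexpr size_t kAssumedSuperPage = 2 * 1024 * 1024;  // assumed 2 MiB
constexpr size_t kAssumedPartitionPage = 16 * 1024;    // assumed 16 KiB
constexpr size_t kAssumedBytesPerTagRatio = 16;        // Y above

static_assert(CeilDiv(kAssumedSuperPage / kAssumedPartitionPage - 2,
                      kAssumedBytesPerTagRatio + 1) == 8,
              "8 partition pages reserved for the tag bitmap");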
|
||||
// To make guard pages between the tag bitmap and the slot span, calculate the
|
||||
// number of SystemPages of TagBitmap. If kNumSystemPagesPerTagBitmap *
|
||||
// SystemPageSize() < kTagBitmapSize, guard pages will be created. (c.f. no
|
||||
// guard pages if sizeof(PartitionTag) == 2.)
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
NumSystemPagesPerTagBitmap() {
|
||||
return tag_bitmap::CeilCountOfUnits(
|
||||
kSuperPageSize / SystemPageSize() -
|
||||
2 * PartitionPageSize() / SystemPageSize(),
|
||||
tag_bitmap::kBytesPerPartitionTagRatio + 1);
|
||||
}
|
||||
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
ActualTagBitmapSize() {
|
||||
return NumSystemPagesPerTagBitmap() * SystemPageSize();
|
||||
}
|
||||
|
||||
// PartitionPageSize-aligned tag bitmap size.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
ReservedTagBitmapSize() {
|
||||
return PartitionPageSize() * NumPartitionPagesPerTagBitmap();
|
||||
}
|
||||
|
||||
#if PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
|
||||
static_assert(ActualTagBitmapSize() <= ReservedTagBitmapSize(),
|
||||
"kActualTagBitmapSize should be smaller than or equal to "
|
||||
"kReservedTagBitmapSize.");
|
||||
static_assert(ReservedTagBitmapSize() - ActualTagBitmapSize() <
|
||||
PartitionPageSize(),
|
||||
"Unused space in the tag bitmap should be smaller than "
|
||||
"PartitionPageSize()");
|
||||
|
||||
// The region available for slot spans is the remainder of the super page, after
|
||||
// taking away the first and last partition page (for metadata and guard pages)
|
||||
// and partition pages reserved for the freeslot bitmap and the tag bitmap.
|
||||
PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR PA_ALWAYS_INLINE size_t
|
||||
SlotSpansSize() {
|
||||
return kSuperPageSize - 2 * PartitionPageSize() - ReservedTagBitmapSize();
|
||||
}
|
||||
|
||||
static_assert(ActualTagBitmapSize() * tag_bitmap::kBytesPerPartitionTagRatio >=
|
||||
SlotSpansSize(),
|
||||
"bitmap is large enough to cover slot spans");
|
||||
static_assert((ActualTagBitmapSize() - PartitionPageSize()) *
|
||||
tag_bitmap::kBytesPerPartitionTagRatio <
|
||||
SlotSpansSize(),
"any smaller bitmap wouldn't suffice to cover slot spans");
|
||||
#endif // PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR
|
||||
|
||||
#else
|
||||
|
||||
constexpr PA_ALWAYS_INLINE size_t NumPartitionPagesPerTagBitmap() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
constexpr PA_ALWAYS_INLINE size_t ActualTagBitmapSize() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
constexpr PA_ALWAYS_INLINE size_t ReservedTagBitmapSize() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
|
||||
} // namespace partition_alloc::internal
|
||||
|
||||
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_BITMAP_H_
|
@ -1,25 +0,0 @@
|
||||
// Copyright 2022 The Chromium Authors
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
|
||||
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
|
||||
|
||||
#include <cstdint>
|
||||
|
||||
// This header defines the types for MTECheckedPtr. Canonical
|
||||
// documentation available at `//base/memory/raw_ptr_mtecheckedptr.md`.
|
||||
|
||||
namespace partition_alloc {
|
||||
|
||||
// Use 8 bits for the partition tag. This is the "lower" byte of the
|
||||
// two top bytes in a 64-bit pointer. The "upper" byte of the same
|
||||
// is reserved for true ARM MTE.
|
||||
//
|
||||
// MTECheckedPtr is not yet compatible with ARM MTE, but it is a
|
||||
// distant goal to have them coexist.
|
||||
using PartitionTag = uint8_t;
|
||||
|
||||
} // namespace partition_alloc
|
||||
|
||||
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_TAG_TYPES_H_
|
@ -108,8 +108,9 @@ PA_ALWAYS_INLINE void* PartitionTlsGet(PartitionTlsKey key) {
|
||||
DWORD saved_error = GetLastError();
|
||||
void* ret = TlsGetValue(key);
|
||||
// Only non-zero errors need to be restored.
|
||||
if (PA_UNLIKELY(saved_error))
|
||||
if (PA_UNLIKELY(saved_error)) {
|
||||
SetLastError(saved_error);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
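Self-contained restatement of the pattern above (illustrative wrapper name): TlsGetValue() can overwrite the thread's last-error value even on success, so a lookup that must be transparent to callers saves the value first and restores it when it was non-zero.

#include <windows.h>

void* GetTlsValuePreservingLastError(DWORD key) {
  DWORD saved_error = ::GetLastError();
  void* value = ::TlsGetValue(key);
  // Restoring only non-zero values is enough: a successful TlsGetValue()
  // leaves the last error at 0 anyway, matching a zero saved value.
  if (saved_error != 0) {
    ::SetLastError(saved_error);
  }
  return value;
}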
|
||||
|
@ -19,16 +19,19 @@ void (*g_on_dll_process_detach)() = nullptr;
|
||||
void NTAPI PartitionTlsOnThreadExit(PVOID module,
|
||||
DWORD reason,
|
||||
PVOID reserved) {
|
||||
if (reason != DLL_THREAD_DETACH && reason != DLL_PROCESS_DETACH)
|
||||
if (reason != DLL_THREAD_DETACH && reason != DLL_PROCESS_DETACH) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (reason == DLL_PROCESS_DETACH && g_on_dll_process_detach)
|
||||
if (reason == DLL_PROCESS_DETACH && g_on_dll_process_detach) {
|
||||
g_on_dll_process_detach();
|
||||
}
|
||||
|
||||
if (g_destructor) {
|
||||
void* per_thread_data = PartitionTlsGet(g_key);
|
||||
if (per_thread_data)
|
||||
if (per_thread_data) {
|
||||
g_destructor(per_thread_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -98,8 +98,9 @@ void Wrpkru(uint32_t pkru) {
|
||||
|
||||
LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
|
||||
: saved_pkey_value_(kDefaultPkeyValue) {
|
||||
if (!PkeySettings::settings.enabled)
|
||||
if (!PkeySettings::settings.enabled) {
|
||||
return;
|
||||
}
|
||||
saved_pkey_value_ = Rdpkru();
|
||||
if (saved_pkey_value_ != kDefaultPkeyValue) {
|
||||
Wrpkru(kAllowAllPkeyValue);
|
||||
@ -107,8 +108,9 @@ LiftPkeyRestrictionsScope::LiftPkeyRestrictionsScope()
|
||||
}
|
||||
|
||||
LiftPkeyRestrictionsScope::~LiftPkeyRestrictionsScope() {
|
||||
if (!PkeySettings::settings.enabled)
|
||||
if (!PkeySettings::settings.enabled) {
|
||||
return;
|
||||
}
|
||||
if (Rdpkru() != saved_pkey_value_) {
|
||||
Wrpkru(saved_pkey_value_);
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -10,6 +10,7 @@
|
||||
#include <type_traits>
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
|
||||
|
||||
@ -26,20 +27,23 @@ template <bool IsAdjustablePtr>
|
||||
struct RawPtrAsanUnownedImpl {
|
||||
// Wraps a pointer.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
|
||||
return ptr;
|
||||
}
|
||||
|
||||
// Notifies the allocator when a wrapped pointer is being removed or replaced.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* wrapped_ptr) {
|
||||
ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
|
||||
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
|
||||
}
|
||||
}
|
||||
|
||||
// Unwraps the pointer, while asserting that memory hasn't been freed. The
|
||||
// function is allowed to crash on nullptr.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
|
||||
T* wrapped_ptr) {
|
||||
// ASAN will catch use of dereferenced ptr without additional probing.
|
||||
return wrapped_ptr;
|
||||
}
|
||||
@ -47,21 +51,25 @@ struct RawPtrAsanUnownedImpl {
|
||||
// Unwraps the pointer, while asserting that memory hasn't been freed. The
|
||||
// function must handle nullptr gracefully.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
|
||||
ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
|
||||
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
|
||||
T* wrapped_ptr) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
ProbeForLowSeverityLifetimeIssue(wrapped_ptr);
|
||||
}
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// Unwraps the pointer, without making an assertion on whether memory was
|
||||
// freed or not.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
|
||||
T* wrapped_ptr) {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// Upcasts the wrapped pointer.
|
||||
template <typename To, typename From>
|
||||
static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
|
||||
static_assert(std::is_convertible<From*, To*>::value,
|
||||
"From must be convertible to To.");
|
||||
// Note, this cast may change the address if upcasting to base that lies in
|
||||
@ -74,21 +82,31 @@ struct RawPtrAsanUnownedImpl {
|
||||
typename T,
|
||||
typename Z,
|
||||
typename =
|
||||
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
|
||||
static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
|
||||
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
|
||||
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
|
||||
return wrapped_ptr + delta_elems;
|
||||
}
|
||||
|
||||
// Retreat the wrapped pointer by `delta_elems`.
|
||||
template <
|
||||
typename T,
|
||||
typename Z,
|
||||
typename =
|
||||
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
|
||||
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
|
||||
return wrapped_ptr - delta_elems;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
|
||||
T* wrapped_ptr2) {
|
||||
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
|
||||
T* wrapped_ptr2) {
|
||||
return wrapped_ptr1 - wrapped_ptr2;
|
||||
}
|
||||
|
||||
// Returns a copy of a wrapped pointer, without making an assertion on whether
|
||||
// memory was freed or not.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
@ -103,19 +121,21 @@ struct RawPtrAsanUnownedImpl {
|
||||
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
|
||||
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
|
||||
return ptr;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
|
||||
T* wrapped_ptr) {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// This is for accounting only, used by unit tests.
|
||||
static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
|
||||
static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
|
||||
static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void
|
||||
IncrementPointerToMemberOperatorCountForTest() {}
|
||||
};
|
||||
|
||||
} // namespace base::internal
|
||||
|
@ -64,6 +64,29 @@ void RawPtrBackupRefImpl<AllowDangling>::ReportIfDanglingInternal(
|
||||
}
|
||||
}
|
||||
|
||||
// static
|
||||
template <bool AllowDangling>
|
||||
bool RawPtrBackupRefImpl<AllowDangling>::CheckPointerWithinSameAlloc(
|
||||
uintptr_t before_addr,
|
||||
uintptr_t after_addr,
|
||||
size_t type_size) {
|
||||
partition_alloc::internal::PtrPosWithinAlloc ptr_pos_within_alloc =
|
||||
partition_alloc::internal::IsPtrWithinSameAlloc(before_addr, after_addr,
|
||||
type_size);
|
||||
// No need to check that |new_ptr| is in the same pool, as
|
||||
// IsPtrWithinSameAlloc() checks that it's within the same allocation, so
|
||||
// must be the same pool.
|
||||
PA_BASE_CHECK(ptr_pos_within_alloc !=
|
||||
partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
|
||||
|
||||
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
return ptr_pos_within_alloc ==
|
||||
partition_alloc::internal::PtrPosWithinAlloc::kAllocEnd;
|
||||
#else
|
||||
return false;
|
||||
#endif
|
||||
}
|
||||
|
||||
template <bool AllowDangling>
|
||||
bool RawPtrBackupRefImpl<AllowDangling>::IsPointeeAlive(uintptr_t address) {
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
|
||||
@ -75,38 +98,11 @@ bool RawPtrBackupRefImpl<AllowDangling>::IsPointeeAlive(uintptr_t address) {
|
||||
->IsAlive();
|
||||
}
|
||||
|
||||
template <bool AllowDangling>
|
||||
template <typename Z>
|
||||
partition_alloc::PtrPosWithinAlloc
|
||||
RawPtrBackupRefImpl<AllowDangling>::IsValidDelta(
|
||||
uintptr_t address,
|
||||
partition_alloc::internal::PtrDelta<Z> delta) {
|
||||
return partition_alloc::internal::PartitionAllocIsValidPtrDelta(address,
|
||||
delta);
|
||||
}
|
||||
|
||||
// Explicitly instantiates the two BackupRefPtr variants in the .cc. This
|
||||
// ensures the definitions not visible from the .h are available in the binary.
|
||||
template struct RawPtrBackupRefImpl</*AllowDangling=*/false>;
|
||||
template struct RawPtrBackupRefImpl</*AllowDangling=*/true>;
|
||||
|
||||
template PA_COMPONENT_EXPORT(RAW_PTR)
|
||||
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<false>::IsValidDelta(
|
||||
uintptr_t,
|
||||
partition_alloc::internal::PtrDelta<size_t>);
|
||||
template PA_COMPONENT_EXPORT(RAW_PTR)
|
||||
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<false>::IsValidDelta(
|
||||
uintptr_t,
|
||||
partition_alloc::internal::PtrDelta<ptrdiff_t>);
|
||||
template PA_COMPONENT_EXPORT(RAW_PTR)
|
||||
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<true>::IsValidDelta(
|
||||
uintptr_t,
|
||||
partition_alloc::internal::PtrDelta<size_t>);
|
||||
template PA_COMPONENT_EXPORT(RAW_PTR)
|
||||
partition_alloc::PtrPosWithinAlloc RawPtrBackupRefImpl<true>::IsValidDelta(
|
||||
uintptr_t,
|
||||
partition_alloc::internal::PtrDelta<ptrdiff_t>);
|
||||
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
|
||||
void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address) {
|
||||
if (partition_alloc::internal::IsManagedByDirectMap(address)) {
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include "base/allocator/partition_allocator/partition_address_space.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_config.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
|
||||
@ -36,7 +37,8 @@ struct RawPtrBackupRefImpl {
|
||||
// threads modify the same smart pointer object without synchronization, a
|
||||
// data race will occur.
|
||||
|
||||
static PA_ALWAYS_INLINE bool IsSupportedAndNotNull(uintptr_t address) {
|
||||
private:
|
||||
PA_ALWAYS_INLINE static bool IsSupportedAndNotNull(uintptr_t address) {
|
||||
// There are many situations where the compiler can prove that
|
||||
// `ReleaseWrappedPtr` is called on a value that is always nullptr, but the
|
||||
// way `IsManagedByPartitionAllocBRPPool` is written, the compiler can't
|
||||
@ -87,7 +89,7 @@ struct RawPtrBackupRefImpl {
|
||||
return is_in_brp_pool;
|
||||
}
|
||||
|
||||
#if PA_CONFIG(USE_OOB_POISON)
|
||||
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
// Out-Of-Bounds (OOB) poison bit is set when the pointer has overflowed by
|
||||
// one byte.
|
||||
#if defined(ARCH_CPU_X86_64)
|
||||
@ -101,32 +103,36 @@ struct RawPtrBackupRefImpl {
|
||||
#endif
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* UnpoisonPtr(T* ptr) {
|
||||
PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
|
||||
return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) &
|
||||
~OOB_POISON_BIT);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE bool IsPtrOOB(T* ptr) {
|
||||
PA_ALWAYS_INLINE static bool IsPtrOOB(T* ptr) {
|
||||
return (reinterpret_cast<uintptr_t>(ptr) & OOB_POISON_BIT) ==
|
||||
OOB_POISON_BIT;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* PoisonOOBPtr(T* ptr) {
|
||||
PA_ALWAYS_INLINE static T* PoisonOOBPtr(T* ptr) {
|
||||
return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) |
|
||||
OOB_POISON_BIT);
|
||||
}
|
||||
#else // PA_USE_OOB_POISON
|
||||
#else // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* UnpoisonPtr(T* ptr) {
|
||||
PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
|
||||
return ptr;
|
||||
}
|
||||
#endif // PA_USE_OOB_POISON
|
||||
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
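Minimal sketch of the poison-bit encoding used by these helpers: stash a flag in a pointer bit that user-space addresses are not expected to use (the top bit below, mirroring what the x86-64 branch above appears to do), so an end-of-allocation pointer can be detected on dereference and stripped for comparisons. The bit position and names here are assumptions for illustration.

#include <cstdint>

constexpr uintptr_t kOobPoisonBit = static_cast<uintptr_t>(1) << 63;

template <typename T>
T* Poison(T* ptr) {
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) |
                              kOobPoisonBit);
}

template <typename T>
T* Unpoison(T* ptr) {
  return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) &
                              ~kOobPoisonBit);
}

template <typename T>
bool IsPoisoned(T* ptr) {
  return (reinterpret_cast<uintptr_t>(ptr) & kOobPoisonBit) != 0;
}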
|
||||
public:
|
||||
// Wraps a pointer.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return ptr;
|
||||
}
|
||||
uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(ptr));
|
||||
if (IsSupportedAndNotNull(address)) {
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
|
||||
@ -157,7 +163,10 @@ struct RawPtrBackupRefImpl {
|
||||
|
||||
// Notifies the allocator when a wrapped pointer is being removed or replaced.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return;
|
||||
}
|
||||
uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr));
|
||||
if (IsSupportedAndNotNull(address)) {
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
|
||||
@ -177,9 +186,13 @@ struct RawPtrBackupRefImpl {
|
||||
// Unwraps the pointer, while asserting that memory hasn't been freed. The
|
||||
// function is allowed to crash on nullptr.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
|
||||
T* wrapped_ptr) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
|
||||
#if PA_CONFIG(USE_OOB_POISON)
|
||||
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
PA_BASE_CHECK(!IsPtrOOB(wrapped_ptr));
|
||||
#endif
|
||||
uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr);
|
||||
@ -195,9 +208,13 @@ struct RawPtrBackupRefImpl {
|
||||
// Unwraps the pointer, while asserting that memory hasn't been freed. The
|
||||
// function must handle nullptr gracefully.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
|
||||
T* wrapped_ptr) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
|
||||
#if PA_CONFIG(USE_OOB_POISON)
|
||||
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
// Some code uses invalid pointer values as indicators, so those values must
|
||||
// be passed through unchanged during extraction. The following check will
|
||||
// pass invalid values through if those values do not fall within the BRP
|
||||
@ -210,14 +227,18 @@ struct RawPtrBackupRefImpl {
|
||||
// OOB conditions, e.g., in code that extracts an end-of-allocation pointer
|
||||
// for use in a loop termination condition. The poison bit would make that
|
||||
// pointer appear to reference a very high address.
|
||||
#endif // PA_CONFIG(USE_OOB_POISON)
|
||||
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
return unpoisoned_ptr;
|
||||
}
|
||||
|
||||
// Unwraps the pointer, without making an assertion on whether memory was
|
||||
// freed or not.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
|
||||
T* wrapped_ptr) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
// This may be used for unwrapping an end-of-allocation pointer to be used
|
||||
// as an endpoint in an iterative algorithm, so this removes the OOB poison
|
||||
// bit.
|
||||
@ -226,7 +247,7 @@ struct RawPtrBackupRefImpl {
|
||||
|
||||
// Upcasts the wrapped pointer.
|
||||
template <typename To, typename From>
|
||||
static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
|
||||
static_assert(std::is_convertible<From*, To*>::value,
|
||||
"From must be convertible to To.");
|
||||
// Note, this cast may change the address if upcasting to base that lies in
|
||||
@ -234,53 +255,12 @@ struct RawPtrBackupRefImpl {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// Advance the wrapped pointer by `delta_elems`.
|
||||
template <
|
||||
typename T,
|
||||
typename Z,
|
||||
typename =
|
||||
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
|
||||
static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
|
||||
#if BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
|
||||
T* new_ptr = unpoisoned_ptr + delta_elems;
|
||||
// First check if the new address didn't migrate in/out the BRP pool, and
|
||||
// that it lands within the same allocation. An end-of-allocation address is
|
||||
// ok, too, and that may lead to the pointer being poisoned if the relevant
|
||||
// feature is enabled. These checks add a non-trivial cost, but they're
|
||||
// cheaper and more secure than the previous implementation that rewrapped
|
||||
// the pointer (wrapped the new pointer and unwrapped the old one).
|
||||
//
|
||||
// Note, the value of these checks goes beyond OOB protection. They're
|
||||
// important for integrity of the BRP algorithm. Without these, an attacker
|
||||
// could make the pointer point to another allocation, and cause its
|
||||
// ref-count to go to 0 upon this pointer's destruction, even though there
|
||||
// may be another pointer still pointing to it, thus making it lose the BRP
|
||||
// protection prematurely.
|
||||
uintptr_t address = partition_alloc::UntagPtr(unpoisoned_ptr);
|
||||
// TODO(bartekn): Consider adding support for non-BRP pools too (without
|
||||
// removing the cross-pool migration check).
|
||||
if (IsSupportedAndNotNull(address)) {
|
||||
auto ptr_pos_within_alloc = IsValidDelta(
|
||||
address, delta_elems * static_cast<Z>(sizeof(T)), sizeof(T));
|
||||
// No need to check that |new_ptr| is in the same pool, as IsValidDelta()
|
||||
// checks that it's within the same allocation, so must be the same pool.
|
||||
PA_BASE_CHECK(ptr_pos_within_alloc !=
|
||||
partition_alloc::PtrPosWithinAlloc::kFarOOB);
|
||||
#if PA_CONFIG(USE_OOB_POISON)
|
||||
if (ptr_pos_within_alloc ==
|
||||
partition_alloc::PtrPosWithinAlloc::kAllocEnd) {
|
||||
new_ptr = PoisonOOBPtr(new_ptr);
|
||||
}
|
||||
#endif
|
||||
} else {
|
||||
// Check that the new address didn't migrate into the BRP pool, as it
|
||||
// would result in more pointers pointing to an allocation than its
|
||||
// ref-count reflects.
|
||||
PA_BASE_CHECK(!IsSupportedAndNotNull(partition_alloc::UntagPtr(new_ptr)));
|
||||
}
|
||||
return new_ptr;
|
||||
#else // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
// Verify the pointer stayed in the same slot, and return the poisoned version
|
||||
// of `new_ptr` if OOB poisoning is enabled.
|
||||
template <typename T>
|
||||
PA_ALWAYS_INLINE static T* VerifyAndPoisonPointerAfterAdvanceOrRetreat(
|
||||
T* unpoisoned_ptr,
|
||||
T* new_ptr) {
|
||||
// In the "before allocation" mode, on 32-bit, we can run into a problem
|
||||
// that the end-of-allocation address could fall outside of
|
||||
// PartitionAlloc's pools, if this is the last slot of the super page,
|
||||
@ -305,26 +285,99 @@ struct RawPtrBackupRefImpl {
|
||||
// This problem doesn't exist in the "previous slot" mode, or any mode that
|
||||
// involves putting extras after the allocation, because the
|
||||
// end-of-allocation address belongs to the same slot.
|
||||
static_assert(false);
|
||||
#endif // BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT)
|
||||
static_assert(BUILDFLAG(PUT_REF_COUNT_IN_PREVIOUS_SLOT));
|
||||
|
||||
// First check if the new address didn't migrate in/out the BRP pool, and
|
||||
// that it lands within the same allocation. An end-of-allocation address is
|
||||
// ok, too, and that may lead to the pointer being poisoned if the relevant
|
||||
// feature is enabled. These checks add a non-trivial cost, but they're
|
||||
// cheaper and more secure than the previous implementation that rewrapped
|
||||
// the pointer (wrapped the new pointer and unwrapped the old one).
|
||||
//
|
||||
// Note, the value of these checks goes beyond OOB protection. They're
|
||||
// important for integrity of the BRP algorithm. Without these, an attacker
|
||||
// could make the pointer point to another allocation, and cause its
|
||||
// ref-count to go to 0 upon this pointer's destruction, even though there
|
||||
// may be another pointer still pointing to it, thus making it lose the BRP
|
||||
// protection prematurely.
|
||||
const uintptr_t before_addr = partition_alloc::UntagPtr(unpoisoned_ptr);
|
||||
const uintptr_t after_addr = partition_alloc::UntagPtr(new_ptr);
|
||||
// TODO(bartekn): Consider adding support for non-BRP pools too (without
|
||||
// removing the cross-pool migration check).
|
||||
if (IsSupportedAndNotNull(before_addr)) {
|
||||
constexpr size_t size = sizeof(T);
|
||||
[[maybe_unused]] const bool is_end =
|
||||
CheckPointerWithinSameAlloc(before_addr, after_addr, size);
|
||||
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
if (is_end) {
|
||||
new_ptr = PoisonOOBPtr(new_ptr);
|
||||
}
|
||||
#endif // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
|
||||
} else {
|
||||
// Check that the new address didn't migrate into the BRP pool, as it
|
||||
// would result in more pointers pointing to an allocation than its
|
||||
// ref-count reflects.
|
||||
PA_BASE_CHECK(!IsSupportedAndNotNull(after_addr));
|
||||
}
|
||||
return new_ptr;
|
||||
}
|
||||
|
||||
// Advance the wrapped pointer by `delta_elems`.
|
||||
template <
|
||||
typename T,
|
||||
typename Z,
|
||||
typename =
|
||||
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
|
||||
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return wrapped_ptr + delta_elems;
|
||||
}
|
||||
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
|
||||
return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
|
||||
unpoisoned_ptr, unpoisoned_ptr + delta_elems);
|
||||
}
|
||||
|
||||
// Retreat the wrapped pointer by `delta_elems`.
|
||||
template <
|
||||
typename T,
|
||||
typename Z,
|
||||
typename =
|
||||
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
|
||||
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return wrapped_ptr - delta_elems;
|
||||
}
|
||||
T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
|
||||
return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
|
||||
unpoisoned_ptr, unpoisoned_ptr - delta_elems);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
|
||||
T* wrapped_ptr2) {
|
||||
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
|
||||
T* wrapped_ptr2) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return wrapped_ptr1 - wrapped_ptr2;
|
||||
}
|
||||
|
||||
T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1);
|
||||
T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2);
|
||||
#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return unpoisoned_ptr1 - unpoisoned_ptr2;
|
||||
}
|
||||
uintptr_t address1 = partition_alloc::UntagPtr(unpoisoned_ptr1);
|
||||
uintptr_t address2 = partition_alloc::UntagPtr(unpoisoned_ptr2);
|
||||
// Ensure that both pointers are within the same slot, and pool!
|
||||
// TODO(bartekn): Consider adding support for non-BRP pool too.
|
||||
if (IsSupportedAndNotNull(address1)) {
|
||||
PA_BASE_CHECK(IsSupportedAndNotNull(address2));
|
||||
PA_BASE_CHECK(IsValidDelta(address2, address1 - address2, sizeof(T)) !=
|
||||
partition_alloc::PtrPosWithinAlloc::kFarOOB);
|
||||
PA_BASE_CHECK(partition_alloc::internal::IsPtrWithinSameAlloc(
|
||||
address2, address1, sizeof(T)) !=
|
||||
partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
|
||||
} else {
|
||||
PA_BASE_CHECK(!IsSupportedAndNotNull(address2));
|
||||
}
|
||||
#endif // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
|
||||
return unpoisoned_ptr1 - unpoisoned_ptr2;
|
||||
}
|
||||
|
||||
@ -332,32 +385,45 @@ struct RawPtrBackupRefImpl {
|
||||
// memory was freed or not.
|
||||
// This method increments the reference count of the allocation slot.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
return WrapRawPtr(wrapped_ptr);
|
||||
}
|
||||
|
||||
// Report the current wrapped pointer if pointee isn't alive anymore.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE void ReportIfDangling(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static void ReportIfDangling(T* wrapped_ptr) {
|
||||
ReportIfDanglingInternal(partition_alloc::UntagPtr(wrapped_ptr));
|
||||
}
|
||||
|
||||
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
|
||||
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
|
||||
return WrapRawPtr(ptr);
|
||||
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return ptr;
|
||||
} else {
|
||||
return WrapRawPtr(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
|
||||
return UnpoisonPtr(wrapped_ptr);
|
||||
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
|
||||
T* wrapped_ptr) {
|
||||
if (partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
return wrapped_ptr;
|
||||
} else {
|
||||
return UnpoisonPtr(wrapped_ptr);
|
||||
}
|
||||
}
|
||||
|
||||
// This is for accounting only, used by unit tests.
|
||||
static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
|
||||
static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
|
||||
static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void
|
||||
IncrementPointerToMemberOperatorCountForTest() {}
|
||||
|
||||
private:
|
||||
// We've evaluated several strategies (inline nothing, various parts, or
|
||||
@ -366,31 +432,24 @@ struct RawPtrBackupRefImpl {
|
||||
// lightweight |IsManagedByPartitionAllocBRPPool()| check was inlined.
|
||||
// Therefore, we've extracted the rest into the functions below and marked
|
||||
// them as PA_NOINLINE to prevent unintended LTO effects.
|
||||
static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
|
||||
void AcquireInternal(uintptr_t address);
|
||||
static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
|
||||
void ReleaseInternal(uintptr_t address);
|
||||
static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
|
||||
bool IsPointeeAlive(uintptr_t address);
|
||||
static PA_COMPONENT_EXPORT(RAW_PTR) PA_NOINLINE
|
||||
void ReportIfDanglingInternal(uintptr_t address);
|
||||
template <
|
||||
typename Z,
|
||||
typename =
|
||||
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
|
||||
static PA_ALWAYS_INLINE partition_alloc::PtrPosWithinAlloc
|
||||
IsValidDelta(uintptr_t address, Z delta_in_bytes, size_t type_size) {
|
||||
using delta_t = std::conditional_t<std::is_signed_v<Z>, ptrdiff_t, size_t>;
|
||||
partition_alloc::internal::PtrDelta<delta_t> ptr_delta(delta_in_bytes,
|
||||
type_size);
|
||||
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void AcquireInternal(
|
||||
uintptr_t address);
|
||||
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReleaseInternal(
|
||||
uintptr_t address);
|
||||
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) bool IsPointeeAlive(
|
||||
uintptr_t address);
|
||||
PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReportIfDanglingInternal(
|
||||
uintptr_t address);
|
||||
|
||||
return IsValidDelta(address, ptr_delta);
|
||||
}
|
||||
template <typename Z>
|
||||
static PA_COMPONENT_EXPORT(RAW_PTR)
|
||||
PA_NOINLINE partition_alloc::PtrPosWithinAlloc
|
||||
IsValidDelta(uintptr_t address,
|
||||
partition_alloc::internal::PtrDelta<Z> delta);
|
||||
// CHECK if `before_addr` and `after_addr` are in the same allocation, for a
|
||||
// given `type_size`.
|
||||
// If BACKUP_REF_PTR_POISON_OOB_PTR is enabled, return whether the pointer is
// at the end of the allocation.
// If BACKUP_REF_PTR_POISON_OOB_PTR is disabled, return false.
|
||||
PA_NOINLINE static PA_COMPONENT_EXPORT(
|
||||
RAW_PTR) bool CheckPointerWithinSameAlloc(uintptr_t before_addr,
|
||||
uintptr_t after_addr,
|
||||
size_t type_size);
|
||||
};
|
||||
|
||||
} // namespace base::internal
|
||||
|
@ -11,6 +11,7 @@
|
||||
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
|
||||
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
|
||||
|
||||
@ -45,47 +46,60 @@ PA_COMPONENT_EXPORT(RAW_PTR) void ResetRawPtrHooks();
|
||||
struct RawPtrHookableImpl {
|
||||
// Wraps a pointer.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* WrapRawPtr(T* ptr) {
|
||||
GetRawPtrHooks()->wrap_ptr(reinterpret_cast<uintptr_t>(ptr));
|
||||
PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
GetRawPtrHooks()->wrap_ptr(reinterpret_cast<uintptr_t>(ptr));
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
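The is_constant_evaluated() guard above is the standard C++20 trick for keeping a constexpr-callable function's runtime-only side effects out of constant evaluation; a standalone sketch with an assumed counter:

#include <atomic>
#include <type_traits>

std::atomic<long> g_wrap_count{0};

template <typename T>
constexpr T* WrapTracked(T* ptr) {
  if (!std::is_constant_evaluated()) {
    g_wrap_count.fetch_add(1, std::memory_order_relaxed);  // runtime only
  }
  return ptr;
}

constexpr int kValue = 42;
// Evaluated at compile time: the counter update is skipped entirely.
constexpr const int* kWrapped = WrapTracked(&kValue);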
|
||||
// Notifies the allocator when a wrapped pointer is being removed or replaced.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE void ReleaseWrappedPtr(T* ptr) {
|
||||
GetRawPtrHooks()->release_wrapped_ptr(reinterpret_cast<uintptr_t>(ptr));
|
||||
PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* ptr) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
GetRawPtrHooks()->release_wrapped_ptr(reinterpret_cast<uintptr_t>(ptr));
|
||||
}
|
||||
}
|
||||
|
||||
// Unwraps the pointer, while asserting that memory hasn't been freed. The
|
||||
// function is allowed to crash on nullptr.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForDereference(T* wrapped_ptr) {
|
||||
GetRawPtrHooks()->safely_unwrap_for_dereference(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr));
|
||||
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
|
||||
T* wrapped_ptr) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
GetRawPtrHooks()->safely_unwrap_for_dereference(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr));
|
||||
}
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// Unwraps the pointer, while asserting that memory hasn't been freed. The
|
||||
// function must handle nullptr gracefully.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* SafelyUnwrapPtrForExtraction(T* wrapped_ptr) {
|
||||
GetRawPtrHooks()->safely_unwrap_for_extraction(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr));
|
||||
PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
|
||||
T* wrapped_ptr) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
GetRawPtrHooks()->safely_unwrap_for_extraction(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr));
|
||||
}
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// Unwraps the pointer, without making an assertion on whether memory was
|
||||
// freed or not.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForComparison(T* wrapped_ptr) {
|
||||
GetRawPtrHooks()->unsafely_unwrap_for_comparison(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr));
|
||||
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
|
||||
T* wrapped_ptr) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
GetRawPtrHooks()->unsafely_unwrap_for_comparison(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr));
|
||||
}
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// Upcasts the wrapped pointer.
|
||||
template <typename To, typename From>
|
||||
static PA_ALWAYS_INLINE constexpr To* Upcast(From* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
|
||||
static_assert(std::is_convertible<From*, To*>::value,
|
||||
"From must be convertible to To.");
|
||||
// Note, this cast may change the address if upcasting to base that lies in
|
||||
@ -98,44 +112,65 @@ struct RawPtrHookableImpl {
|
||||
typename T,
|
||||
typename Z,
|
||||
typename =
|
||||
std::enable_if_t<partition_alloc::internal::offset_type<Z>, void>>
|
||||
static PA_ALWAYS_INLINE T* Advance(T* wrapped_ptr, Z delta_elems) {
|
||||
GetRawPtrHooks()->advance(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr),
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr + delta_elems));
|
||||
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
|
||||
PA_ALWAYS_INLINE static constexpr T* Advance(T* wrapped_ptr, Z delta_elems) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
GetRawPtrHooks()->advance(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr),
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr + delta_elems));
|
||||
}
|
||||
return wrapped_ptr + delta_elems;
|
||||
}
|
||||
|
||||
// Retreat the wrapped pointer by `delta_elems`.
|
||||
template <
|
||||
typename T,
|
||||
typename Z,
|
||||
typename =
|
||||
std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
|
||||
PA_ALWAYS_INLINE static constexpr T* Retreat(T* wrapped_ptr, Z delta_elems) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
GetRawPtrHooks()->advance(
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr),
|
||||
reinterpret_cast<uintptr_t>(wrapped_ptr - delta_elems));
|
||||
}
|
||||
return wrapped_ptr - delta_elems;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
|
||||
T* wrapped_ptr2) {
|
||||
PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
|
||||
T* wrapped_ptr2) {
|
||||
return wrapped_ptr1 - wrapped_ptr2;
|
||||
}
|
||||
|
||||
// Returns a copy of a wrapped pointer, without making an assertion on whether
|
||||
// memory was freed or not.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* Duplicate(T* wrapped_ptr) {
|
||||
GetRawPtrHooks()->duplicate(reinterpret_cast<uintptr_t>(wrapped_ptr));
|
||||
PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
|
||||
if (!partition_alloc::internal::base::is_constant_evaluated()) {
|
||||
GetRawPtrHooks()->duplicate(reinterpret_cast<uintptr_t>(wrapped_ptr));
|
||||
}
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are used
|
||||
// to create a new raw_ptr<T> from another raw_ptr<T> of a different flavor.
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* WrapRawPtrForDuplication(T* ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
|
||||
return ptr;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
static PA_ALWAYS_INLINE T* UnsafelyUnwrapPtrForDuplication(T* wrapped_ptr) {
|
||||
PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
|
||||
T* wrapped_ptr) {
|
||||
return wrapped_ptr;
|
||||
}
|
||||
|
||||
// This is for accounting only, used by unit tests.
|
||||
static PA_ALWAYS_INLINE void IncrementSwapCountForTest() {}
|
||||
static PA_ALWAYS_INLINE void IncrementLessCountForTest() {}
|
||||
static PA_ALWAYS_INLINE void IncrementPointerToMemberOperatorCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
|
||||
PA_ALWAYS_INLINE static constexpr void
|
||||
IncrementPointerToMemberOperatorCountForTest() {}
|
||||
};
|
||||
|
||||
} // namespace base::internal
|
||||
|
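The hunk above wraps every hook call in an is_constant_evaluated() check so the wrapper functions can become constexpr. A self-contained sketch of that pattern using std::is_constant_evaluated() (C++20); HookFn, g_wrap_hook and WrapPtr are hypothetical names, not the raw_ptr API:

#include <cstdint>
#include <type_traits>

using HookFn = void (*)(uintptr_t);
inline HookFn g_wrap_hook = nullptr;  // installed at runtime, e.g. by a test

template <typename T>
constexpr T* WrapPtr(T* ptr) {
  // The hook is a runtime side effect; skip it during constant evaluation,
  // where calling through a function pointer would not be a constant
  // expression anyway.
  if (!std::is_constant_evaluated()) {
    if (g_wrap_hook) {
      g_wrap_hook(reinterpret_cast<uintptr_t>(ptr));
    }
  }
  return ptr;
}

// Usable both at compile time and at runtime:
constexpr int* kNull = WrapPtr<int>(nullptr);  // hook branch skipped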
@ -73,21 +73,28 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
|
||||
// and aborts. Failure to clear would be indicated by the related death tests
|
||||
// not CHECKing appropriately.
|
||||
static constexpr bool need_clear_after_move =
|
||||
#if PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
std::is_same_v<Impl,
|
||||
internal::MTECheckedPtrImpl<
|
||||
internal::MTECheckedPtrImplPartitionAllocSupport>> ||
|
||||
#endif // PA_CONFIG(ENABLE_MTE_CHECKED_PTR_SUPPORT_WITH_64_BITS_POINTERS)
|
||||
#if BUILDFLAG(USE_ASAN_UNOWNED_PTR)
|
||||
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<true>> ||
|
||||
std::is_same_v<Impl, internal::RawPtrAsanUnownedImpl<false>> ||
|
||||
#endif // BUILDFLAG(USE_ASAN_UNOWNED_PTR)
|
||||
std::is_same_v<Impl, internal::RawPtrNoOpImpl>;
|
||||
|
||||
PA_ALWAYS_INLINE explicit raw_ref(T& p) noexcept
|
||||
// Construct a raw_ref from a pointer, which must not be null.
|
||||
//
|
||||
// This function is safe to use with any pointer, as it will CHECK and
|
||||
// terminate the process if the pointer is null. Avoid dereferencing a pointer
|
||||
// to avoid this CHECK as you may be dereferencing null.
|
||||
PA_ALWAYS_INLINE constexpr static raw_ref from_ptr(T* ptr) noexcept {
|
||||
PA_RAW_PTR_CHECK(ptr);
|
||||
return raw_ref(*ptr);
|
||||
}
|
||||
|
||||
// Construct a raw_ref from a reference.
|
||||
PA_ALWAYS_INLINE constexpr explicit raw_ref(T& p) noexcept
|
||||
: inner_(std::addressof(p)) {}
|
||||
|
||||
PA_ALWAYS_INLINE raw_ref& operator=(T& p) noexcept {
|
||||
// Assign a new reference to the raw_ref, replacing the existing reference.
|
||||
PA_ALWAYS_INLINE constexpr raw_ref& operator=(T& p) noexcept {
|
||||
inner_.operator=(&p);
|
||||
return *this;
|
||||
}
|
||||
@ -96,43 +103,12 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
|
||||
raw_ref(const T&& p) = delete;
|
||||
raw_ref& operator=(const T&& p) = delete;
|
||||
|
||||
PA_ALWAYS_INLINE raw_ref(const raw_ref& p) noexcept : inner_(p.inner_) {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE raw_ref(raw_ref&& p) noexcept : inner_(std::move(p.inner_)) {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
if constexpr (need_clear_after_move) {
|
||||
p.inner_ = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref& p) noexcept {
|
||||
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
|
||||
inner_.operator=(p.inner_);
|
||||
return *this;
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref&& p) noexcept {
|
||||
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
|
||||
inner_.operator=(std::move(p.inner_));
|
||||
if constexpr (need_clear_after_move) {
|
||||
p.inner_ = nullptr;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
// Deliberately implicit in order to support implicit upcast.
|
||||
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
|
||||
// NOLINTNEXTLINE(google-explicit-constructor)
|
||||
PA_ALWAYS_INLINE raw_ref(const raw_ref<U, Traits>& p) noexcept
|
||||
PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref& p) noexcept
|
||||
: inner_(p.inner_) {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
}
|
||||
// Deliberately implicit in order to support implicit upcast.
|
||||
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
|
||||
// NOLINTNEXTLINE(google-explicit-constructor)
|
||||
PA_ALWAYS_INLINE raw_ref(raw_ref<U, Traits>&& p) noexcept
|
||||
|
||||
PA_ALWAYS_INLINE constexpr raw_ref(raw_ref&& p) noexcept
|
||||
: inner_(std::move(p.inner_)) {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
if constexpr (need_clear_after_move) {
|
||||
@ -140,20 +116,13 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
|
||||
}
|
||||
}
|
||||
|
||||
static PA_ALWAYS_INLINE raw_ref from_ptr(T* ptr) noexcept {
|
||||
PA_RAW_PTR_CHECK(ptr);
|
||||
return raw_ref(*ptr);
|
||||
}
|
||||
|
||||
// Upcast assignment
|
||||
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
|
||||
PA_ALWAYS_INLINE raw_ref& operator=(const raw_ref<U, Traits>& p) noexcept {
|
||||
PA_ALWAYS_INLINE constexpr raw_ref& operator=(const raw_ref& p) noexcept {
|
||||
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
|
||||
inner_.operator=(p.inner_);
|
||||
return *this;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
|
||||
PA_ALWAYS_INLINE raw_ref& operator=(raw_ref<U, Traits>&& p) noexcept {
|
||||
|
||||
PA_ALWAYS_INLINE constexpr raw_ref& operator=(raw_ref&& p) noexcept {
|
||||
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
|
||||
inner_.operator=(std::move(p.inner_));
|
||||
if constexpr (need_clear_after_move) {
|
||||
@ -162,7 +131,60 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
|
||||
return *this;
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE T& operator*() const {
|
||||
// Deliberately implicit in order to support implicit upcast.
|
||||
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to
|
||||
// allow it.
|
||||
template <class U,
|
||||
RawPtrTraits PassedTraits,
|
||||
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
|
||||
// NOLINTNEXTLINE(google-explicit-constructor)
|
||||
PA_ALWAYS_INLINE constexpr raw_ref(const raw_ref<U, PassedTraits>& p) noexcept
|
||||
: inner_(p.inner_) {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
}
|
||||
// Deliberately implicit in order to support implicit upcast.
|
||||
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to
|
||||
// allow it.
|
||||
template <class U,
|
||||
RawPtrTraits PassedTraits,
|
||||
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
|
||||
// NOLINTNEXTLINE(google-explicit-constructor)
|
||||
PA_ALWAYS_INLINE constexpr raw_ref(raw_ref<U, PassedTraits>&& p) noexcept
|
||||
: inner_(std::move(p.inner_)) {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
if constexpr (need_clear_after_move) {
|
||||
p.inner_ = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// Upcast assignment
|
||||
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to
|
||||
// allow it.
|
||||
template <class U,
|
||||
RawPtrTraits PassedTraits,
|
||||
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
|
||||
PA_ALWAYS_INLINE constexpr raw_ref& operator=(
|
||||
const raw_ref<U, PassedTraits>& p) noexcept {
|
||||
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
|
||||
inner_.operator=(p.inner_);
|
||||
return *this;
|
||||
}
|
||||
// Delegate cross-kind conversion to the inner raw_ptr, which decides when to
|
||||
// allow it.
|
||||
template <class U,
|
||||
RawPtrTraits PassedTraits,
|
||||
class = std::enable_if_t<std::is_convertible_v<U&, T&>>>
|
||||
PA_ALWAYS_INLINE constexpr raw_ref& operator=(
|
||||
raw_ref<U, PassedTraits>&& p) noexcept {
|
||||
PA_RAW_PTR_CHECK(p.inner_); // Catch use-after-move.
|
||||
inner_.operator=(std::move(p.inner_));
|
||||
if constexpr (need_clear_after_move) {
|
||||
p.inner_ = nullptr;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE constexpr T& operator*() const {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
return inner_.operator*();
|
||||
}
|
||||
@ -171,12 +193,13 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
|
||||
// rather than GetForDereference semantics (see raw_ptr.h). This should be
|
||||
// used in place of operator*() when the memory referred to by the reference
|
||||
// is not immediately going to be accessed.
|
||||
PA_ALWAYS_INLINE T& get() const {
|
||||
PA_ALWAYS_INLINE constexpr T& get() const {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
return *inner_.get();
|
||||
}
|
||||
|
||||
PA_ALWAYS_INLINE T* operator->() const PA_ATTRIBUTE_RETURNS_NONNULL {
|
||||
PA_ALWAYS_INLINE constexpr T* operator->() const
|
||||
PA_ATTRIBUTE_RETURNS_NONNULL {
|
||||
PA_RAW_PTR_CHECK(inner_); // Catch use-after-move.
|
||||
return inner_.operator->();
|
||||
}
|
||||
@ -189,89 +212,90 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ref {
|
||||
inner_.ReportIfDangling();
|
||||
}
|
||||
|
||||
friend PA_ALWAYS_INLINE void swap(raw_ref& lhs, raw_ref& rhs) noexcept {
|
||||
PA_ALWAYS_INLINE friend constexpr void swap(raw_ref& lhs,
|
||||
raw_ref& rhs) noexcept {
|
||||
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
|
||||
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
|
||||
swap(lhs.inner_, rhs.inner_);
|
||||
}
|
||||
|
||||
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
|
||||
friend PA_ALWAYS_INLINE bool operator==(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
friend bool operator==(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
|
||||
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
friend bool operator!=(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
|
||||
friend PA_ALWAYS_INLINE bool operator<(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
friend bool operator<(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
|
||||
friend PA_ALWAYS_INLINE bool operator>(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
friend bool operator>(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
|
||||
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
friend bool operator<=(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
template <typename U, typename V, RawPtrTraits Traits1, RawPtrTraits Traits2>
|
||||
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
friend bool operator>=(const raw_ref<U, Traits1>& lhs,
|
||||
const raw_ref<V, Traits2>& rhs);
|
||||
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator==(const raw_ref& lhs, const U& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator==(const raw_ref& lhs, const U& rhs) {
|
||||
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
|
||||
return lhs.inner_ == &rhs;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator!=(const raw_ref& lhs, const U& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator!=(const raw_ref& lhs, const U& rhs) {
|
||||
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
|
||||
return lhs.inner_ != &rhs;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator<(const raw_ref& lhs, const U& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator<(const raw_ref& lhs, const U& rhs) {
|
||||
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
|
||||
return lhs.inner_ < &rhs;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator>(const raw_ref& lhs, const U& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator>(const raw_ref& lhs, const U& rhs) {
|
||||
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
|
||||
return lhs.inner_ > &rhs;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator<=(const raw_ref& lhs, const U& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator<=(const raw_ref& lhs, const U& rhs) {
|
||||
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
|
||||
return lhs.inner_ <= &rhs;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator>=(const raw_ref& lhs, const U& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator>=(const raw_ref& lhs, const U& rhs) {
|
||||
PA_RAW_PTR_CHECK(lhs.inner_); // Catch use-after-move.
|
||||
return lhs.inner_ >= &rhs;
|
||||
}
|
||||
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator==(const U& lhs, const raw_ref& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator==(const U& lhs, const raw_ref& rhs) {
|
||||
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
|
||||
return &lhs == rhs.inner_;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator!=(const U& lhs, const raw_ref& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator!=(const U& lhs, const raw_ref& rhs) {
|
||||
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
|
||||
return &lhs != rhs.inner_;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator<(const U& lhs, const raw_ref& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator<(const U& lhs, const raw_ref& rhs) {
|
||||
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
|
||||
return &lhs < rhs.inner_;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator>(const U& lhs, const raw_ref& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator>(const U& lhs, const raw_ref& rhs) {
|
||||
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
|
||||
return &lhs > rhs.inner_;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator<=(const U& lhs, const raw_ref& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator<=(const U& lhs, const raw_ref& rhs) {
|
||||
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
|
||||
return &lhs <= rhs.inner_;
|
||||
}
|
||||
template <class U, class = std::enable_if_t<!internal::is_raw_ref_v<U>, void>>
|
||||
friend PA_ALWAYS_INLINE bool operator>=(const U& lhs, const raw_ref& rhs) {
|
||||
PA_ALWAYS_INLINE friend bool operator>=(const U& lhs, const raw_ref& rhs) {
|
||||
PA_RAW_PTR_CHECK(rhs.inner_); // Catch use-after-move.
|
||||
return &lhs >= rhs.inner_;
|
||||
}
|
||||
|
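For context, a hedged usage sketch of the raw_ref surface touched above: from_ptr() CHECKs for null, and operator*, operator-> and get() assert the wrapped reference has not been moved from. The Config/Logger types and the include path are assumptions for illustration, not code from this diff:

#include "base/memory/raw_ref.h"  // assumed public header; adjust to your tree

struct Config {
  int verbosity = 0;
};

class Logger {
 public:
  // raw_ref documents a required (never-null) dependency.
  explicit Logger(Config& config) : config_(config) {}

  int verbosity() const { return config_->verbosity; }  // acts like a pointer

 private:
  raw_ref<Config> config_;
};

void Example(Config* maybe_config) {
  // from_ptr() CHECKs that the pointer is non-null before wrapping it.
  raw_ref<Config> ref = raw_ref<Config>::from_ptr(maybe_config);
  (*ref).verbosity = 1;
  Logger logger(*ref);
}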
@ -90,8 +90,9 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
uint16_t offsets[kReservationOffsetTableLength] = {};

constexpr _ReservationOffsetTable() {
for (uint16_t& offset : offsets)
for (uint16_t& offset : offsets) {
offset = kOffsetTagNotAllocated;
}
}
};
#if BUILDFLAG(HAS_64_BIT_POINTERS)
@ -106,7 +107,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) ReservationOffsetTable {
static PA_CONSTINIT _PaddedReservationOffsetTables
padded_reservation_offset_tables_ PA_PKEY_ALIGN;
#else
// A single table for the entire 32-bit address space.
// A single table for the entire 32-bit address space.
static PA_CONSTINIT struct _ReservationOffsetTable reservation_offset_table_;
#endif // BUILDFLAG(HAS_64_BIT_POINTERS)
};
@ -193,8 +194,9 @@ PA_ALWAYS_INLINE uintptr_t GetDirectMapReservationStart(uintptr_t address) {
|
||||
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
|
||||
uint16_t* offset_ptr = ReservationOffsetPointer(address);
|
||||
PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
|
||||
if (*offset_ptr == kOffsetTagNormalBuckets)
|
||||
if (*offset_ptr == kOffsetTagNormalBuckets) {
|
||||
return 0;
|
||||
}
|
||||
uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
|
||||
#if BUILDFLAG(PA_DCHECK_IS_ON)
|
||||
// MSVC workaround: the preprocessor seems to choke on an `#if` embedded
|
||||
@ -240,8 +242,9 @@ GetDirectMapReservationStart(uintptr_t address,
|
||||
address);
|
||||
uint16_t* offset_ptr = ReservationOffsetPointer(pool, offset_in_pool);
|
||||
PA_DCHECK(*offset_ptr != kOffsetTagNotAllocated);
|
||||
if (*offset_ptr == kOffsetTagNormalBuckets)
|
||||
if (*offset_ptr == kOffsetTagNormalBuckets) {
|
||||
return 0;
|
||||
}
|
||||
uintptr_t reservation_start = ComputeReservationStart(address, offset_ptr);
|
||||
PA_DCHECK(*ReservationOffsetPointer(reservation_start) == 0);
|
||||
return reservation_start;
|
||||
|
@ -189,6 +189,8 @@ BASE_EXPORT void EnablePartitionAllocMemoryReclaimer();
|
||||
|
||||
using EnableBrp = base::StrongAlias<class EnableBrpTag, bool>;
|
||||
using EnableBrpZapping = base::StrongAlias<class EnableBrpZappingTag, bool>;
|
||||
using EnableBrpPartitionMemoryReclaimer =
|
||||
base::StrongAlias<class EnableBrpPartitionMemoryReclaimerTag, bool>;
|
||||
using SplitMainPartition = base::StrongAlias<class SplitMainPartitionTag, bool>;
|
||||
using UseDedicatedAlignedPartition =
|
||||
base::StrongAlias<class UseDedicatedAlignedPartitionTag, bool>;
|
||||
@ -202,6 +204,7 @@ using AlternateBucketDistribution =
|
||||
BASE_EXPORT void ConfigurePartitions(
|
||||
EnableBrp enable_brp,
|
||||
EnableBrpZapping enable_brp_zapping,
|
||||
EnableBrpPartitionMemoryReclaimer enable_brp_memory_reclaimer,
|
||||
SplitMainPartition split_main_partition,
|
||||
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
|
||||
AddDummyRefCount add_dummy_ref_count,
|
||||
|
@ -584,6 +584,7 @@ void EnablePartitionAllocMemoryReclaimer() {
|
||||
void ConfigurePartitions(
|
||||
EnableBrp enable_brp,
|
||||
EnableBrpZapping enable_brp_zapping,
|
||||
EnableBrpPartitionMemoryReclaimer enable_brp_memory_reclaimer,
|
||||
SplitMainPartition split_main_partition,
|
||||
UseDedicatedAlignedPartition use_dedicated_aligned_partition,
|
||||
AddDummyRefCount add_dummy_ref_count,
|
||||
@ -692,6 +693,14 @@ void ConfigurePartitions(
|
||||
// is replaced, it must've been g_original_root.
|
||||
PA_CHECK(current_aligned_root == g_original_root);
|
||||
|
||||
if (enable_brp_memory_reclaimer) {
|
||||
partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(new_root);
|
||||
if (new_aligned_root != new_root) {
|
||||
partition_alloc::MemoryReclaimer::Instance()->RegisterPartition(
|
||||
new_aligned_root);
|
||||
}
|
||||
}
|
||||
|
||||
// Purge memory, now that the traffic to the original partition is cut off.
|
||||
current_root->PurgeMemory(
|
||||
partition_alloc::PurgeFlags::kDecommitEmptySlotSpans |
|
||||
|
@ -54,8 +54,9 @@ void SpinningMutex::AcquireSpinThenBlock() {
|
||||
int tries = 0;
|
||||
int backoff = 1;
|
||||
do {
|
||||
if (PA_LIKELY(Try()))
|
||||
if (PA_LIKELY(Try())) {
|
||||
return;
|
||||
}
|
||||
// Note: Per the intel optimization manual
|
||||
// (https://software.intel.com/content/dam/develop/public/us/en/documents/64-ia-32-architectures-optimization-manual.pdf),
|
||||
// the "pause" instruction is more costly on Skylake Client than on previous
|
||||
|
@ -73,7 +73,11 @@ class PA_LOCKABLE PA_COMPONENT_EXPORT(PARTITION_ALLOC) SpinningMutex {
|
||||
|
||||
private:
|
||||
PA_NOINLINE void AcquireSpinThenBlock() PA_EXCLUSIVE_LOCK_FUNCTION();
|
||||
#if PA_CONFIG(HAS_FAST_MUTEX)
|
||||
void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
|
||||
#else
|
||||
PA_ALWAYS_INLINE void LockSlow() PA_EXCLUSIVE_LOCK_FUNCTION();
|
||||
#endif
|
||||
|
||||
// See below, the latency of PA_YIELD_PROCESSOR can be as high as ~150
|
||||
// cycles. Meanwhile, sleeping costs a few us. Spinning 64 times at 3GHz would
|
||||
@ -119,8 +123,9 @@ PA_ALWAYS_INLINE void SpinningMutex::Acquire() {
// 1. We don't know how much contention the lock would experience
// 2. This may lead to weird-looking code layout when inlined into a caller
// with PA_(UN)LIKELY() annotations.
if (Try())
if (Try()) {
return;
}

return AcquireSpinThenBlock();
}
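Acquire() above keeps only a single Try() on the inlined fast path and pushes the spinning and blocking into the out-of-line AcquireSpinThenBlock(). A simplified, self-contained sketch of that shape; SpinThenBlockLock is a hypothetical class, not SpinningMutex:

#include <atomic>
#include <chrono>
#include <thread>

class SpinThenBlockLock {
 public:
  void Acquire() {
    if (Try()) {
      return;  // Uncontended fast path, kept trivially inlinable.
    }
    AcquireSlow();
  }

  void Release() { locked_.store(false, std::memory_order_release); }

 private:
  bool Try() {
    // exchange() returns the previous value; acquiring succeeded iff it was false.
    return !locked_.exchange(true, std::memory_order_acquire);
  }

  void AcquireSlow() {
    // Spin a bounded number of times, then fall back to sleeping so a
    // descheduled lock holder does not keep this thread burning CPU.
    for (int tries = 0; tries < 64; ++tries) {
      if (Try()) {
        return;
      }
      std::this_thread::yield();
    }
    while (!Try()) {
      std::this_thread::sleep_for(std::chrono::microseconds(50));
    }
  }

  std::atomic<bool> locked_{false};
};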
@ -16,14 +16,15 @@
|
||||
#include "base/allocator/partition_allocator/partition_page.h"
|
||||
#include "base/allocator/partition_allocator/starscan/pcscan_scheduling.h"
|
||||
#include "base/allocator/partition_allocator/tagging.h"
|
||||
|
||||
namespace partition_alloc {
|
||||
|
||||
class StatsReporter;
|
||||
|
||||
namespace internal {
|
||||
|
||||
[[noreturn]] PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE PA_NOT_TAIL_CALLED
|
||||
void DoubleFreeAttempt();
|
||||
[[noreturn]] PA_NOINLINE PA_NOT_TAIL_CALLED
|
||||
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void DoubleFreeAttempt();
|
||||
|
||||
// PCScan (Probabilistic Conservative Scanning) is the algorithm that eliminates
|
||||
// use-after-free bugs by verifying that there are no pointers in memory which
|
||||
@ -108,10 +109,10 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScan final {
|
||||
static void PerformDelayedScan(int64_t delay_in_microseconds);
|
||||
|
||||
// Enables safepoints in mutator threads.
|
||||
static void EnableSafepoints();
|
||||
PA_ALWAYS_INLINE static void EnableSafepoints();
|
||||
// Join scan from safepoint in mutator thread. As soon as PCScan is scheduled,
|
||||
// mutators can join PCScan helping out with clearing and scanning.
|
||||
static void JoinScanIfNeeded();
|
||||
PA_ALWAYS_INLINE static void JoinScanIfNeeded();
|
||||
|
||||
// Checks if there is a PCScan task currently in progress.
|
||||
PA_ALWAYS_INLINE static bool IsInProgress();
|
||||
@ -135,7 +136,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScan final {
|
||||
|
||||
static void UninitForTesting();
|
||||
|
||||
inline static PCScanScheduler& scheduler();
|
||||
static inline PCScanScheduler& scheduler();
|
||||
|
||||
// Registers reporting class.
|
||||
static void RegisterStatsReporter(partition_alloc::StatsReporter* reporter);
|
||||
|
@ -35,7 +35,7 @@ struct QuarantineData final {
|
||||
|
||||
class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PCScanSchedulingBackend {
|
||||
public:
|
||||
explicit inline constexpr PCScanSchedulingBackend(PCScanScheduler&);
|
||||
inline constexpr explicit PCScanSchedulingBackend(PCScanScheduler&);
|
||||
// No virtual destructor to allow constant initialization of PCScan as
|
||||
// static global which directly embeds LimitBackend as default backend.
|
||||
|
||||
@ -82,7 +82,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) LimitBackend final
|
||||
public:
|
||||
static constexpr double kQuarantineSizeFraction = 0.1;
|
||||
|
||||
explicit inline constexpr LimitBackend(PCScanScheduler&);
|
||||
inline constexpr explicit LimitBackend(PCScanScheduler&);
|
||||
|
||||
bool LimitReached() final;
|
||||
void UpdateScheduleAfterScan(size_t, base::TimeDelta, size_t) final;
|
||||
@ -188,7 +188,7 @@ QuarantineData& PCScanSchedulingBackend::GetQuarantineData() {
|
||||
constexpr LimitBackend::LimitBackend(PCScanScheduler& scheduler)
|
||||
: PCScanSchedulingBackend(scheduler) {}
|
||||
|
||||
bool PCScanScheduler::AccountFreed(size_t size) {
|
||||
PA_ALWAYS_INLINE bool PCScanScheduler::AccountFreed(size_t size) {
|
||||
const size_t size_before =
|
||||
quarantine_data_.current_size.fetch_add(size, std::memory_order_relaxed);
|
||||
return (size_before + size >
|
||||
|
@ -14,7 +14,7 @@ namespace partition_alloc::internal {
|
||||
|
||||
// Returns the current stack pointer.
|
||||
// TODO(bikineev,1202644): Remove this once base/stack_util.h lands.
|
||||
PA_COMPONENT_EXPORT(PARTITION_ALLOC) PA_NOINLINE uintptr_t* GetStackPointer();
|
||||
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t* GetStackPointer();
|
||||
// Returns the top of the stack using system API.
|
||||
PA_COMPONENT_EXPORT(PARTITION_ALLOC) void* GetStackTop();
|
||||
|
||||
|
@ -130,8 +130,9 @@ void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
|
||||
// Randomly tag a region (MTE-enabled systems only). The first 16-byte
|
||||
// granule is randomly tagged, all other granules in the region are
|
||||
// then assigned that initial tag via __arm_mte_set_tag.
|
||||
if (!CheckTagRegionParameters(ptr, sz))
|
||||
if (!CheckTagRegionParameters(ptr, sz)) {
|
||||
return nullptr;
|
||||
}
|
||||
// __arm_mte_create_random_tag generates a randomly tagged pointer via the
|
||||
// hardware's random number generator, but does not apply it to the memory.
|
||||
char* nptr = reinterpret_cast<char*>(__arm_mte_create_random_tag(ptr, mask));
|
||||
@ -146,8 +147,9 @@ void* TagRegionRandomlyForMTE(void* ptr, size_t sz, uint64_t mask) {
|
||||
void* TagRegionIncrementForMTE(void* ptr, size_t sz) {
|
||||
// Increment a region's tag (MTE-enabled systems only), using the tag of the
|
||||
// first granule.
|
||||
if (!CheckTagRegionParameters(ptr, sz))
|
||||
if (!CheckTagRegionParameters(ptr, sz)) {
|
||||
return nullptr;
|
||||
}
|
||||
// Increment ptr's tag.
|
||||
char* nptr = reinterpret_cast<char*>(__arm_mte_increment_tag(ptr, 1u));
|
||||
for (size_t i = 0; i < sz; i += kMemTagGranuleSize) {
|
||||
|
@ -93,18 +93,22 @@ void ThreadCacheRegistry::RegisterThreadCache(ThreadCache* cache) {
ThreadCache* previous_head = list_head_;
list_head_ = cache;
cache->next_ = previous_head;
if (previous_head)
if (previous_head) {
previous_head->prev_ = cache;
}
}

void ThreadCacheRegistry::UnregisterThreadCache(ThreadCache* cache) {
internal::ScopedGuard scoped_locker(GetLock());
if (cache->prev_)
if (cache->prev_) {
cache->prev_->next_ = cache->next_;
if (cache->next_)
}
if (cache->next_) {
cache->next_->prev_ = cache->prev_;
if (cache == list_head_)
}
if (cache == list_head_) {
list_head_ = cache->next_;
}
}

void ThreadCacheRegistry::DumpStats(bool my_thread_only,
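RegisterThreadCache() and UnregisterThreadCache() above maintain an intrusive doubly-linked list of caches under the registry lock. A minimal stand-alone sketch of the same push-front and unlink logic; Node and Registry are hypothetical names:

#include <mutex>

struct Node {
  Node* prev = nullptr;
  Node* next = nullptr;
};

struct Registry {
  std::mutex lock;
  Node* head = nullptr;

  void Register(Node* node) {
    std::lock_guard<std::mutex> guard(lock);
    // Push-front: the new node becomes the head of the list.
    node->next = head;
    node->prev = nullptr;
    if (head) {
      head->prev = node;
    }
    head = node;
  }

  void Unregister(Node* node) {
    std::lock_guard<std::mutex> guard(lock);
    // Splice the node out, fixing up both neighbours and the head pointer.
    if (node->prev) {
      node->prev->next = node->next;
    }
    if (node->next) {
      node->next->prev = node->prev;
    }
    if (node == head) {
      head = node->next;
    }
  }
};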
@ -115,8 +119,9 @@ void ThreadCacheRegistry::DumpStats(bool my_thread_only,
|
||||
internal::ScopedGuard scoped_locker(GetLock());
|
||||
if (my_thread_only) {
|
||||
auto* tcache = ThreadCache::Get();
|
||||
if (!ThreadCache::IsValid(tcache))
|
||||
if (!ThreadCache::IsValid(tcache)) {
|
||||
return;
|
||||
}
|
||||
tcache->AccumulateStats(stats);
|
||||
} else {
|
||||
ThreadCache* tcache = list_head_;
|
||||
@ -146,8 +151,9 @@ void ThreadCacheRegistry::PurgeAll() {
|
||||
// the main thread for the partition lock, since it is acquired/released once
|
||||
// per bucket. By purging the main thread first, we avoid these interferences
|
||||
// for this thread at least.
|
||||
if (ThreadCache::IsValid(current_thread_tcache))
|
||||
if (ThreadCache::IsValid(current_thread_tcache)) {
|
||||
current_thread_tcache->Purge();
|
||||
}
|
||||
|
||||
{
|
||||
internal::ScopedGuard scoped_locker(GetLock());
|
||||
@ -158,8 +164,9 @@ void ThreadCacheRegistry::PurgeAll() {
|
||||
// point".
|
||||
// Note that this will not work if the other thread is sleeping forever.
|
||||
// TODO(lizeb): Handle sleeping threads.
|
||||
if (tcache != current_thread_tcache)
|
||||
if (tcache != current_thread_tcache) {
|
||||
tcache->SetShouldPurge();
|
||||
}
|
||||
tcache = tcache->next_;
|
||||
}
|
||||
}
|
||||
@ -217,8 +224,9 @@ void ThreadCacheRegistry::SetThreadCacheMultiplier(float multiplier) {
|
||||
// If this is called before *any* thread cache has serviced *any*
|
||||
// allocation, which can happen in tests, and in theory in non-test code as
|
||||
// well.
|
||||
if (!tcache)
|
||||
if (!tcache) {
|
||||
return;
|
||||
}
|
||||
|
||||
// Setting the global limit while locked, because we need |tcache->root_|.
|
||||
ThreadCache::SetGlobalLimits(tcache->root_, multiplier);
|
||||
@ -256,8 +264,9 @@ void ThreadCacheRegistry::RunPeriodicPurge() {
|
||||
// Can run when there is no thread cache, in which case there is nothing to
|
||||
// do, and the task should not be rescheduled. This would typically indicate
|
||||
// a case where the thread cache was never enabled, or got disabled.
|
||||
if (!tcache)
|
||||
if (!tcache) {
|
||||
return;
|
||||
}
|
||||
|
||||
while (tcache) {
|
||||
cached_memory_approx += tcache->cached_memory_;
|
||||
@ -316,8 +325,9 @@ void ThreadCache::EnsureThreadSpecificDataInitialized() {
// adding a special-purpose lock.
internal::ScopedGuard scoped_locker(
ThreadCacheRegistry::Instance().GetLock());
if (g_thread_cache_key_created)
if (g_thread_cache_key_created) {
return;
}

bool ok = internal::PartitionTlsCreate(&internal::g_thread_cache_key, Delete);
PA_CHECK(ok);
@ -333,8 +343,9 @@ void ThreadCache::DeleteForTesting(ThreadCache* tcache) {
|
||||
void ThreadCache::SwapForTesting(PartitionRoot<>* root) {
|
||||
auto* old_tcache = ThreadCache::Get();
|
||||
g_thread_cache_root.store(nullptr, std::memory_order_relaxed);
|
||||
if (old_tcache)
|
||||
if (old_tcache) {
|
||||
ThreadCache::DeleteForTesting(old_tcache);
|
||||
}
|
||||
if (root) {
|
||||
Init(root);
|
||||
Create(root);
|
||||
@ -421,8 +432,9 @@ void ThreadCache::SetGlobalLimits(PartitionRoot<>* root, float multiplier) {
|
||||
|
||||
// static
|
||||
void ThreadCache::SetLargestCachedSize(size_t size) {
|
||||
if (size > ThreadCache::kLargeSizeThreshold)
|
||||
if (size > ThreadCache::kLargeSizeThreshold) {
|
||||
size = ThreadCache::kLargeSizeThreshold;
|
||||
}
|
||||
largest_active_bucket_index_ =
|
||||
PartitionRoot<internal::ThreadSafe>::SizeToBucketIndex(
|
||||
size,
|
||||
@ -512,8 +524,9 @@ ThreadCache::~ThreadCache() {
|
||||
void ThreadCache::Delete(void* tcache_ptr) {
|
||||
auto* tcache = static_cast<ThreadCache*>(tcache_ptr);
|
||||
|
||||
if (!IsValid(tcache))
|
||||
if (!IsValid(tcache)) {
|
||||
return;
|
||||
}
|
||||
|
||||
#if PA_CONFIG(THREAD_CACHE_FAST_TLS)
|
||||
internal::g_thread_cache = nullptr;
|
||||
@ -617,8 +630,9 @@ void ThreadCache::FillBucket(size_t bucket_index) {
|
||||
// some objects, then the allocation will be handled normally. Otherwise,
|
||||
// this goes to the central allocator, which will service the allocation,
|
||||
// return nullptr or crash.
|
||||
if (!slot_start)
|
||||
if (!slot_start) {
|
||||
break;
|
||||
}
|
||||
|
||||
allocated_slots++;
|
||||
PutInBucket(bucket, slot_start);
|
||||
@ -634,8 +648,9 @@ void ThreadCache::ClearBucket(Bucket& bucket, size_t limit) {
|
||||
template <bool crash_on_corruption>
|
||||
void ThreadCache::ClearBucketHelper(Bucket& bucket, size_t limit) {
|
||||
// Avoids acquiring the lock needlessly.
|
||||
if (!bucket.count || bucket.count <= limit)
|
||||
if (!bucket.count || bucket.count <= limit) {
|
||||
return;
|
||||
}
|
||||
|
||||
// This serves two purposes: error checking and avoiding stalls when grabbing
|
||||
// the lock:
|
||||
@ -717,8 +732,9 @@ void ThreadCache::ResetForTesting() {
|
||||
|
||||
size_t ThreadCache::CachedMemory() const {
|
||||
size_t total = 0;
|
||||
for (const Bucket& bucket : buckets_)
|
||||
for (const Bucket& bucket : buckets_) {
|
||||
total += bucket.count * static_cast<size_t>(bucket.slot_size);
|
||||
}
|
||||
|
||||
return total;
|
||||
}
|
||||
@ -738,8 +754,9 @@ void ThreadCache::AccumulateStats(ThreadCacheStats* stats) const {
|
||||
stats->batch_fill_count += stats_.batch_fill_count;
|
||||
|
||||
#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
|
||||
for (size_t i = 0; i < internal::kNumBuckets + 1; i++)
|
||||
for (size_t i = 0; i < internal::kNumBuckets + 1; i++) {
|
||||
stats->allocs_per_bucket_[i] += stats_.allocs_per_bucket_[i];
|
||||
}
|
||||
#endif // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
|
||||
|
||||
// cached_memory_ is not necessarily equal to |CachedMemory()| here, since
|
||||
@ -767,8 +784,9 @@ void ThreadCache::TryPurge() {
|
||||
// static
|
||||
void ThreadCache::PurgeCurrentThread() {
|
||||
auto* tcache = Get();
|
||||
if (IsValid(tcache))
|
||||
if (IsValid(tcache)) {
|
||||
tcache->Purge();
|
||||
}
|
||||
}
|
||||
|
||||
void ThreadCache::PurgeInternal() {
|
||||
@ -789,8 +807,9 @@ void ThreadCache::PurgeInternalHelper() {
|
||||
// |largest_active_bucket_index_| can be lowered at runtime, there may be
|
||||
// memory already cached in the inactive buckets. They should still be
|
||||
// purged.
|
||||
for (auto& bucket : buckets_)
|
||||
for (auto& bucket : buckets_) {
|
||||
ClearBucketHelper<crash_on_corruption>(bucket, 0);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace partition_alloc
|
||||
|
@ -194,8 +194,10 @@ class ReentrancyGuard {

} // namespace internal

#define PA_REENTRANCY_GUARD(x) \
internal::ReentrancyGuard guard { x }
#define PA_REENTRANCY_GUARD(x) \
internal::ReentrancyGuard guard { \
x \
}

#else // BUILDFLAG(PA_DCHECK_IS_ON)
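In the DCHECK build, PA_REENTRANCY_GUARD(x) above expands to a scoped guard object constructed from x. A hedged sketch of the general RAII shape such a guard usually has; this is not the PartitionAlloc definition, and all names below are invented:

#include <cassert>

class ReentrancyGuardExample {
 public:
  explicit ReentrancyGuardExample(bool& flag) : flag_(flag) {
    // Entering a scope that must not be re-entered on this thread.
    assert(!flag_ && "reentrancy detected");
    flag_ = true;
  }
  ~ReentrancyGuardExample() { flag_ = false; }

 private:
  bool& flag_;
};

#define EXAMPLE_REENTRANCY_GUARD(flag) \
  ReentrancyGuardExample reentrancy_guard { flag }

void AllocateSomething() {
  static thread_local bool in_allocator = false;
  EXAMPLE_REENTRANCY_GUARD(in_allocator);
  // ... body that must not call back into AllocateSomething() ...
}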
@ -493,8 +495,9 @@ PA_ALWAYS_INLINE bool ThreadCache::MaybePutInCache(uintptr_t slot_start,
|
||||
ClearBucket(bucket, limit / 2);
|
||||
}
|
||||
|
||||
if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed)))
|
||||
if (PA_UNLIKELY(should_purge_.load(std::memory_order_relaxed))) {
|
||||
PurgeInternal();
|
||||
}
|
||||
|
||||
*slot_size = bucket.slot_size;
|
||||
return true;
|
||||
@ -527,8 +530,9 @@ PA_ALWAYS_INLINE uintptr_t ThreadCache::GetFromCache(size_t bucket_index,
|
||||
|
||||
// Very unlikely, means that the central allocator is out of memory. Let it
|
||||
// deal with it (may return 0, may crash).
|
||||
if (PA_UNLIKELY(!bucket.freelist_head))
|
||||
if (PA_UNLIKELY(!bucket.freelist_head)) {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
PA_DCHECK(bucket.count != 0);
|
||||
@ -627,12 +631,12 @@ PA_ALWAYS_INLINE void ThreadCache::PutInBucket(Bucket& bucket,
|
||||
bucket.count++;
|
||||
}
|
||||
|
||||
void ThreadCache::RecordAllocation(size_t size) {
|
||||
PA_ALWAYS_INLINE void ThreadCache::RecordAllocation(size_t size) {
|
||||
thread_alloc_stats_.alloc_count++;
|
||||
thread_alloc_stats_.alloc_total_size += size;
|
||||
}
|
||||
|
||||
void ThreadCache::RecordDeallocation(size_t size) {
|
||||
PA_ALWAYS_INLINE void ThreadCache::RecordDeallocation(size_t size) {
|
||||
thread_alloc_stats_.dealloc_count++;
|
||||
thread_alloc_stats_.dealloc_total_size += size;
|
||||
}
|
||||
|
@ -82,7 +82,10 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
|
||||
version_incremental_(StrDupParam(params, 24)),
|
||||
hardware_(StrDupParam(params, 25)),
|
||||
is_at_least_t_(GetIntParam(params, 26)),
|
||||
is_automotive_(GetIntParam(params, 27)) {}
|
||||
is_automotive_(GetIntParam(params, 27)),
|
||||
is_at_least_u_(GetIntParam(params, 28)),
|
||||
targets_at_least_u_(GetIntParam(params, 29)),
|
||||
codename_(StrDupParam(params, 30)) {}
|
||||
|
||||
// static
|
||||
BuildInfo* BuildInfo::GetInstance() {
|
||||
|
@ -146,6 +146,12 @@ class BASE_EXPORT BuildInfo {
|
||||
|
||||
bool is_automotive() const { return is_automotive_; }
|
||||
|
||||
bool is_at_least_u() const { return is_at_least_u_; }
|
||||
|
||||
bool targets_at_least_u() const { return targets_at_least_u_; }
|
||||
|
||||
const char* codename() const { return codename_; }
|
||||
|
||||
private:
|
||||
friend struct BuildInfoSingletonTraits;
|
||||
|
||||
@ -184,6 +190,9 @@ class BASE_EXPORT BuildInfo {
|
||||
const char* const hardware_;
|
||||
const bool is_at_least_t_;
|
||||
const bool is_automotive_;
|
||||
const bool is_at_least_u_;
|
||||
const bool targets_at_least_u_;
|
||||
const char* const codename_;
|
||||
};
|
||||
|
||||
} // namespace android
|
||||
|
@ -60,7 +60,10 @@ BuildInfo::BuildInfo(const std::vector<std::string>& params)
|
||||
version_incremental_(""),
|
||||
hardware_(""),
|
||||
is_at_least_t_(false),
|
||||
is_automotive_(false) {}
|
||||
is_automotive_(false),
|
||||
is_at_least_u_(false),
|
||||
targets_at_least_u_(false),
|
||||
codename_("") {}
|
||||
|
||||
// static
|
||||
BuildInfo* BuildInfo::GetInstance() {
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include "base/base_jni_headers/FieldTrialList_jni.h"
|
||||
#include "base/lazy_instance.h"
|
||||
#include "base/metrics/field_trial.h"
|
||||
#include "base/metrics/field_trial_list_including_low_anonymity.h"
|
||||
#include "base/metrics/field_trial_params.h"
|
||||
|
||||
using base::android::ConvertJavaStringToUTF8;
|
||||
@ -77,15 +78,39 @@ static ScopedJavaLocalRef<jstring> JNI_FieldTrialList_GetVariationParameter(
env, parameters[ConvertJavaStringToUTF8(env, jparameter_key)]);
}

// JNI_FieldTrialList_LogActiveTrials() is a static function, which makes
// friending it a hassle because it must be declared in the file that the
// friend declaration is in, but its declaration can't be included in multiple
// places or things get messy and the linker gets mad. This helper class exists
// only to friend the JNI function and is, in turn, friended by
// FieldTrialListIncludingLowAnonymity, which allows for the private
// GetActiveFieldTrialGroups() to be reached.
class AndroidFieldTrialListLogActiveTrialsFriendHelper {
private:
friend void ::JNI_FieldTrialList_LogActiveTrials(JNIEnv* env);

static bool AddObserver(base::FieldTrialList::Observer* observer) {
return base::FieldTrialListIncludingLowAnonymity::AddObserver(observer);
}

static void GetActiveFieldTrialGroups(
base::FieldTrial::ActiveGroups* active_groups) {
base::FieldTrialListIncludingLowAnonymity::GetActiveFieldTrialGroups(
active_groups);
}
};

static void JNI_FieldTrialList_LogActiveTrials(JNIEnv* env) {
DCHECK(!g_trial_logger.IsCreated()); // This need only be called once.

LOG(INFO) << "Logging active field trials...";
base::FieldTrialList::AddObserver(&g_trial_logger.Get());
AndroidFieldTrialListLogActiveTrialsFriendHelper::AddObserver(
&g_trial_logger.Get());

// Log any trials that were already active before adding the observer.
std::vector<base::FieldTrial::ActiveGroup> active_groups;
base::FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
AndroidFieldTrialListLogActiveTrialsFriendHelper::GetActiveFieldTrialGroups(
&active_groups);
for (const base::FieldTrial::ActiveGroup& group : active_groups) {
TrialLogger::Log(group.trial_name, group.group_name);
}
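The comment above describes the friend-helper idiom: a small class that the restricted class befriends, and that in turn befriends the free function needing access. A generic, self-contained sketch of the idiom with hypothetical names:

void FreeFunctionNeedingAccess();  // e.g. a JNI entry point

class Restricted {
 private:
  friend class FriendHelper;  // the only friend Restricted has to declare
  static void PrivateOperation() {}
};

// Exists solely to bridge the friendship to the free function.
class FriendHelper {
 private:
  friend void ::FreeFunctionNeedingAccess();
  static void PrivateOperation() { Restricted::PrivateOperation(); }
};

void FreeFunctionNeedingAccess() {
  // Reaches Restricted::PrivateOperation() without Restricted having to name
  // (or include the declaration of) the free function itself.
  FriendHelper::PrivateOperation();
}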