Compare commits

...

No commits in common. "cf5478513dc210bdcac6596b437746108a251d24" and "063d04ae8ad09fe66cc03d419da3583ad821c444" have entirely different histories.

3463 changed files with 152382 additions and 127923 deletions

View File

@ -328,6 +328,7 @@ jobs:
key: ccache-mac-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
restore-keys: ccache-mac-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- run: brew install ninja ccache
- run: pip install setuptools
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh
@ -380,6 +381,7 @@ jobs:
key: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-${{ steps.ccache-timestamp.outputs.CCACHE_TIMESTAMP }}
restore-keys: ccache-ios-${{ matrix.arch }}-${{ hashFiles('CHROMIUM_VERSION') }}-
- run: brew install ninja ccache
- run: pip install setuptools
- run: ./get-clang.sh
- run: ccache -z
- run: ./build.sh

View File

@ -1 +1 @@
118.0.5993.65
119.0.6045.66

View File

@ -37,11 +37,13 @@ Addanki Gandhi Kishor <kishor.ag@samsung.com>
Adenilson Cavalcanti <a.cavalcanti@samsung.com>
Aditi Singh <a20.singh@samsung.com>
Aditya Bhargava <heuristicist@gmail.com>
Aditya Sharma <a2.sharma@samsung.com>
Adrian Belgun <adrian.belgun@intel.com>
Adrian Ratiu <adrian.ratiu@collabora.corp-partner.google.com>
Adrià Vilanova Martínez <me@avm99963.com>
Ahmed Elwasefi <a.m.elwasefi@gmail.com>
Ahmet Emir Ercin <ahmetemiremir@gmail.com>
Aidarbek Suleimenov <suleimenov.aidarbek@gmail.com>
Aiden Grossman <aidengrossmanpso@gmail.com>
Ajay Berwal <a.berwal@samsung.com>
Ajay Berwal <ajay.berwal@samsung.com>
@ -323,6 +325,7 @@ Dean Leitersdorf <dean.leitersdorf@gmail.com>
Debadree Chatterjee <debadree333@gmail.com>
Debashish Samantaray <d.samantaray@samsung.com>
Debug Wang <debugwang@tencent.com>
Deep Shah <deep.shah@samsung.com>
Deepak Dilip Borade <deepak.db@samsung.com>
Deepak Mittal <deepak.m1@samsung.com>
Deepak Mohan <hop2deep@gmail.com>
@ -359,6 +362,7 @@ Dongyu Lin <l2d4y3@gmail.com>
Donna Wu <donna.wu@intel.com>
Douglas F. Turner <doug.turner@gmail.com>
Drew Blaisdell <drew.blaisdell@gmail.com>
Dushyant Kant Sharma <dush.sharma@samsung.com>
Dustin Doloff <doloffd@amazon.com>
Ebrahim Byagowi <ebrahim@gnu.org>
Ebrahim Byagowi <ebraminio@gmail.com>
@ -471,6 +475,7 @@ Harshikesh Kumar <harshikeshnobug@gmail.com>
Harshit Pal <harshitp12345@gmail.com>
Hassan Salehe Matar <hassansalehe@gmail.com>
Hautio Kari <khautio@gmail.com>
He Qi <heqi899@gmail.com>
Heejin R. Chung <heejin.r.chung@samsung.com>
Heeyoun Lee <heeyoun.lee@samsung.com>
Henrique de Carvalho <decarv.henrique@gmail.com>
@ -547,6 +552,7 @@ Jaeseok Yoon <yjaeseok@gmail.com>
Jaewon Choi <jaewon.james.choi@gmail.com>
Jaewon Jung <jw.jung@navercorp.com>
Jaeyong Bae <jdragon.bae@gmail.com>
Jagadesh P <jagadeshjai1999@gmail.com>
Jagdish Chourasia <jagdish.c@samsung.com>
Jaime Soriano Pastor <jsorianopastor@gmail.com>
Jake Helfert <jake@helfert.us>
@ -772,6 +778,7 @@ Lauren Yeun Kim <lauren.yeun.kim@gmail.com>
Lauri Oherd <lauri.oherd@gmail.com>
Lavar Askew <open.hyperion@gmail.com>
Le Hoang Quyen <le.hoang.q@gmail.com>
Leena Kaushik <l1.kaushik@samsung.com>
Legend Lee <guanxian.li@intel.com>
Leith Bade <leith@leithalweapon.geek.nz>
Lei Gao <leigao@huawei.com>
@ -874,6 +881,7 @@ Matthew Willis <appamatto@gmail.com>
Matthias Reitinger <reimarvin@gmail.com>
Matthieu Rigolot <matthieu.rigolot@gmail.com>
Matthieu Vlad Hauglustaine <matt.hauglustaine@gmail.com>
Max Coplan <mchcopl@gmail.com>
Max Karolinskiy <max@brave.com>
Max Perepelitsyn <pph34r@gmail.com>
Max Schmitt <max@schmitt.mx>
@ -945,6 +953,7 @@ Nagarajan Narayanan <nagarajan.n@samsung.com>
Nagarjuna Atluri <nagarjuna.a@samsung.com>
Naiem Shaik <naiem.shaik@gmail.com>
Naman Kumar Narula <namankumarnarula@gmail.com>
Naman Yadav <naman.yadav@samsung.com>
Naoki Takano <takano.naoki@gmail.com>
Naoto Ono <onoto1998@gmail.com>
Nathan Mitchell <nathaniel.v.mitchell@gmail.com>
@ -1047,6 +1056,7 @@ Praveen Akkiraju <praveen.anp@samsung.com>
Preeti Nayak <preeti.nayak@samsung.com>
Pritam Nikam <pritam.nikam@samsung.com>
Puttaraju R <puttaraju.r@samsung.com>
Punith Nayak <npunith125@gmail.com>
Qi Tiezheng <qitiezheng@360.cn>
Qi Yang <qi1988.yang@samsung.com>
Qiang Zeng <zengqiang1@huawei.com>
@ -1130,6 +1140,7 @@ Ryuan Choi <ryuan.choi@samsung.com>
Saikrishna Arcot <saiarcot895@gmail.com>
Sajal Khandelwal <skhandelwa22@bloomberg.net>
Sajeesh Sidharthan <sajeesh.sidharthan@amd.corp-partner.google.com>
Sakib Shabir <s1.tantray@samsung.com>
Saksham Mittal <gotlouemail@gmail.com>
Salvatore Iovene <salvatore.iovene@intel.com>
Sam James <sam@gentoo.org>
@ -1207,6 +1218,7 @@ Shobhit Goel <shobhit.goel@samsung.com>
Shouqun Liu <liushouqun@xiaomi.com>
Shouqun Liu <shouqun.liu@intel.com>
Shreeram Kushwaha <shreeram.k@samsung.com>
Shrey Patel <shrey1patel2@gmail.com>
Shreyas Gopal <shreyas.g@samsung.com>
Shreyas VA <v.a.shreyas@gmail.com>
Shubham Agrawal <shubag@amazon.com>
@ -1217,6 +1229,7 @@ Siddharth Bagai <b.siddharth@samsung.com>
Siddharth Shankar <funkysidd@gmail.com>
Simeon Kuran <simeon.kuran@gmail.com>
Simon Arlott <simon.arlott@gmail.com>
Simon Cadman <simon@cadman.uk>
Simon Jackson <simon.jackson@sonocent.com>
Simon La Macchia <smacchia@amazon.com>
Siva Kumar Gunturi <siva.gunturi@samsung.com>

src/DEPS (390 changed lines): file diff suppressed because it is too large.

View File

@ -46,11 +46,6 @@ if (is_mac) {
import("//third_party/protobuf/proto_library.gni")
}
if (is_apple) {
# Buildflags to control time behavior on iOS in file shared with mac.
import("//base/time/buildflags/buildflags.gni")
}
if (is_win) {
import("//build/config/win/control_flow_guard.gni")
}
@ -91,14 +86,6 @@ assert(!is_nacl || is_nacl_saigo,
assert(!is_win || is_clang,
"only clang-cl is supported on Windows, see https://crbug.com/988071")
if (is_apple) {
assert(!use_blink || enable_mach_absolute_time_ticks,
"use_blink requires mach absolute time ticks")
assert(!is_mac || enable_mach_absolute_time_ticks,
"mac requires mach absolute time ticks")
}
# Determines whether libevent should be dep.
dep_libevent = !is_fuchsia && !is_win && !is_mac && !is_nacl
@ -107,6 +94,7 @@ use_libevent = dep_libevent && !is_ios
if (is_android || is_robolectric) {
import("//build/config/android/rules.gni")
import("//third_party/jni_zero/jni_zero.gni")
}
if (is_fuchsia) {
@ -339,6 +327,8 @@ component("base") {
"features.cc",
"features.h",
"file_version_info.h",
"files/block_tests_writing_to_special_dirs.cc",
"files/block_tests_writing_to_special_dirs.h",
"files/dir_reader_fallback.h",
"files/file.cc",
"files/file.h",
@ -440,6 +430,7 @@ component("base") {
"memory/ref_counted_memory.h",
"memory/safe_ref.h",
"memory/safe_ref_traits.h",
"memory/safety_checks.h",
"memory/scoped_policy.h",
"memory/scoped_refptr.h",
"memory/shared_memory_hooks.h",
@ -548,8 +539,6 @@ component("base") {
"power_monitor/battery_level_provider.h",
"power_monitor/battery_state_sampler.cc",
"power_monitor/battery_state_sampler.h",
"power_monitor/moving_average.cc",
"power_monitor/moving_average.h",
"power_monitor/power_monitor.cc",
"power_monitor/power_monitor.h",
"power_monitor/power_monitor_device_source.cc",
@ -831,7 +820,6 @@ component("base") {
"task/thread_pool/worker_thread_set.h",
"task/updateable_sequenced_task_runner.h",
"template_util.h",
"test/malloc_wrapper.h",
"test/scoped_logging_settings.h",
"test/spin_wait.h",
"third_party/cityhash/city.cc",
@ -898,8 +886,6 @@ component("base") {
"time/time_delta_from_string.h",
"time/time_override.cc",
"time/time_override.h",
"time/time_to_iso8601.cc",
"time/time_to_iso8601.h",
"timer/elapsed_timer.cc",
"timer/elapsed_timer.h",
"timer/hi_res_timer_manager.h",
@ -929,6 +915,7 @@ component("base") {
"types/expected.h",
"types/expected_internal.h",
"types/expected_macros.h",
"types/fixed_array.h",
"types/id_type.h",
"types/optional_ref.h",
"types/optional_util.h",
@ -965,6 +952,13 @@ component("base") {
#"system/sys_info_openbsd.cc",
]
if (is_apple || current_os == "freebsd" || current_os == "openbsd") {
sources += [
"posix/sysctl.cc",
"posix/sysctl.h",
]
}
if (is_posix) {
sources += [
"debug/debugger_posix.cc",
@ -1055,10 +1049,6 @@ component("base") {
# to provide the appropriate `#define` here.
defines += [ "IS_RAW_PTR_IMPL" ]
if (is_apple) {
deps += [ "//base/time/buildflags:buildflags" ]
}
if (build_rust_json_reader) {
deps += [ "//third_party/rust/serde_json_lenient/v0_1/wrapper" ]
}
@ -1498,6 +1488,8 @@ component("base") {
"files/important_file_writer_cleaner.h",
"files/scoped_temp_dir.cc",
"files/scoped_temp_dir.h",
"files/scoped_temp_file.cc",
"files/scoped_temp_file.h",
"json/json_file_value_serializer.cc",
"json/json_file_value_serializer.h",
"memory/discardable_memory.cc",
@ -1682,6 +1674,7 @@ component("base") {
"memory/platform_shared_memory_region_win.cc",
"message_loop/message_pump_win.cc",
"message_loop/message_pump_win.h",
"moving_window.h",
"native_library_win.cc",
"power_monitor/battery_level_provider_win.cc",
"power_monitor/power_monitor_device_source_win.cc",
@ -1830,6 +1823,8 @@ component("base") {
"win/win_util.cc",
"win/win_util.h",
"win/wincrypt_shim.h",
"win/window_enumerator.cc",
"win/window_enumerator.h",
"win/windows_defines.inc",
"win/windows_h_disallowed.h",
"win/windows_types.h",
@ -1911,8 +1906,8 @@ component("base") {
"apple/scoped_mach_port.h",
"apple/scoped_mach_vm.cc",
"apple/scoped_mach_vm.h",
"apple/scoped_nsautorelease_pool.cc",
"apple/scoped_nsautorelease_pool.h",
"apple/scoped_nsautorelease_pool.mm",
"apple/scoped_nsobject.h",
"apple/scoped_objc_class_swizzler.h",
"apple/scoped_objc_class_swizzler.mm",
@ -1976,7 +1971,6 @@ component("base") {
"mac/scoped_mach_msg_destroy.h",
"mac/scoped_sending_event.h",
"mac/scoped_sending_event.mm",
"mac/wrap_cg_display.h",
"message_loop/message_pump_kqueue.cc",
"message_loop/message_pump_kqueue.h",
"native_library_mac.mm",

View File

@ -8,6 +8,7 @@
#include "base/check.h"
#include "base/dcheck_is_on.h"
#include "base/debug/crash_logging.h"
#include "base/immediate_crash.h"
#include "build/build_config.h"
@ -18,6 +19,32 @@
#endif
namespace base::allocator::dispatcher::internal {
namespace {
base::debug::CrashKeySize GetCrashKeySize(const std::string& crash_key_name) {
if (std::size(crash_key_name) <= 32ul) {
return base::debug::CrashKeySize::Size32;
}
if (std::size(crash_key_name) <= 64ul) {
return base::debug::CrashKeySize::Size64;
}
if (std::size(crash_key_name) <= 256ul) {
return base::debug::CrashKeySize::Size256;
}
CHECK(std::size(crash_key_name) <= 1024ul);
return base::debug::CrashKeySize::Size1024;
}
#if DCHECK_IS_ON()
void Swap(std::atomic_bool& lh_op, std::atomic_bool& rh_op) {
auto lh_op_value = lh_op.load(std::memory_order_relaxed);
auto rh_op_value = rh_op.load(std::memory_order_relaxed);
CHECK(lh_op.compare_exchange_strong(lh_op_value, rh_op_value));
CHECK(rh_op.compare_exchange_strong(rh_op_value, lh_op_value));
}
#endif
} // namespace
void* MMapAllocator::AllocateMemory(size_t size_in_bytes) {
void* const mmap_res = mmap(nullptr, size_in_bytes, PROT_READ | PROT_WRITE,
@ -43,8 +70,31 @@ bool MMapAllocator::FreeMemoryForTesting(void* pointer_to_allocated,
return (munmap_res == 0);
}
PThreadTLSSystem::PThreadTLSSystem() = default;
PThreadTLSSystem::PThreadTLSSystem(PThreadTLSSystem&& other) {
std::swap(crash_key_, other.crash_key_);
std::swap(data_access_key_, other.data_access_key_);
#if DCHECK_IS_ON()
Swap(initialized_, other.initialized_);
#endif
}
PThreadTLSSystem& PThreadTLSSystem::operator=(PThreadTLSSystem&& other) {
std::swap(crash_key_, other.crash_key_);
std::swap(data_access_key_, other.data_access_key_);
#if DCHECK_IS_ON()
Swap(initialized_, other.initialized_);
#endif
return *this;
}
bool PThreadTLSSystem::Setup(
OnThreadTerminationFunction thread_termination_function) {
OnThreadTerminationFunction thread_termination_function,
const base::StringPiece instance_id) {
#if DCHECK_IS_ON()
// Initialize must happen outside of the allocation path. Therefore, it is
// secure to verify with DCHECK.
@ -61,6 +111,18 @@ bool PThreadTLSSystem::Setup(
// However, we strongly recommend to setup the TLS system as early as possible
// to avoid exceeding this limit.
// Some crashes might be caused by the initialization being performed too late
// and running into the problems mentioned above. Since there's no way to
// handle this issue programmatically, we include the key into the crashpad
// report to allow for later inspection.
std::string crash_key_name = "tls_system-";
crash_key_name += instance_id;
crash_key_ = base::debug::AllocateCrashKeyString(
crash_key_name.c_str(), GetCrashKeySize(crash_key_name));
base::debug::SetCrashKeyString(crash_key_,
base::NumberToString(data_access_key_));
return (0 == key_create_res);
}
@ -71,6 +133,9 @@ bool PThreadTLSSystem::TearDownForTesting() {
DCHECK(initialized_.exchange(false, std::memory_order_acq_rel));
#endif
base::debug::ClearCrashKeyString(crash_key_);
crash_key_ = nullptr;
auto const key_delete_res = pthread_key_delete(data_access_key_);
return (0 == key_delete_res);
}
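A small standalone sketch, not part of the diff, of the bucket selection GetCrashKeySize performs above: the crash key name is built from the "tls_system-" prefix plus the caller's instance id and is rounded up to the smallest CrashKeySize bucket that fits it. The CrashKeySize stand-in and the "allocation" instance id below are illustrative assumptions.

#include <cassert>
#include <string>

// Stand-in for base::debug::CrashKeySize, only so this sketch is
// self-contained.
enum class CrashKeySizeSketch { Size32, Size64, Size256, Size1024 };

CrashKeySizeSketch GetCrashKeySizeSketch(const std::string& name) {
  if (name.size() <= 32u) return CrashKeySizeSketch::Size32;
  if (name.size() <= 64u) return CrashKeySizeSketch::Size64;
  if (name.size() <= 256u) return CrashKeySizeSketch::Size256;
  return CrashKeySizeSketch::Size1024;  // The real code CHECKs <= 1024 first.
}

int main() {
  // "tls_system-allocation" is 21 characters, so it fits the smallest bucket.
  assert(GetCrashKeySizeSketch("tls_system-allocation") ==
         CrashKeySizeSketch::Size32);
  // A 100-character name is rounded up to the 256-byte bucket.
  assert(GetCrashKeySizeSketch(std::string(100, 'x')) ==
         CrashKeySizeSketch::Size256);
  return 0;
}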

View File

@ -24,6 +24,7 @@
#include "base/base_export.h"
#include "base/check.h"
#include "base/compiler_specific.h"
#include "base/strings/string_piece.h"
#include <pthread.h>
@ -33,6 +34,29 @@
#define DISABLE_TSAN_INSTRUMENTATION
#endif
#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)
// Verify that a condition holds and cancel the process in case it doesn't. The
// functionality is similar to RAW_CHECK but includes more information in the
// logged messages. It is non allocating to prevent recursions.
#define TLS_RAW_CHECK(error_message, condition) \
TLS_RAW_CHECK_IMPL(error_message, condition, __FILE__, __LINE__)
#define TLS_RAW_CHECK_IMPL(error_message, condition, file, line) \
do { \
if (!(condition)) { \
constexpr const char* message = \
"TLS System: " error_message " Failed condition '" #condition \
"' in (" file "@" STR(line) ").\n"; \
::logging::RawCheckFailure(message); \
} \
} while (0)
namespace base::debug {
struct CrashKeyString;
}
namespace base::allocator::dispatcher {
namespace internal {
@ -71,11 +95,20 @@ using OnThreadTerminationFunction = void (*)(void*);
// The TLS system used by default for the thread local storage. It stores and
// retrieves thread specific data pointers.
struct BASE_EXPORT PThreadTLSSystem {
class BASE_EXPORT PThreadTLSSystem {
public:
PThreadTLSSystem();
PThreadTLSSystem(const PThreadTLSSystem&) = delete;
PThreadTLSSystem(PThreadTLSSystem&&);
PThreadTLSSystem& operator=(const PThreadTLSSystem&) = delete;
PThreadTLSSystem& operator=(PThreadTLSSystem&&);
// Initialize the TLS system to store a data set for different threads.
// @param thread_termination_function An optional function which will be
// invoked upon termination of a thread.
bool Setup(OnThreadTerminationFunction thread_termination_function);
bool Setup(OnThreadTerminationFunction thread_termination_function,
const base::StringPiece instance_id);
// Tear down the TLS system. After completing tear down, the thread
// termination function passed to Setup will not be invoked anymore.
bool TearDownForTesting();
@ -88,6 +121,7 @@ struct BASE_EXPORT PThreadTLSSystem {
bool SetThreadSpecificData(void* data);
private:
base::debug::CrashKeyString* crash_key_ = nullptr;
pthread_key_t data_access_key_ = 0;
#if DCHECK_IS_ON()
// From POSIX standard at https://www.open-std.org/jtc1/sc22/open/n4217.pdf:
@ -162,16 +196,21 @@ template <typename PayloadType,
size_t AllocationChunkSize,
bool IsDestructibleForTesting>
struct ThreadLocalStorage {
ThreadLocalStorage() : root_(AllocateAndInitializeChunk()) { Initialize(); }
explicit ThreadLocalStorage(const base::StringPiece instance_id)
: root_(AllocateAndInitializeChunk()) {
Initialize(instance_id);
}
// Create a new instance of |ThreadLocalStorage| using the passed allocator
// and TLS system. This initializes the underlying TLS system and creates the
// first chunk of data.
ThreadLocalStorage(AllocatorType allocator, TLSSystemType tlsSystem)
ThreadLocalStorage(const base::StringPiece instance_id,
AllocatorType allocator,
TLSSystemType tls_system)
: allocator_(std::move(allocator)),
tls_system_(std::move(tlsSystem)),
tls_system_(std::move(tls_system)),
root_(AllocateAndInitializeChunk()) {
Initialize();
Initialize(instance_id);
}
// Deletes an instance of |ThreadLocalStorage| and delete all the data chunks
@ -207,7 +246,8 @@ struct ThreadLocalStorage {
// We might be called in the course of handling a memory allocation. We do
// not use CHECK since they might allocate and cause a recursion.
RAW_CHECK(tls_system.SetThreadSpecificData(slot));
TLS_RAW_CHECK("Failed to set thread specific data.",
tls_system.SetThreadSpecificData(slot));
// Reset the content to wipe out any previous data.
Reset(slot->item);
@ -307,22 +347,24 @@ struct ThreadLocalStorage {
// SingleSlot and reset the is_used flag.
auto* const slot = static_cast<SingleSlot*>(data);
// We might be called in the course of handling a memory allocation. We do
// not use CHECK since they might allocate and cause a recursion.
RAW_CHECK(slot && slot->is_used.test_and_set());
// We might be called in the course of handling a memory allocation.
// Therefore, do not use CHECK since it might allocate and cause a
// recursion.
TLS_RAW_CHECK("Received an invalid slot.",
slot && slot->is_used.test_and_set());
slot->is_used.clear(std::memory_order_relaxed);
}
// Perform common initialization during construction of an instance.
void Initialize() {
void Initialize(const base::StringPiece instance_id) {
// The constructor must be called outside of the allocation path. Therefore,
// it is secure to verify with CHECK.
// Passing MarkSlotAsFree as thread_termination_function we ensure the
// slot/item assigned to the finished thread will be returned to the pool of
// unused items.
CHECK(dereference(tls_system_).Setup(&MarkSlotAsFree));
CHECK(dereference(tls_system_).Setup(&MarkSlotAsFree, instance_id));
}
Chunk* AllocateAndInitializeChunk() {
@ -331,7 +373,8 @@ struct ThreadLocalStorage {
// We might be called in the course of handling a memory allocation. We do
// not use CHECK since they might allocate and cause a recursion.
RAW_CHECK(uninitialized_memory != nullptr);
TLS_RAW_CHECK("Failed to allocate memory for new chunk.",
uninitialized_memory != nullptr);
return new (uninitialized_memory) Chunk{};
}
@ -428,5 +471,10 @@ using ThreadLocalStorage =
} // namespace base::allocator::dispatcher
#undef TLS_RAW_CHECK_IMPL
#undef TLS_RAW_CHECK
#undef STR
#undef STR_HELPER
#endif // USE_LOCAL_TLS_EMULATION()
#endif // BASE_ALLOCATOR_DISPATCHER_TLS_H_
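As a side note on the TLS_RAW_CHECK plumbing above: STR and STR_HELPER are the usual two-level preprocessor stringification, which is what lets the macro embed the current line number as a string literal at compile time. A minimal standalone illustration, with a main() harness added only for demonstration:

#include <cstdio>

#define STR_HELPER(x) #x
#define STR(x) STR_HELPER(x)

int main() {
  // Without the extra STR_HELPER indirection, #__LINE__ would produce the
  // literal text "__LINE__"; with it, __LINE__ expands first and the line
  // number itself is stringified and concatenated into the message.
  std::puts("compiled at line " STR(__LINE__));
  return 0;
}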

View File

@ -404,5 +404,17 @@ const base::FeatureParam<int> kThreadCacheMinCachedMemoryForPurgingBytes{
"ThreadCacheMinCachedMemoryForPurgingBytes",
partition_alloc::kMinCachedMemoryForPurgingBytes};
// An apparent quarantine leak in the buffer partition unacceptably
// bloats memory when MiraclePtr is enabled in the renderer process.
// We believe we have found and patched the leak, but out of an
// abundance of caution, we provide this toggle that allows us to
// wholly disable MiraclePtr in the buffer partition, if necessary.
//
// TODO(crbug.com/1444624): this is unneeded once
// MiraclePtr-for-Renderer launches.
BASE_FEATURE(kPartitionAllocDisableBRPInBufferPartition,
"PartitionAllocDisableBRPInBufferPartition",
FEATURE_DISABLED_BY_DEFAULT);
} // namespace features
} // namespace base
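For context, callers would typically consult this new kill switch through base::FeatureList, as in the minimal sketch below. The helper name is hypothetical; the wiring that actually configures BRP for the buffer partition is not part of this diff.

#include "base/allocator/partition_alloc_features.h"
#include "base/feature_list.h"

// Hypothetical helper: BRP stays enabled in the buffer partition unless the
// kill switch introduced above is turned on.
bool ShouldEnableBrpInBufferPartition() {
  return !base::FeatureList::IsEnabled(
      base::features::kPartitionAllocDisableBRPInBufferPartition);
}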

View File

@ -219,6 +219,8 @@ BASE_EXPORT BASE_DECLARE_FEATURE(
extern const BASE_EXPORT base::FeatureParam<int>
kThreadCacheMinCachedMemoryForPurgingBytes;
BASE_EXPORT BASE_DECLARE_FEATURE(kPartitionAllocDisableBRPInBufferPartition);
} // namespace features
} // namespace base

View File

@ -134,6 +134,7 @@ source_set("allocator_core") {
"compressed_pointer.h",
"dangling_raw_ptr_checks.cc",
"dangling_raw_ptr_checks.h",
"flags.h",
"freeslot_bitmap.h",
"freeslot_bitmap_constants.h",
"gwp_asan_support.cc",
@ -285,6 +286,7 @@ source_set("allocator_core") {
configs += [
":partition_alloc_implementation",
":memory_tagging",
"//build/config/compiler:wexit_time_destructors",
]
deps = [ ":allocator_base" ]
public_configs = []
@ -321,7 +323,6 @@ source_set("allocator_core") {
]
}
configs += [ "//build/config/compiler:wexit_time_destructors" ]
configs -= _remove_configs
configs += _add_configs
@ -503,7 +504,10 @@ source_set("allocator_base") {
":partition_alloc_buildflags",
]
configs += [ ":partition_alloc_implementation" ]
configs += [
":partition_alloc_implementation",
"//build/config/compiler:wexit_time_destructors",
]
deps = []
if (is_fuchsia) {
@ -528,7 +532,10 @@ source_set("allocator_shim") {
sources = []
deps = [ ":allocator_base" ]
all_dependent_configs = []
configs += [ ":partition_alloc_implementation" ]
configs += [
":partition_alloc_implementation",
"//build/config/compiler:wexit_time_destructors",
]
configs -= _remove_configs
configs += _add_configs
@ -632,6 +639,8 @@ source_set("raw_ptr") {
"pointers/raw_ref.h",
]
sources = []
configs += [ "//build/config/compiler:wexit_time_destructors" ]
if (enable_backup_ref_ptr_support) {
sources += [
"pointers/raw_ptr_backup_ref_impl.cc",
@ -706,6 +715,7 @@ buildflag_header("partition_alloc_buildflags") {
"ENABLE_DANGLING_RAW_PTR_FEATURE_FLAG=$enable_dangling_raw_ptr_feature_flag",
"ENABLE_DANGLING_RAW_PTR_PERF_EXPERIMENT=$enable_dangling_raw_ptr_perf_experiment",
"ENABLE_POINTER_SUBTRACTION_CHECK=$enable_pointer_subtraction_check",
"ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK=$enable_pointer_arithmetic_trait_check",
"BACKUP_REF_PTR_POISON_OOB_PTR=$backup_ref_ptr_poison_oob_ptr",
"PUT_REF_COUNT_IN_PREVIOUS_SLOT=$put_ref_count_in_previous_slot",
"USE_ASAN_BACKUP_REF_PTR=$use_asan_backup_ref_ptr",
@ -729,13 +739,16 @@ buildflag_header("partition_alloc_buildflags") {
"ENABLE_PKEYS=$enable_pkeys",
"ENABLE_THREAD_ISOLATION=$enable_pkeys",
]
}
if (is_apple) {
# TODO(crbug.com/1414153): once TimeTicks::Now behavior is unified on iOS,
# this should be removed.
flags += [ "PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS=" +
"$partition_alloc_enable_mach_absolute_time_ticks" ]
}
buildflag_header("raw_ptr_buildflags") {
header = "raw_ptr_buildflags.h"
flags = [
"RAW_PTR_ZERO_ON_CONSTRUCT=$raw_ptr_zero_on_construct",
"RAW_PTR_ZERO_ON_MOVE=$raw_ptr_zero_on_move",
"RAW_PTR_ZERO_ON_DESTRUCT=$raw_ptr_zero_on_destruct",
]
}
buildflag_header("chromecast_buildflags") {
@ -775,6 +788,7 @@ group("buildflags") {
":chromeos_buildflags",
":debugging_buildflags",
":partition_alloc_buildflags",
":raw_ptr_buildflags",
]
}
# TODO(crbug.com/1151236): After making partition_alloc a standalone library,

View File

@ -9,10 +9,14 @@ import("//build_overrides/build.gni")
# If embedders want to use PartitionAlloc, they need to create their own
# //build_overrides/partition_alloc.gni and define their own PartitionAlloc
# configuration.
use_partition_alloc_as_malloc_default = false
use_allocator_shim_default = false
enable_backup_ref_ptr_support_default = false
put_ref_count_in_previous_slot_default = true
enable_backup_ref_ptr_slow_checks_default = false
enable_dangling_raw_ptr_checks_default = false
# This is the default build configuration for pointers/raw_ptr*.
raw_ptr_zero_on_construct_default = true
raw_ptr_zero_on_move_default = true
raw_ptr_zero_on_destruct_default = false

View File

@ -2,7 +2,6 @@ digraph {
node[shape=box]
edge[dir=both]
compound = true
bgcolor = transparent
dpi = 192
nodesep = 0.91
// Allows aligning nodes in different subgraphs.

Binary file not shown (image size: 22 KiB before, 41 KiB after).

View File

@ -1,5 +1,4 @@
digraph {
graph[bgcolor=transparent]
node[shape=plaintext]
edge[style=dashed, color=crimson]

Binary file not shown (image size: 9.2 KiB before, 10 KiB after).

View File

@ -1,5 +1,4 @@
digraph G {
graph[bgcolor=transparent]
node[shape=box,style="filled,rounded",color=deepskyblue]
subgraph cluster_tc {

Binary file not shown (image size: 11 KiB before, 15 KiB after).

View File

@ -1,5 +1,4 @@
digraph G {
graph[bgcolor=transparent]
node[shape=plaintext]
edge[style=dashed]

Binary file not shown (image size: 20 KiB before, 26 KiB after).

View File

@ -0,0 +1,101 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This header provides a type-safe way of storing OR-combinations of enum
// values.
//
// The traditional C++ approach for storing OR-combinations of enum values is to
// use an int or unsigned int variable. The inconvenience with this approach is
// that there's no type checking at all; any enum value can be OR'd with any
// other enum value and passed on to a function that takes an int or unsigned
// int.
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_FLAGS_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_FLAGS_H_
#include <type_traits>
namespace partition_alloc::internal {
// Returns `T` if and only if `EnumType` is a scoped enum.
template <typename EnumType, typename T = EnumType>
using IfEnum = std::enable_if_t<
std::is_enum_v<EnumType> &&
!std::is_convertible_v<EnumType, std::underlying_type_t<EnumType>>,
T>;
// We assume `EnumType` defines `kMaxValue` which has the largest value and all
// powers of two are represented in `EnumType`.
template <typename EnumType>
constexpr inline EnumType kAllFlags = static_cast<IfEnum<EnumType>>(
(static_cast<std::underlying_type_t<EnumType>>(EnumType::kMaxValue) << 1) -
1);
template <typename EnumType>
constexpr inline IfEnum<EnumType, bool> AreValidFlags(EnumType flags) {
const auto raw_flags = static_cast<std::underlying_type_t<EnumType>>(flags);
const auto raw_all_flags =
static_cast<std::underlying_type_t<EnumType>>(kAllFlags<EnumType>);
return (raw_flags & ~raw_all_flags) == 0;
}
// Checks `subset` is a subset of `superset` or not.
template <typename EnumType>
constexpr inline IfEnum<EnumType, bool> ContainsFlags(EnumType superset,
EnumType subset) {
return (superset & subset) == subset;
}
// Removes flags `target` from `from`.
template <typename EnumType>
constexpr inline IfEnum<EnumType> RemoveFlags(EnumType from, EnumType target) {
return from & ~target;
}
// A macro to define binary arithmetic over `EnumType`.
// Use inside `namespace partition_alloc::internal`.
#define PA_DEFINE_OPERATORS_FOR_FLAGS(EnumType) \
[[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator&( \
const EnumType& lhs, const EnumType& rhs) { \
return static_cast<EnumType>( \
static_cast<std::underlying_type_t<EnumType>>(lhs) & \
static_cast<std::underlying_type_t<EnumType>>(rhs)); \
} \
[[maybe_unused]] inline constexpr EnumType& operator&=( \
EnumType& lhs, const EnumType& rhs) { \
lhs = lhs & rhs; \
return lhs; \
} \
[[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator|( \
const EnumType& lhs, const EnumType& rhs) { \
return static_cast<EnumType>( \
static_cast<std::underlying_type_t<EnumType>>(lhs) | \
static_cast<std::underlying_type_t<EnumType>>(rhs)); \
} \
[[maybe_unused]] inline constexpr EnumType& operator|=( \
EnumType& lhs, const EnumType& rhs) { \
lhs = lhs | rhs; \
return lhs; \
} \
[[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator^( \
const EnumType& lhs, const EnumType& rhs) { \
return static_cast<EnumType>( \
static_cast<std::underlying_type_t<EnumType>>(lhs) ^ \
static_cast<std::underlying_type_t<EnumType>>(rhs)); \
} \
[[maybe_unused]] inline constexpr EnumType& operator^=( \
EnumType& lhs, const EnumType& rhs) { \
lhs = lhs ^ rhs; \
return lhs; \
} \
[[maybe_unused]] [[nodiscard]] inline constexpr EnumType operator~( \
const EnumType& val) { \
return static_cast<EnumType>( \
static_cast<std::underlying_type_t<EnumType>>(kAllFlags<EnumType>) & \
~static_cast<std::underlying_type_t<EnumType>>(val)); \
} \
static_assert(true) /* semicolon here */
} // namespace partition_alloc::internal
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_FLAGS_H_
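A short usage sketch of the helpers this new header introduces, assuming it is available on the include path. The Permission enum and Example() function are hypothetical; they only illustrate the intended pattern: a scoped enum with kMaxValue, bitwise operators generated by PA_DEFINE_OPERATORS_FOR_FLAGS, and type-checked queries via ContainsFlags and RemoveFlags.

#include "base/allocator/partition_allocator/flags.h"

namespace partition_alloc::internal {

// Hypothetical flag set; kMaxValue is required so kAllFlags<Permission>
// covers every defined bit.
enum class Permission {
  kNone = 0,
  kRead = 1 << 0,
  kWrite = 1 << 1,
  kExec = 1 << 2,
  kMaxValue = kExec,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(Permission);

}  // namespace partition_alloc::internal

void Example() {
  using partition_alloc::internal::ContainsFlags;
  using partition_alloc::internal::Permission;
  using partition_alloc::internal::RemoveFlags;

  // Combining flags is type-checked: OR-ing in a plain int or an unrelated
  // enum no longer compiles.
  constexpr Permission rw = Permission::kRead | Permission::kWrite;
  static_assert(ContainsFlags(rw, Permission::kRead), "");
  static_assert(!ContainsFlags(rw, Permission::kExec), "");
  static_assert(RemoveFlags(rw, Permission::kWrite) == Permission::kRead, "");
}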

View File

@ -56,7 +56,7 @@ void* GwpAsanSupport::MapRegion(size_t slot_count,
{
internal::ScopedGuard locker{internal::PartitionRootLock(root.get())};
super_page_span_start = bucket->AllocNewSuperPageSpanForGwpAsan(
root.get(), super_page_count, 0);
root.get(), super_page_count, AllocFlags::kNone);
if (!super_page_span_start) {
return nullptr;

View File

@ -19,7 +19,6 @@
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "base/allocator/partition_allocator/thread_isolation/alignment.h"
#include "build/build_config.h"

View File

@ -118,7 +118,16 @@ void PartitionAllocator::init(PartitionOptions opts) {
<< "Cannot use a thread cache when PartitionAlloc is malloc().";
#endif
partition_root_.Init(opts);
MemoryReclaimer::Instance()->RegisterPartition(&partition_root_);
#if BUILDFLAG(ENABLE_THREAD_ISOLATION)
// The MemoryReclaimer won't have write access to the partition, so skip
// registration.
const bool use_memory_reclaimer = !opts.thread_isolation.enabled;
#else
constexpr bool use_memory_reclaimer = true;
#endif
if (use_memory_reclaimer) {
MemoryReclaimer::Instance()->RegisterPartition(&partition_root_);
}
}
} // namespace partition_alloc

View File

@ -101,6 +101,11 @@ declare_args() {
# Enables a bounds check when two pointers (at least one being raw_ptr) are
# subtracted (if supported by the underlying implementation).
enable_pointer_subtraction_check = false
# Enables a compile-time check that all raw_ptrs to which arithmetic
# operations are to be applied are annotated with the AllowPtrArithmetic
# trait,
enable_pointer_arithmetic_trait_check = false
}
declare_args() {
@ -200,12 +205,6 @@ declare_args() {
# Shadow metadata is still under development and only supports Linux
# for now.
enable_shadow_metadata = false
if (is_apple) {
# TODO(crbug.com/1414153): this should be removed once the use of mach
# absolute time ticks is successfully launched on iOS.
partition_alloc_enable_mach_absolute_time_ticks = true
}
}
# *Scan is currently only used by Chromium, and supports only 64-bit.
@ -308,14 +307,6 @@ assert(!use_asan_backup_ref_ptr || is_asan,
assert(!use_asan_unowned_ptr || is_asan,
"AsanUnownedPtr requires AddressSanitizer")
if (is_apple) {
assert(!use_blink || partition_alloc_enable_mach_absolute_time_ticks,
"use_blink requires partition_alloc_enable_mach_absolute_time_ticks")
assert(!is_mac || partition_alloc_enable_mach_absolute_time_ticks,
"mac requires partition_alloc_enable_mach_absolute_time_ticks")
}
# AsanBackupRefPtr is not supported outside Chromium. The implementation is
# entangled with `//base`. The code is only physically located with the rest of
# `raw_ptr` to keep it together.
@ -333,3 +324,21 @@ declare_args() {
}
assert(!enable_pkeys || (is_linux && target_cpu == "x64"),
"Pkeys are only supported on x64 linux")
# Some implementations of raw_ptr<>, like BackupRefPtr, require zeroing when
# constructing, destructing or moving out of a pointer. Some don't, like
# NoOpImpl. Setting these to true, triggers zeroing even for implementations
# that don't require it. This is the recommended setting.
# Caveat: _zero_on_move and _on_destruct will prevent the type from being
# trivially copyable, _zero_on_construct and _on_destruct will prevent the
# type from being trivially default constructible.
#
# Setting to false will make raw_ptr<> behave more like raw C++ pointer `T*`,
# making NoOpImpl act like an actual no-op, so use it if you're worried about
# performance of your project. Use at your own risk, as it's unsupported and
# untested within Chromium.
declare_args() {
raw_ptr_zero_on_construct = raw_ptr_zero_on_construct_default
raw_ptr_zero_on_move = raw_ptr_zero_on_move_default
raw_ptr_zero_on_destruct = raw_ptr_zero_on_destruct_default
}
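To make the triviality caveat above concrete, here is a toy C++ sketch, not the real raw_ptr implementation: a wrapper that zeroes on construct, on move and on destruct necessarily gains user-provided special members, so it is no longer trivially default constructible or trivially copyable, while the all-false configuration keeps plain raw pointer semantics.

#include <type_traits>

// All zeroing options off: behaves like a plain T*.
template <typename T>
struct PlainPtrSketch {
  T* ptr;
};

// Zero on construct, move and destruct, as BackupRefPtr-style
// implementations require.
template <typename T>
struct ZeroingPtrSketch {
  ZeroingPtrSketch() : ptr(nullptr) {}  // zero on construct
  ZeroingPtrSketch(ZeroingPtrSketch&& other) noexcept : ptr(other.ptr) {
    other.ptr = nullptr;                // zero on move
  }
  ~ZeroingPtrSketch() { ptr = nullptr; }  // zero on destruct
  T* ptr;
};

static_assert(std::is_trivially_default_constructible_v<PlainPtrSketch<int>>, "");
static_assert(std::is_trivially_copyable_v<PlainPtrSketch<int>>, "");
static_assert(!std::is_trivially_default_constructible_v<ZeroingPtrSketch<int>>, "");
static_assert(!std::is_trivially_copyable_v<ZeroingPtrSketch<int>>, "");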

View File

@ -5,6 +5,8 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_CXX20_IS_CONSTANT_EVALUATED_H_
#include <type_traits>
namespace partition_alloc::internal::base {
// std::is_constant_evaluated was introduced in C++20. PartitionAlloc's minimum
@ -12,7 +14,6 @@ namespace partition_alloc::internal::base {
#if defined(__cpp_lib_is_constant_evaluated) && \
__cpp_lib_is_constant_evaluated >= 201811L
#include <type_traits>
using std::is_constant_evaluated;
#else

View File

@ -23,10 +23,10 @@ uintptr_t GetStackEnd();
// Record a stack trace with up to |count| frames into |trace|. Returns the
// number of frames read.
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
size_t CollectStackTrace(void** trace, size_t count);
size_t CollectStackTrace(const void** trace, size_t count);
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
void PrintStackTrace(void** trace, size_t count);
void PrintStackTrace(const void** trace, size_t count);
#if BUILDFLAG(IS_POSIX)
PA_COMPONENT_EXPORT(PARTITION_ALLOC)

View File

@ -47,7 +47,7 @@ _Unwind_Reason_Code TraceStackFrame(_Unwind_Context* context, void* arg) {
} // namespace
size_t CollectStackTrace(void** trace, size_t count) {
size_t CollectStackTrace(const void** trace, size_t count) {
StackCrawlState state(reinterpret_cast<uintptr_t*>(trace), count);
_Unwind_Backtrace(&TraceStackFrame, &state);
return state.frame_count;

View File

@ -9,7 +9,7 @@
namespace partition_alloc::internal::base::debug {
size_t CollectStackTrace(void** trace, size_t count) {
size_t CollectStackTrace(const void** trace, size_t count) {
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
@ -17,8 +17,7 @@ size_t CollectStackTrace(void** trace, size_t count) {
// Regarding Linux and Android, backtrace API internally invokes malloc().
// So the API is not available inside memory allocation. Instead try tracing
// using frame pointers.
return base::debug::TraceStackFramePointers(const_cast<const void**>(trace),
count, 0);
return base::debug::TraceStackFramePointers(trace, count, 0);
#else
// Not able to obtain stack traces.
return 0;

View File

@ -19,7 +19,7 @@
namespace partition_alloc::internal::base::debug {
size_t CollectStackTrace(void** trace, size_t count) {
size_t CollectStackTrace(const void** trace, size_t count) {
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
@ -28,7 +28,7 @@ size_t CollectStackTrace(void** trace, size_t count) {
// Though the backtrace API man page does not list any possible negative
// return values, we take no chance.
return base::saturated_cast<size_t>(
backtrace(trace, base::saturated_cast<int>(count)));
backtrace(const_cast<void**>(trace), base::saturated_cast<int>(count)));
#else
// Not able to obtain stack traces.
return 0;

View File

@ -274,7 +274,7 @@ void UpdateBaseAddress(unsigned permissions,
#endif // !BUILDFLAG(IS_ANDROID)
void PrintStackTraceInternal(void* const* trace, size_t count) {
void PrintStackTraceInternal(const void** trace, size_t count) {
int fd = PA_HANDLE_EINTR(open("/proc/self/maps", O_RDONLY));
if (fd == -1) {
PA_RAW_LOG(ERROR, "Failed to open /proc/self/maps\n");
@ -362,7 +362,7 @@ void PrintStackTraceInternal(void* const* trace, size_t count) {
#if BUILDFLAG(IS_APPLE)
// Since /proc/self/maps is not available, use dladdr() to obtain module
// names and offsets inside the modules from the given addresses.
void PrintStackTraceInternal(void* const* trace, size_t size) {
void PrintStackTraceInternal(const void* const* trace, size_t size) {
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
@ -392,7 +392,7 @@ void PrintStackTraceInternal(void* const* trace, size_t size) {
} // namespace
void PrintStackTrace(void** trace, size_t count) {
void PrintStackTrace(const void** trace, size_t count) {
PrintStackTraceInternal(trace, count);
}

View File

@ -16,7 +16,7 @@ namespace partition_alloc::internal::base::debug {
namespace {
void PrintStackTraceInternal(void** trace, size_t count) {
void PrintStackTraceInternal(const void** trace, size_t count) {
HANDLE process_handle = OpenProcess(
PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, GetCurrentProcId());
if (!process_handle) {
@ -92,12 +92,12 @@ void PrintStackTraceInternal(void** trace, size_t count) {
} // namespace
PA_NOINLINE size_t CollectStackTrace(void** trace, size_t count) {
PA_NOINLINE size_t CollectStackTrace(const void** trace, size_t count) {
// When walking our own stack, use CaptureStackBackTrace().
return CaptureStackBackTrace(0, count, trace, NULL);
return CaptureStackBackTrace(0, count, const_cast<void**>(trace), NULL);
}
void PrintStackTrace(void** trace, size_t count) {
void PrintStackTrace(const void** trace, size_t count) {
PrintStackTraceInternal(trace, count);
}

View File

@ -116,7 +116,7 @@ LogMessage::~LogMessage() {
// attached.
if (severity_ == LOGGING_FATAL) {
constexpr size_t kMaxTracesOfLoggingFatal = 32u;
void* traces[kMaxTracesOfLoggingFatal];
const void* traces[kMaxTracesOfLoggingFatal];
size_t num_traces =
base::debug::CollectStackTrace(traces, kMaxTracesOfLoggingFatal);
base::debug::PrintStackTrace(traces, num_traces);

View File

@ -6,9 +6,7 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_BASE_LOG_MESSAGE_H_
#include <stddef.h>
#include <cstdint>
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/scoped_clear_last_error.h"
@ -49,16 +47,6 @@ constexpr LogSeverity LOGGING_DFATAL = LOGGING_FATAL;
constexpr LogSeverity LOGGING_DFATAL = LOGGING_ERROR;
#endif
// This block duplicates the above entries to facilitate incremental conversion
// from LOG_FOO to LOGGING_FOO.
// TODO(thestig): Convert existing users to LOGGING_FOO and remove this block.
constexpr LogSeverity LOG_VERBOSE = LOGGING_VERBOSE;
constexpr LogSeverity LOG_INFO = LOGGING_INFO;
constexpr LogSeverity LOG_WARNING = LOGGING_WARNING;
constexpr LogSeverity LOG_ERROR = LOGGING_ERROR;
constexpr LogSeverity LOG_FATAL = LOGGING_FATAL;
constexpr LogSeverity LOG_DFATAL = LOGGING_DFATAL;
PA_COMPONENT_EXPORT(PARTITION_ALLOC)
extern base::strings::CStringBuilder* g_swallow_stream;

View File

@ -97,7 +97,7 @@ bool ShouldCreateLogMessage(int severity) {
}
int GetVlogVerbosity() {
return std::max(-1, LOG_INFO - GetMinLogLevel());
return std::max(-1, LOGGING_INFO - GetMinLogLevel());
}
void RawLog(int level, const char* message) {

View File

@ -140,9 +140,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeDelta {
static TimeDelta FromZxDuration(zx_duration_t nanos);
#endif
#if BUILDFLAG(IS_APPLE)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
static TimeDelta FromMachTime(uint64_t mach_time);
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
#endif // BUILDFLAG(IS_APPLE)
// Converts an integer value representing TimeDelta to a class. This is used
@ -851,14 +849,12 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) TimeTicks
#endif
#if BUILDFLAG(IS_APPLE)
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
static TimeTicks FromMachAbsoluteTime(uint64_t mach_absolute_time);
// Sets the current Mach timebase to `timebase`. Returns the old timebase.
static mach_timebase_info_data_t SetMachTimebaseInfoForTesting(
mach_timebase_info_data_t timebase);
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
#endif // BUILDFLAG(IS_APPLE)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(PA_IS_CHROMEOS_ASH)

View File

@ -28,7 +28,6 @@ namespace partition_alloc::internal::base {
namespace {
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// Returns a pointer to the initialized Mach timebase info struct.
mach_timebase_info_data_t* MachTimebaseInfo() {
static mach_timebase_info_data_t timebase_info = []() {
@ -81,29 +80,14 @@ int64_t MachTimeToMicroseconds(uint64_t mach_time) {
// 9223372036854775807 / (1e6 * 60 * 60 * 24 * 365.2425) = 292,277).
return checked_cast<int64_t>(microseconds);
}
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// Returns monotonically growing number of ticks in microseconds since some
// unspecified starting point.
int64_t ComputeCurrentTicks() {
#if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
struct timespec tp;
// clock_gettime() returns 0 on success and -1 on failure. Failure can only
// happen because of bad arguments (unsupported clock type or timespec
// pointer out of accessible address space). Here it is known that neither
// can happen since the timespec parameter is stack allocated right above and
// `CLOCK_MONOTONIC` is supported on all versions of iOS that Chrome is
// supported on.
int res = clock_gettime(CLOCK_MONOTONIC, &tp);
PA_BASE_DCHECK(0 == res) << "Failed clock_gettime, errno: " << errno;
return (int64_t)tp.tv_sec * 1000000 + tp.tv_nsec / 1000;
#else
// mach_absolute_time is it when it comes to ticks on the Mac. Other calls
// with less precision (such as TickCount) just call through to
// mach_absolute_time.
return MachTimeToMicroseconds(mach_absolute_time());
#endif // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
}
int64_t ComputeThreadTicks() {
@ -188,12 +172,10 @@ NSDate* Time::ToNSDate() const {
// TimeDelta ------------------------------------------------------------------
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static
TimeDelta TimeDelta::FromMachTime(uint64_t mach_time) {
return Microseconds(MachTimeToMicroseconds(mach_time));
}
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// TimeTicks ------------------------------------------------------------------
@ -213,7 +195,6 @@ bool TimeTicks::IsConsistentAcrossProcesses() {
return true;
}
#if BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static
TimeTicks TimeTicks::FromMachAbsoluteTime(uint64_t mach_absolute_time) {
return TimeTicks(MachTimeToMicroseconds(mach_absolute_time));
@ -229,15 +210,9 @@ mach_timebase_info_data_t TimeTicks::SetMachTimebaseInfoForTesting(
return orig_timebase;
}
#endif // BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
// static
TimeTicks::Clock TimeTicks::GetClock() {
#if !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
return Clock::IOS_CF_ABSOLUTE_TIME_MINUS_KERN_BOOTTIME;
#else
return Clock::MAC_MACH_ABSOLUTE_TIME;
#endif // !BUILDFLAG(PARTITION_ALLOC_ENABLE_MACH_ABSOLUTE_TIME_TICKS)
}
// ThreadTicks ----------------------------------------------------------------

View File

@ -25,23 +25,13 @@
// As a consequence:
// - When PartitionAlloc is not malloc(), use the regular macros
// - Otherwise, crash immediately. This provides worse error messages though.
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && !PA_BASE_CHECK_WILL_STREAM()
// For official build discard log strings to reduce binary bloat.
#if !PA_BASE_CHECK_WILL_STREAM()
// See base/check.h for implementation details.
#define PA_CHECK(condition) \
PA_UNLIKELY(!(condition)) ? PA_IMMEDIATE_CRASH() \
: PA_EAT_CHECK_STREAM_PARAMS()
#else
// PartitionAlloc uses async-signal-safe RawCheckFailure() for error reporting.
// Async-signal-safe functions are guaranteed to not allocate as otherwise they
// could operate with inconsistent allocator state.
#define PA_CHECK(condition) \
PA_UNLIKELY(!(condition)) \
? ::partition_alloc::internal::logging::RawCheckFailure( \
__FILE__ "(" PA_STRINGIFY(__LINE__) ") Check failed: " #condition) \
: PA_EAT_CHECK_STREAM_PARAMS()
#endif // !CHECK_WILL_STREAM()
#if BUILDFLAG(PA_DCHECK_IS_ON)
#define PA_DCHECK(condition) PA_CHECK(condition)
@ -62,12 +52,14 @@
#define PA_DPCHECK(condition) PA_EAT_CHECK_STREAM_PARAMS(!(condition))
#endif // BUILDFLAG(PA_DCHECK_IS_ON)
#else
#else // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// !PA_BASE_CHECK_WILL_STREAM()
#define PA_CHECK(condition) PA_BASE_CHECK(condition)
#define PA_DCHECK(condition) PA_BASE_DCHECK(condition)
#define PA_PCHECK(condition) PA_BASE_PCHECK(condition)
#define PA_DPCHECK(condition) PA_BASE_DPCHECK(condition)
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
// !PA_BASE_CHECK_WILL_STREAM()
// Expensive dchecks that run within *Scan. These checks are only enabled in
// debug builds with dchecks enabled.
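One non-obvious piece of the PA_CHECK shape above is that PA_CHECK(condition) << "message" must still compile when the macro collapses to an immediate crash; the streamed operands are swallowed rather than formatted. A standalone sketch of that idiom follows, under illustrative names (SKETCH_CHECK and EatStream are not the real PartitionAlloc helpers).

#include <cstdlib>

// Discards every streamed operand without formatting or allocating.
struct EatStream {
  template <typename T>
  EatStream& operator<<(const T&) { return *this; }
};

// On failure, crash immediately; on success, hand back an EatStream so that
// any trailing "<< ..." operands are accepted and thrown away. operator<<
// binds tighter than ?:, so the streamed message attaches to the else branch.
#define SKETCH_CHECK(condition) \
  (!(condition)) ? (std::abort(), EatStream()) : EatStream()

int main(int argc, char**) {
  SKETCH_CHECK(argc > 0) << "argc should always be positive";
  return 0;
}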

View File

@ -11,51 +11,63 @@
#include <limits>
#include "base/allocator/partition_allocator/address_pool_manager_types.h"
#include "base/allocator/partition_allocator/flags.h"
#include "base/allocator/partition_allocator/page_allocator_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)
#include <mach/vm_page_size.h>
#endif
#if PA_CONFIG(HAS_MEMORY_TAGGING)
#include "base/allocator/partition_allocator/tagging.h"
#endif
namespace partition_alloc {
namespace internal {
// Bit flag constants used as `flag` argument of PartitionRoot::Alloc<flags>,
// AlignedAlloc, etc.
struct AllocFlags {
static constexpr unsigned int kReturnNull = 1 << 0;
static constexpr unsigned int kZeroFill = 1 << 1;
enum class AllocFlags {
kNone = 0,
kReturnNull = 1 << 0,
kZeroFill = 1 << 1,
// Don't allow allocation override hooks. Override hooks are expected to
// check for the presence of this flag and return false if it is active.
static constexpr unsigned int kNoOverrideHooks = 1 << 2;
kNoOverrideHooks = 1 << 2,
// Never let a memory tool like ASan (if active) perform the allocation.
static constexpr unsigned int kNoMemoryToolOverride = 1 << 3;
kNoMemoryToolOverride = 1 << 3,
// Don't allow any hooks (override or observers).
static constexpr unsigned int kNoHooks = 1 << 4; // Internal.
kNoHooks = 1 << 4, // Internal.
// If the allocation requires a "slow path" (such as allocating/committing a
// new slot span), return nullptr instead. Note this makes all large
// allocations return nullptr, such as direct-mapped ones, and even for
// smaller ones, a nullptr value is common.
static constexpr unsigned int kFastPathOrReturnNull = 1 << 5; // Internal.
kFastPathOrReturnNull = 1 << 5, // Internal.
// An allocation override hook should tag the allocated memory for MTE.
static constexpr unsigned int kMemoryShouldBeTaggedForMte =
1 << 6; // Internal.
static constexpr unsigned int kLastFlag = kMemoryShouldBeTaggedForMte;
kMemoryShouldBeTaggedForMte = 1 << 6, // Internal.
kMaxValue = kMemoryShouldBeTaggedForMte,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(AllocFlags);
// Bit flag constants used as `flag` argument of PartitionRoot::Free<flags>.
struct FreeFlags {
enum class FreeFlags {
kNone = 0,
// See AllocFlags::kNoMemoryToolOverride.
static constexpr unsigned int kNoMemoryToolOverride = 1 << 0;
static constexpr unsigned int kLastFlag = kNoMemoryToolOverride;
kNoMemoryToolOverride = 1 << 0,
// Don't allow any hooks (override or observers).
kNoHooks = 1 << 1, // Internal.
kMaxValue = kNoHooks,
};
PA_DEFINE_OPERATORS_FOR_FLAGS(FreeFlags);
} // namespace internal
using internal::AllocFlags;
using internal::FreeFlags;
namespace internal {

View File

@ -76,7 +76,7 @@ void PartitionAllocHooks::AllocationObserverHookIfEnabled(
bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
void** out,
unsigned int flags,
AllocFlags flags,
size_t size,
const char* type_name) {
if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed)) {

View File

@ -10,6 +10,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
namespace partition_alloc {
@ -28,7 +29,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
// If it returns true, the allocation has been overridden with the pointer in
// *out.
typedef bool AllocationOverrideHook(void** out,
unsigned int flags,
AllocFlags flags,
size_t size,
const char* type_name);
// If it returns true, then the allocation was overridden and has been freed.
@ -60,7 +61,7 @@ class PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionAllocHooks {
static void AllocationObserverHookIfEnabled(
const partition_alloc::AllocationNotificationData& notification_data);
static bool AllocationOverrideHookIfEnabled(void** out,
unsigned int flags,
AllocFlags flags,
size_t size,
const char* type_name);

View File

@ -188,7 +188,7 @@ uintptr_t ReserveMemoryFromPool(pool_handle pool,
}
SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
unsigned int flags,
AllocFlags flags,
size_t raw_size,
size_t slot_span_alignment) {
PA_DCHECK((slot_span_alignment >= PartitionPageSize()) &&
@ -198,7 +198,7 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
// scoped unlocking.
PartitionRootLock(root).AssertAcquired();
const bool return_null = flags & AllocFlags::kReturnNull;
const bool return_null = ContainsFlags(flags, AllocFlags::kReturnNull);
if (PA_UNLIKELY(raw_size > MaxDirectMapped())) {
if (return_null) {
return nullptr;
@ -260,6 +260,7 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
slot_span_alignment - PartitionPageSize();
const size_t reservation_size = PartitionRoot::GetDirectMapReservationSize(
raw_size + padding_for_alignment);
PA_DCHECK(reservation_size >= raw_size);
#if BUILDFLAG(PA_DCHECK_IS_ON)
const size_t available_reservation_size =
reservation_size - padding_for_alignment -
@ -336,15 +337,17 @@ SlotSpanMetadata* PartitionDirectMap(PartitionRoot* root,
// so no other thread can update the same offset table entries at the
// same time. Furthermore, nobody will be ready these offsets until this
// function returns.
uintptr_t address_start = reservation_start;
uintptr_t address_end = address_start + reservation_size;
auto* offset_ptr = ReservationOffsetPointer(address_start);
uint16_t offset = 0;
while (address_start < address_end) {
PA_DCHECK(offset_ptr < GetReservationOffsetTableEnd(address_start));
auto* offset_ptr = ReservationOffsetPointer(reservation_start);
[[maybe_unused]] const auto* offset_ptr_end =
GetReservationOffsetTableEnd(reservation_start);
// |raw_size| > MaxBucketed(). So |reservation_size| > 0.
PA_DCHECK(reservation_size > 0);
const uint16_t offset_end = (reservation_size - 1) >> kSuperPageShift;
for (uint16_t offset = 0; offset <= offset_end; ++offset) {
PA_DCHECK(offset < kOffsetTagNormalBuckets);
*offset_ptr++ = offset++;
address_start += kSuperPageSize;
PA_DCHECK(offset_ptr < offset_ptr_end);
*offset_ptr++ = offset;
}
auto* super_page_extent = PartitionSuperPageToExtent(reservation_start);
@ -621,7 +624,7 @@ void PartitionBucket::Init(uint32_t new_slot_size) {
PA_ALWAYS_INLINE SlotSpanMetadata* PartitionBucket::AllocNewSlotSpan(
PartitionRoot* root,
unsigned int flags,
AllocFlags flags,
size_t slot_span_alignment) {
PA_DCHECK(!(root->next_partition_page % PartitionPageSize()));
PA_DCHECK(!(root->next_partition_page_end % PartitionPageSize()));
@ -696,7 +699,7 @@ PA_ALWAYS_INLINE SlotSpanMetadata* PartitionBucket::AllocNewSlotSpan(
uintptr_t PartitionBucket::AllocNewSuperPageSpan(PartitionRoot* root,
size_t super_page_count,
unsigned int flags) {
AllocFlags flags) {
PA_CHECK(super_page_count > 0);
PA_CHECK(super_page_count <=
std::numeric_limits<size_t>::max() / kSuperPageSize);
@ -709,7 +712,7 @@ uintptr_t PartitionBucket::AllocNewSuperPageSpan(PartitionRoot* root,
uintptr_t super_page_span_start = ReserveMemoryFromPool(
pool, requested_address, super_page_count * kSuperPageSize);
if (PA_UNLIKELY(!super_page_span_start)) {
if (flags & AllocFlags::kReturnNull) {
if (ContainsFlags(flags, AllocFlags::kReturnNull)) {
return 0;
}
@ -729,12 +732,12 @@ uintptr_t PartitionBucket::AllocNewSuperPageSpan(PartitionRoot* root,
}
PA_ALWAYS_INLINE uintptr_t
PartitionBucket::AllocNewSuperPage(PartitionRoot* root, unsigned int flags) {
PartitionBucket::AllocNewSuperPage(PartitionRoot* root, AllocFlags flags) {
auto super_page = AllocNewSuperPageSpan(root, 1, flags);
if (PA_UNLIKELY(!super_page)) {
// If the `kReturnNull` flag isn't set and the allocation attempt fails,
// `AllocNewSuperPageSpan` should've failed with an OOM crash.
PA_DCHECK(flags & AllocFlags::kReturnNull);
PA_DCHECK(ContainsFlags(flags, AllocFlags::kReturnNull));
return 0;
}
return SuperPagePayloadBegin(super_page, root->IsQuarantineAllowed());
@ -1291,7 +1294,7 @@ void PartitionBucket::SortActiveSlotSpans() {
}
uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
unsigned int flags,
AllocFlags flags,
size_t raw_size,
size_t slot_span_alignment,
bool* is_already_zeroed) {
@ -1329,7 +1332,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
SlotSpanMetadata::get_sentinel_slot_span());
// No fast path for direct-mapped allocations.
if (flags & AllocFlags::kFastPathOrReturnNull) {
if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
return 0;
}
@ -1375,7 +1378,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
if (PA_UNLIKELY(!new_slot_span) &&
PA_LIKELY(decommitted_slot_spans_head != nullptr)) {
// Commit can be expensive, don't do it.
if (flags & AllocFlags::kFastPathOrReturnNull) {
if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
return 0;
}
@ -1406,7 +1409,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
PA_DCHECK(new_slot_span);
} else {
// Getting a new slot span is expensive, don't do it.
if (flags & AllocFlags::kFastPathOrReturnNull) {
if (ContainsFlags(flags, AllocFlags::kFastPathOrReturnNull)) {
return 0;
}
@ -1422,7 +1425,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
if (PA_UNLIKELY(!new_slot_span)) {
PA_DCHECK(active_slot_spans_head ==
SlotSpanMetadata::get_sentinel_slot_span());
if (flags & AllocFlags::kReturnNull) {
if (ContainsFlags(flags, AllocFlags::kReturnNull)) {
return 0;
}
// See comment in PartitionDirectMap() for unlocking.
@ -1459,7 +1462,7 @@ uintptr_t PartitionBucket::SlowPathAlloc(PartitionRoot* root,
uintptr_t PartitionBucket::AllocNewSuperPageSpanForGwpAsan(
PartitionRoot* root,
size_t super_page_count,
unsigned int flags) {
AllocFlags flags) {
return AllocNewSuperPageSpan(root, super_page_count, flags);
}

View File

@ -74,7 +74,7 @@ struct PartitionBucket {
// Note the matching Free() functions are in SlotSpanMetadata.
PA_NOINLINE PA_COMPONENT_EXPORT(PARTITION_ALLOC) uintptr_t
SlowPathAlloc(PartitionRoot* root,
unsigned int flags,
AllocFlags flags,
size_t raw_size,
size_t slot_span_alignment,
bool* is_already_zeroed)
@ -166,7 +166,7 @@ struct PartitionBucket {
// compilation unit.
uintptr_t AllocNewSuperPageSpanForGwpAsan(PartitionRoot* root,
size_t super_page_count,
unsigned int flags)
AllocFlags flags)
PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
void InitializeSlotSpanForGwpAsan(SlotSpanMetadata* slot_span);
@ -175,14 +175,14 @@ struct PartitionBucket {
// super page.
PA_ALWAYS_INLINE uintptr_t AllocNewSuperPageSpan(PartitionRoot* root,
size_t super_page_count,
unsigned int flags)
AllocFlags flags)
PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
// Allocates a new slot span with size |num_partition_pages| from the
// current extent. Metadata within this slot span will be initialized.
// Returns nullptr on error.
PA_ALWAYS_INLINE SlotSpanMetadata* AllocNewSlotSpan(
PartitionRoot* root,
unsigned int flags,
AllocFlags flags,
size_t slot_span_alignment)
PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
@ -190,7 +190,7 @@ struct PartitionBucket {
// slot-spans will be in the decommitted state. Returns the address of the
// super page's payload, or 0 on error.
PA_ALWAYS_INLINE uintptr_t AllocNewSuperPage(PartitionRoot* root,
unsigned int flags)
AllocFlags flags)
PA_EXCLUSIVE_LOCKS_REQUIRED(PartitionRootLock(root));
// Each bucket allocates a slot span when it runs out of slots.

View File

@ -26,7 +26,6 @@
#include "base/allocator/partition_allocator/partition_page_constants.h"
#include "base/allocator/partition_allocator/partition_superpage_extent_entry.h"
#include "base/allocator/partition_allocator/reservation_offset_table.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
#if BUILDFLAG(USE_STARSCAN)
@ -37,6 +36,10 @@
#include "base/allocator/partition_allocator/partition_ref_count.h"
#endif
#if BUILDFLAG(PA_DCHECK_IS_ON)
#include "base/allocator/partition_allocator/tagging.h"
#endif
namespace partition_alloc::internal {
#if BUILDFLAG(USE_STARSCAN)
@ -124,7 +127,7 @@ struct SlotSpanMetadata {
PA_ALWAYS_INLINE void Free(uintptr_t ptr, PartitionRoot* root);
// Appends the passed freelist to the slot-span's freelist. Please note that
// the function doesn't increment the tags of the passed freelist entries,
// since FreeNoHooks() did it already.
// since FreeInline() did it already.
PA_ALWAYS_INLINE void AppendFreeList(EncodedNextFreelistEntry* head,
EncodedNextFreelistEntry* tail,
size_t number_of_freed,

View File

@ -1692,14 +1692,21 @@ void PartitionRoot::SetSortActiveSlotSpansEnabled(bool new_value) {
// Explicitly define common template instantiations to reduce compile time.
#define EXPORT_TEMPLATE \
template PA_EXPORT_TEMPLATE_DEFINE(PA_COMPONENT_EXPORT(PARTITION_ALLOC))
EXPORT_TEMPLATE void* PartitionRoot::Alloc<0>(size_t, const char*);
EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kNone>(size_t,
const char*);
EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kReturnNull>(
size_t,
const char*);
EXPORT_TEMPLATE void* PartitionRoot::Realloc<0>(void*, size_t, const char*);
EXPORT_TEMPLATE void*
PartitionRoot::Realloc<AllocFlags::kReturnNull>(void*, size_t, const char*);
EXPORT_TEMPLATE void* PartitionRoot::AlignedAlloc<0>(size_t, size_t);
PartitionRoot::Realloc<AllocFlags::kNone, FreeFlags::kNone>(void*,
size_t,
const char*);
EXPORT_TEMPLATE void*
PartitionRoot::Realloc<AllocFlags::kReturnNull, FreeFlags::kNone>(void*,
size_t,
const char*);
EXPORT_TEMPLATE void* PartitionRoot::AlignedAlloc<AllocFlags::kNone>(size_t,
size_t);
#undef EXPORT_TEMPLATE
static_assert(offsetof(PartitionRoot, sentinel_bucket) ==

View File

@ -77,16 +77,6 @@
#include "base/allocator/partition_allocator/starscan/pcscan.h"
#endif
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
// size as other alloc code.
#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
if (size > partition_alloc::internal::MaxDirectMapped()) { \
if (flags & AllocFlags::kReturnNull) { \
return nullptr; \
} \
PA_CHECK(false); \
}
namespace partition_alloc::internal {
// We want this size to be big enough that we have time to start up other
@ -448,11 +438,11 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
//
// NOTE: This is incompatible with anything that adds extras before the
// returned pointer, such as ref-count.
template <unsigned int flags = 0>
template <AllocFlags flags = AllocFlags::kNone>
PA_NOINLINE void* AlignedAlloc(size_t alignment, size_t requested_size) {
return AlignedAllocInline<flags>(alignment, requested_size);
}
template <unsigned int flags = 0>
template <AllocFlags flags = AllocFlags::kNone>
PA_ALWAYS_INLINE void* AlignedAllocInline(size_t alignment,
size_t requested_size);
@ -461,98 +451,59 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// detrimental to performance, for instance if multiple callers are hot (by
// increasing cache footprint). Set PA_NOINLINE on the "basic" top-level
// functions to mitigate that for "vanilla" callers.
template <unsigned int flags = 0>
PA_NOINLINE PA_MALLOC_FN void* Alloc(size_t requested_size,
const char* type_name)
PA_MALLOC_ALIGNED {
//
// |type_name == nullptr|: ONLY FOR TESTS except internal uses.
// You should provide |type_name| to make debugging easier.
template <AllocFlags flags = AllocFlags::kNone>
PA_NOINLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* Alloc(
size_t requested_size,
const char* type_name = nullptr) {
return AllocInline<flags>(requested_size, type_name);
}
template <unsigned int flags = 0>
PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocInline(size_t requested_size,
const char* type_name)
PA_MALLOC_ALIGNED {
static_assert((flags & AllocFlags::kNoHooks) == 0); // Internal only.
template <AllocFlags flags = AllocFlags::kNone>
PA_ALWAYS_INLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* AllocInline(
size_t requested_size,
const char* type_name = nullptr) {
return AllocInternal<flags>(requested_size, internal::PartitionPageSize(),
type_name);
}
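
A hedged usage sketch of the new templated entry points, assuming a configured PartitionRoot* named |root| (kReturnNull returns nullptr on failure, while the default path crashes on OOM):

void AllocExample(PartitionRoot* root) {
  void* a = root->Alloc(64, "MyType");                           // AllocFlags::kNone
  void* b = root->Alloc<AllocFlags::kReturnNull>(64, "MyType");  // may be nullptr
  if (b) {
    root->Free(b);
  }
  root->Free(a);
}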
// Same as |Alloc()|, but allows specifying |slot_span_alignment|. It
// has to be a multiple of partition page size, greater than 0 and no greater
// than kMaxSupportedAlignment. If it equals exactly 1 partition page, no
// special action is taken as PartitionAlloc naturally guarantees this
// alignment, otherwise a sub-optimal allocation strategy is used to
// guarantee the higher-order alignment.
template <unsigned int flags>
PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocInternal(size_t requested_size,
size_t slot_span_alignment,
const char* type_name)
PA_MALLOC_ALIGNED;
// Same as |Alloc()|, but bypasses the allocator hooks.
//
// This is separate from Alloc() because other callers of
// Alloc() should not have the extra branch checking whether the
// hooks should be ignored or not. This is the same reason why |FreeNoHooks()|
// exists. However, |AlignedAlloc()| and |Realloc()| have few callers, so
// taking the extra branch in the non-malloc() case doesn't hurt. In addition,
// for the malloc() case, the compiler correctly removes the branch, since
// this is marked |PA_ALWAYS_INLINE|.
template <unsigned int flags = 0>
PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocNoHooks(size_t requested_size,
size_t slot_span_alignment)
PA_MALLOC_ALIGNED;
// Deprecated compatibility method.
// TODO(mikt): remove this once all third party usage is gone.
PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlags(unsigned int flags,
size_t requested_size,
const char* type_name)
PA_MALLOC_ALIGNED {
// These conditional branches should be optimized away.
if (flags == (AllocFlags::kReturnNull)) {
return AllocInline<AllocFlags::kReturnNull>(requested_size, type_name);
} else if (flags == (AllocFlags::kZeroFill)) {
return AllocInline<AllocFlags::kZeroFill>(requested_size, type_name);
} else if (flags == (AllocFlags::kReturnNull | AllocFlags::kZeroFill)) {
return AllocInline<AllocFlags::kReturnNull | AllocFlags::kZeroFill>(
requested_size, type_name);
} else {
PA_CHECK(0);
PA_NOTREACHED();
}
// AllocInternal exposed for testing.
template <AllocFlags flags = AllocFlags::kNone>
PA_NOINLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* AllocInternalForTesting(
size_t requested_size,
size_t slot_span_alignment,
const char* type_name) {
return AllocInternal<flags>(requested_size, slot_span_alignment, type_name);
}
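
For illustration, a hypothetical test call that exercises the |slot_span_alignment| parameter: a power-of-two multiple of the partition page size, no larger than kMaxSupportedAlignment (namespace qualifiers abbreviated; |root| is assumed to be a configured PartitionRoot*):

void AlignmentTestExample(PartitionRoot* root) {
  size_t slot_span_alignment = 4 * internal::PartitionPageSize();
  void* p = root->AllocInternalForTesting<AllocFlags::kNone>(
      /*requested_size=*/128, slot_span_alignment, "alignment-test");
  root->Free(p);
}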
template <unsigned int flags = 0>
PA_NOINLINE void* Realloc(void* ptr,
size_t new_size,
const char* type_name) PA_MALLOC_ALIGNED {
return ReallocInline<flags>(ptr, new_size, type_name);
}
template <unsigned int flags = 0>
PA_ALWAYS_INLINE void* ReallocInline(void* ptr,
size_t new_size,
const char* type_name) PA_MALLOC_ALIGNED;
// Overload that may return nullptr if reallocation isn't possible. In this
// case, |ptr| remains valid.
PA_NOINLINE void* TryRealloc(void* ptr,
size_t new_size,
const char* type_name) PA_MALLOC_ALIGNED {
return ReallocInline<AllocFlags::kReturnNull>(ptr, new_size, type_name);
}
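
A hypothetical use of TryRealloc, relying on the documented guarantee that |ptr| stays valid when nullptr is returned:

void* TryReallocExample(PartitionRoot* root, void* p) {
  void* grown = root->TryRealloc(p, 4096, "MyType");
  if (grown) {
    return grown;  // The old block has been released by the realloc path.
  }
  return p;        // Reallocation failed; |p| is untouched and still usable.
}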
// Deprecated compatibility method.
// TODO(mikt): remove this once all third party usage is gone.
PA_NOINLINE void* ReallocWithFlags(unsigned int flags,
void* ptr,
size_t new_size,
const char* type_name) PA_MALLOC_ALIGNED {
PA_CHECK(flags == AllocFlags::kReturnNull);
return ReallocInline<AllocFlags::kReturnNull>(ptr, new_size, type_name);
template <AllocFlags alloc_flags = AllocFlags::kNone,
FreeFlags free_flags = FreeFlags::kNone>
PA_NOINLINE PA_MALLOC_ALIGNED void* Realloc(void* ptr,
size_t new_size,
const char* type_name) {
return ReallocInline<alloc_flags, free_flags>(ptr, new_size, type_name);
}
template <AllocFlags alloc_flags = AllocFlags::kNone,
FreeFlags free_flags = FreeFlags::kNone>
PA_ALWAYS_INLINE PA_MALLOC_ALIGNED void* ReallocInline(void* ptr,
size_t new_size,
const char* type_name);
template <unsigned int flags = 0>
PA_NOINLINE void Free(void* object);
PA_ALWAYS_INLINE void FreeNoHooks(void* object);
template <FreeFlags flags = FreeFlags::kNone>
PA_NOINLINE void Free(void* object) {
FreeInline<flags>(object);
}
template <FreeFlags flags = FreeFlags::kNone>
PA_ALWAYS_INLINE void FreeInline(void* object);
template <FreeFlags flags = FreeFlags::kNone>
PA_NOINLINE static void FreeInUnknownRoot(void* object) {
FreeInlineInUnknownRoot<flags>(object);
}
template <FreeFlags flags = FreeFlags::kNone>
PA_ALWAYS_INLINE static void FreeInlineInUnknownRoot(void* object);
template <unsigned int flags = 0>
PA_NOINLINE static void FreeInUnknownRoot(void* object);
PA_ALWAYS_INLINE static void FreeNoHooksInUnknownRoot(void* object);
// Immediately frees the pointer bypassing the quarantine. |slot_start| is the
// beginning of the slot that contains |object|.
PA_ALWAYS_INLINE void FreeNoHooksImmediate(void* object,
@ -882,7 +833,7 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// Common path of Free() and FreeInUnknownRoot(). Returns
// true if the caller should return immediately.
template <unsigned int flags>
template <FreeFlags flags>
PA_ALWAYS_INLINE static bool FreeProlog(void* object,
const PartitionRoot* root);
@ -922,6 +873,23 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
return ret;
}
// Same as |Alloc()|, but allows specifying |slot_span_alignment|. It
// has to be a multiple of partition page size, greater than 0 and no greater
// than kMaxSupportedAlignment. If it equals exactly 1 partition page, no
// special action is taken as PartitionAlloc naturally guarantees this
// alignment, otherwise a sub-optimal allocation strategy is used to
// guarantee the higher-order alignment.
template <AllocFlags flags>
PA_ALWAYS_INLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* AllocInternal(
size_t requested_size,
size_t slot_span_alignment,
const char* type_name);
// Same as |AllocInternal()|, but don't handle allocation hooks.
template <AllocFlags flags = AllocFlags::kNone>
PA_ALWAYS_INLINE PA_MALLOC_FN PA_MALLOC_ALIGNED void* AllocInternalNoHooks(
size_t requested_size,
size_t slot_span_alignment);
// Allocates a memory slot, without initializing extras.
//
// - |flags| are as in Alloc().
@ -929,13 +897,13 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
// |requested_size|.
// - |usable_size| and |is_already_zeroed| are output only. |usable_size| is
// guaranteed to be larger or equal to Alloc()'s |requested_size|.
template <unsigned int flags>
template <AllocFlags flags>
PA_ALWAYS_INLINE uintptr_t RawAlloc(Bucket* bucket,
size_t raw_size,
size_t slot_span_alignment,
size_t* usable_size,
bool* is_already_zeroed);
template <unsigned int flags>
template <AllocFlags flags>
PA_ALWAYS_INLINE uintptr_t AllocFromBucket(Bucket* bucket,
size_t raw_size,
size_t slot_span_alignment,
@ -943,6 +911,20 @@ struct PA_ALIGNAS(64) PA_COMPONENT_EXPORT(PARTITION_ALLOC) PartitionRoot {
bool* is_already_zeroed)
PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this));
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
// size as other alloc code.
template <AllocFlags flags>
PA_ALWAYS_INLINE static bool AllocWithMemoryToolProlog(size_t size) {
if (size > partition_alloc::internal::MaxDirectMapped()) {
if constexpr (ContainsFlags(flags, AllocFlags::kReturnNull)) {
// Early return indicating not to proceed with allocation
return false;
}
PA_CHECK(false);
}
return true; // Allocation should proceed
}
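
This templated prolog replaces the deleted CHECK_MAX_SIZE_OR_RETURN_NULLPTR macro; because the flag test is an if constexpr, the kReturnNull branch is compiled out of instantiations that don't request it. A stand-alone sketch of the same pattern with hypothetical names (kMaxSize, Flags, Prolog are stand-ins, not the PartitionAlloc symbols):

#include <cstddef>
#include <cstdlib>
constexpr size_t kMaxSize = size_t{1} << 31;
enum class Flags : unsigned { kNone = 0, kReturnNull = 1 };
template <Flags F>
bool Prolog(size_t size) {
  if (size > kMaxSize) {
    if constexpr (F == Flags::kReturnNull) {
      return false;  // Caller bails out with nullptr.
    }
    std::abort();  // Stand-in for PA_CHECK(false).
  }
  return true;  // Allocation proceeds.
}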
bool TryReallocInPlaceForNormalBuckets(void* object,
SlotSpan* slot_span,
size_t new_size);
@ -1163,7 +1145,7 @@ PA_ALWAYS_INLINE void PartitionAllocFreeForRefCounting(uintptr_t slot_start) {
} // namespace internal
template <unsigned int flags>
template <AllocFlags flags>
PA_ALWAYS_INLINE uintptr_t
PartitionRoot::AllocFromBucket(Bucket* bucket,
size_t raw_size,
@ -1270,13 +1252,16 @@ FreeNotificationData PartitionRoot::CreateFreeNotificationData(
}
// static
template <unsigned int flags>
template <FreeFlags flags>
PA_ALWAYS_INLINE bool PartitionRoot::FreeProlog(void* object,
const PartitionRoot* root) {
PA_DCHECK(flags < FreeFlags::kLastFlag << 1);
static_assert(AreValidFlags(flags));
if constexpr (ContainsFlags(flags, FreeFlags::kNoHooks)) {
return false;
}
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
if constexpr (!(flags & FreeFlags::kNoMemoryToolOverride)) {
if constexpr (!ContainsFlags(flags, FreeFlags::kNoMemoryToolOverride)) {
free(object);
return true;
}
@ -1287,7 +1272,7 @@ PA_ALWAYS_INLINE bool PartitionRoot::FreeProlog(void* object,
if (PartitionAllocHooks::AreHooksEnabled()) {
// A valid |root| might not be available if this function is called from
// |FreeWithFlagsInUnknownRoot| and not deducible if object originates from
// |FreeInUnknownRoot| and not deducible if object originates from
// an override hook.
// TODO(crbug.com/1137393): See if we can make the root available more
// reliably or even make this function non-static.
@ -1302,29 +1287,6 @@ PA_ALWAYS_INLINE bool PartitionRoot::FreeProlog(void* object,
return false;
}
template <unsigned int flags>
PA_NOINLINE void PartitionRoot::Free(void* object) {
bool early_return = FreeProlog<flags>(object, this);
if (early_return) {
return;
}
FreeNoHooks(object);
}
// static
template <unsigned int flags>
PA_NOINLINE void PartitionRoot::FreeInUnknownRoot(void* object) {
// The correct PartitionRoot might not be deducible if the |object| originates
// from an override hook.
bool early_return = FreeProlog<flags>(object, nullptr);
if (early_return) {
return;
}
FreeNoHooksInUnknownRoot(object);
}
PA_ALWAYS_INLINE bool PartitionRoot::IsMemoryTaggingEnabled() const {
#if PA_CONFIG(HAS_MEMORY_TAGGING)
return settings.memory_tagging_enabled_;
@ -1343,7 +1305,13 @@ PartitionRoot::memory_tagging_reporting_mode() const {
}
// static
PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksInUnknownRoot(void* object) {
template <FreeFlags flags>
PA_ALWAYS_INLINE void PartitionRoot::FreeInlineInUnknownRoot(void* object) {
bool early_return = FreeProlog<flags>(object, nullptr);
if (early_return) {
return;
}
if (PA_UNLIKELY(!object)) {
return;
}
@ -1356,13 +1324,21 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksInUnknownRoot(void* object) {
// change the critical path from object -> slot_span -> root into two
// *parallel* ones:
// 1. object -> root
// 2. object -> slot_span (inside FreeNoHooks)
// 2. object -> slot_span (inside FreeInline)
uintptr_t object_addr = internal::ObjectPtr2Addr(object);
auto* root = FromAddrInFirstSuperpage(object_addr);
root->FreeNoHooks(object);
root->FreeInline<flags | FreeFlags::kNoHooks>(object);
}
PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooks(void* object) {
template <FreeFlags flags>
PA_ALWAYS_INLINE void PartitionRoot::FreeInline(void* object) {
// The correct PartitionRoot might not be deducible if the |object| originates
// from an override hook.
bool early_return = FreeProlog<flags>(object, this);
if (early_return) {
return;
}
if (PA_UNLIKELY(!object)) {
return;
}
@ -1479,7 +1455,7 @@ PA_ALWAYS_INLINE void PartitionRoot::FreeNoHooksImmediate(
// Note: ref-count and cookie can be 0-sized.
//
// For more context, see the other "Layout inside the slot" comment inside
// AllocNoHooks().
// AllocInternalNoHooks().
if (settings.use_cookie) {
// Verify the cookie after the allocated region.
@ -1917,69 +1893,80 @@ PartitionRoot::SizeToBucketIndex(size_t size,
}
}
template <unsigned int flags>
template <AllocFlags flags>
PA_ALWAYS_INLINE void* PartitionRoot::AllocInternal(size_t requested_size,
size_t slot_span_alignment,
const char* type_name) {
static_assert(flags < AllocFlags::kLastFlag << 1);
static_assert(AreValidFlags(flags));
PA_DCHECK(
(slot_span_alignment >= internal::PartitionPageSize()) &&
partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
static_assert(!ContainsFlags(
flags, AllocFlags::kMemoryShouldBeTaggedForMte)); // Internal only.
PA_DCHECK((flags & AllocFlags::kNoHooks) == 0); // Internal only.
static_assert((flags & AllocFlags::kMemoryShouldBeTaggedForMte) ==
0); // Internal only.
PA_DCHECK(initialized);
constexpr bool no_hooks = ContainsFlags(flags, AllocFlags::kNoHooks);
bool hooks_enabled;
if constexpr (!no_hooks) {
PA_DCHECK(initialized);
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
if constexpr (!(flags & AllocFlags::kNoMemoryToolOverride)) {
CHECK_MAX_SIZE_OR_RETURN_NULLPTR(requested_size, flags);
constexpr bool zero_fill = flags & AllocFlags::kZeroFill;
void* result =
zero_fill ? calloc(1, requested_size) : malloc(requested_size);
PA_CHECK(result || flags & AllocFlags::kReturnNull);
return result;
}
#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
void* object = nullptr;
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
if (hooks_enabled) {
unsigned int additional_flags = 0;
#if PA_CONFIG(HAS_MEMORY_TAGGING)
if (IsMemoryTaggingEnabled()) {
additional_flags |= AllocFlags::kMemoryShouldBeTaggedForMte;
if constexpr (!ContainsFlags(flags, AllocFlags::kNoMemoryToolOverride)) {
if (!PartitionRoot::AllocWithMemoryToolProlog<flags>(requested_size)) {
// Early return if AllocWithMemoryToolProlog returns false
return nullptr;
}
constexpr bool zero_fill = ContainsFlags(flags, AllocFlags::kZeroFill);
void* result =
zero_fill ? calloc(1, requested_size) : malloc(requested_size);
if constexpr (!ContainsFlags(flags, AllocFlags::kReturnNull)) {
PA_CHECK(result);
}
return result;
}
#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
void* object = nullptr;
hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
if (hooks_enabled) {
auto additional_flags = AllocFlags::kNone;
#if PA_CONFIG(HAS_MEMORY_TAGGING)
if (IsMemoryTaggingEnabled()) {
additional_flags |= AllocFlags::kMemoryShouldBeTaggedForMte;
}
#endif
// The override hooks will return false if it can't handle the request, i.e.
// due to unsupported flags. In this case, we forward the allocation request
// to the default mechanisms.
// TODO(crbug.com/1137393): See if we can make the forwarding more verbose
// to ensure that this situation doesn't go unnoticed.
if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(
&object, flags | additional_flags, requested_size, type_name)) {
// The override hook will return false if it can't handle the request,
// i.e. due to unsupported flags. In this case, we forward the allocation
// request to the default mechanisms.
// TODO(crbug.com/1137393): See if we can make the forwarding more verbose
// to ensure that this situation doesn't go unnoticed.
if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(
&object, flags | additional_flags, requested_size, type_name)) {
PartitionAllocHooks::AllocationObserverHookIfEnabled(
CreateAllocationNotificationData(object, requested_size,
type_name));
return object;
}
}
}
void* const object =
AllocInternalNoHooks<flags>(requested_size, slot_span_alignment);
if constexpr (!no_hooks) {
if (PA_UNLIKELY(hooks_enabled)) {
PartitionAllocHooks::AllocationObserverHookIfEnabled(
CreateAllocationNotificationData(object, requested_size, type_name));
return object;
}
}
object = AllocNoHooks<flags>(requested_size, slot_span_alignment);
if (PA_UNLIKELY(hooks_enabled)) {
PartitionAllocHooks::AllocationObserverHookIfEnabled(
CreateAllocationNotificationData(object, requested_size, type_name));
}
return object;
}
template <unsigned int flags>
PA_ALWAYS_INLINE void* PartitionRoot::AllocNoHooks(size_t requested_size,
size_t slot_span_alignment) {
static_assert(flags < AllocFlags::kLastFlag << 1);
PA_DCHECK(
(slot_span_alignment >= internal::PartitionPageSize()) &&
partition_alloc::internal::base::bits::IsPowerOfTwo(slot_span_alignment));
template <AllocFlags flags>
PA_ALWAYS_INLINE void* PartitionRoot::AllocInternalNoHooks(
size_t requested_size,
size_t slot_span_alignment) {
static_assert(AreValidFlags(flags));
// The thread cache is added "in the middle" of the main allocator, that is:
// - After all the cookie/ref-count management
@ -2123,7 +2110,7 @@ PA_ALWAYS_INLINE void* PartitionRoot::AllocNoHooks(size_t requested_size,
// Fill the region kUninitializedByte (on debug builds, if not requested to 0)
// or 0 (if requested and not 0 already).
constexpr bool zero_fill = flags & AllocFlags::kZeroFill;
constexpr bool zero_fill = ContainsFlags(flags, AllocFlags::kZeroFill);
// PA_LIKELY: operator new() calls malloc(), not calloc().
if constexpr (!zero_fill) {
// memset() can be really expensive.
@ -2172,7 +2159,7 @@ PA_ALWAYS_INLINE void* PartitionRoot::AllocNoHooks(size_t requested_size,
return object;
}
template <unsigned int flags>
template <AllocFlags flags>
PA_ALWAYS_INLINE uintptr_t PartitionRoot::RawAlloc(Bucket* bucket,
size_t raw_size,
size_t slot_span_alignment,
@ -2184,7 +2171,7 @@ PA_ALWAYS_INLINE uintptr_t PartitionRoot::RawAlloc(Bucket* bucket,
usable_size, is_already_zeroed);
}
template <unsigned int flags>
template <AllocFlags flags>
PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocInline(
size_t alignment,
size_t requested_size) {
@ -2237,12 +2224,12 @@ PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocInline(
raw_size - 1));
}
PA_DCHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(raw_size));
// Adjust back, because AllocNoHooks/Alloc will adjust it again.
// Adjust back, because AllocInternalNoHooks/Alloc will adjust it again.
adjusted_size = AdjustSizeForExtrasSubtract(raw_size);
// Overflow check. adjusted_size must be larger or equal to requested_size.
if (PA_UNLIKELY(adjusted_size < requested_size)) {
if constexpr (flags & AllocFlags::kReturnNull) {
if constexpr (ContainsFlags(flags, AllocFlags::kReturnNull)) {
return nullptr;
}
// OutOfMemoryDeathTest.AlignedAlloc requires
@ -2258,10 +2245,9 @@ PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocInline(
// don't pass anything less, because it'll mess up callee's calculations.
size_t slot_span_alignment =
std::max(alignment, internal::PartitionPageSize());
constexpr bool no_hooks = flags & AllocFlags::kNoHooks;
void* object = no_hooks
? AllocNoHooks(adjusted_size, slot_span_alignment)
: AllocInternal<0>(adjusted_size, slot_span_alignment, "");
// TODO(mikt): Investigate why all flags except kNoHooks are ignored here.
void* object = AllocInternal<flags & AllocFlags::kNoHooks>(
adjusted_size, slot_span_alignment, nullptr);
// |alignment| is a power of two, but the compiler doesn't necessarily know
// that. A regular % operation is very slow, make sure to use the equivalent,
@ -2272,36 +2258,39 @@ PA_ALWAYS_INLINE void* PartitionRoot::AlignedAllocInline(
return object;
}
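
The truncated comment above is about checking the result's alignment without the slow % operator; for a power-of-two |alignment| the mask form is equivalent, as this small sketch shows:

#include <cstddef>
#include <cstdint>
constexpr bool IsAligned(uintptr_t address, size_t alignment) {
  // Valid only when |alignment| is a power of two; then this equals
  // (address % alignment) == 0.
  return (address & (alignment - 1)) == 0;
}
static_assert(IsAligned(0x4000, 0x1000));
static_assert(!IsAligned(0x4008, 0x1000));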
template <unsigned int flags>
template <AllocFlags alloc_flags, FreeFlags free_flags>
void* PartitionRoot::ReallocInline(void* ptr,
size_t new_size,
const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
if (!PartitionRoot::AllocWithMemoryToolProlog<alloc_flags>(new_size)) {
// Early return if AllocWithMemoryToolProlog returns false
return nullptr;
}
void* result = realloc(ptr, new_size);
PA_CHECK(result || flags & AllocFlags::kReturnNull);
if constexpr (!ContainsFlags(alloc_flags, AllocFlags::kReturnNull)) {
PA_CHECK(result);
}
return result;
#else
constexpr bool no_hooks = flags & AllocFlags::kNoHooks;
if (PA_UNLIKELY(!ptr)) {
return no_hooks
? AllocNoHooks<flags>(new_size, internal::PartitionPageSize())
: AllocInternal<flags>(new_size, internal::PartitionPageSize(),
return AllocInternal<alloc_flags>(new_size, internal::PartitionPageSize(),
type_name);
}
if (PA_UNLIKELY(!new_size)) {
FreeInUnknownRoot(ptr);
FreeInUnknownRoot<free_flags>(ptr);
return nullptr;
}
if (new_size > internal::MaxDirectMapped()) {
if constexpr (flags & AllocFlags::kReturnNull) {
if constexpr (ContainsFlags(alloc_flags, AllocFlags::kReturnNull)) {
return nullptr;
}
internal::PartitionExcessiveAllocationSize(new_size);
}
constexpr bool no_hooks = ContainsFlags(alloc_flags, AllocFlags::kNoHooks);
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
bool overridden = false;
size_t old_usable_size;
@ -2348,19 +2337,18 @@ void* PartitionRoot::ReallocInline(void* ptr,
}
// This realloc cannot be resized in-place. Sadness.
void* ret = no_hooks
? AllocNoHooks<flags>(new_size, internal::PartitionPageSize())
: AllocInternal<flags>(
new_size, internal::PartitionPageSize(), type_name);
void* ret = AllocInternal<alloc_flags>(
new_size, internal::PartitionPageSize(), type_name);
if (!ret) {
if constexpr (flags & AllocFlags::kReturnNull) {
if constexpr (ContainsFlags(alloc_flags, AllocFlags::kReturnNull)) {
return nullptr;
}
internal::PartitionExcessiveAllocationSize(new_size);
}
memcpy(ret, ptr, std::min(old_usable_size, new_size));
FreeInUnknownRoot(ptr); // Implicitly protects the old ptr on MTE systems.
FreeInUnknownRoot<free_flags>(
ptr); // Implicitly protects the old ptr on MTE systems.
return ret;
#endif
}
@ -2412,14 +2400,21 @@ ThreadCache* PartitionRoot::GetThreadCache() {
#define EXPORT_TEMPLATE \
extern template PA_EXPORT_TEMPLATE_DECLARE( \
PA_COMPONENT_EXPORT(PARTITION_ALLOC))
EXPORT_TEMPLATE void* PartitionRoot::Alloc<0>(size_t, const char*);
EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kNone>(size_t,
const char*);
EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kReturnNull>(
size_t,
const char*);
EXPORT_TEMPLATE void* PartitionRoot::Realloc<0>(void*, size_t, const char*);
EXPORT_TEMPLATE void*
PartitionRoot::Realloc<AllocFlags::kReturnNull>(void*, size_t, const char*);
EXPORT_TEMPLATE void* PartitionRoot::AlignedAlloc<0>(size_t, size_t);
PartitionRoot::Realloc<AllocFlags::kNone, FreeFlags::kNone>(void*,
size_t,
const char*);
EXPORT_TEMPLATE void*
PartitionRoot::Realloc<AllocFlags::kReturnNull, FreeFlags::kNone>(void*,
size_t,
const char*);
EXPORT_TEMPLATE void* PartitionRoot::AlignedAlloc<AllocFlags::kNone>(size_t,
size_t);
#undef EXPORT_TEMPLATE
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

View File

@ -13,6 +13,7 @@
#include <type_traits>
#include <utility>
#include "base/allocator/partition_allocator/flags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/compiler_specific.h"
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/cxx20_is_constant_evaluated.h"
@ -21,6 +22,7 @@
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/pointers/raw_ptr_exclusion.h"
#include "base/allocator/partition_allocator/raw_ptr_buildflags.h"
#include "build/build_config.h"
#include "build/buildflag.h"
@ -67,7 +69,7 @@ namespace content::responsiveness {
class Calculator;
}
namespace base {
namespace partition_alloc::internal {
// NOTE: All methods should be `PA_ALWAYS_INLINE`. raw_ptr is meant to be a
// lightweight replacement of a raw pointer, hence performance is critical.
@ -120,40 +122,23 @@ enum class RawPtrTraits : unsigned {
//
// Test only.
kDummyForTest = (1 << 11),
};
// Used to combine RawPtrTraits:
constexpr RawPtrTraits operator|(RawPtrTraits a, RawPtrTraits b) {
return static_cast<RawPtrTraits>(static_cast<unsigned>(a) |
static_cast<unsigned>(b));
}
constexpr RawPtrTraits operator&(RawPtrTraits a, RawPtrTraits b) {
return static_cast<RawPtrTraits>(static_cast<unsigned>(a) &
static_cast<unsigned>(b));
}
constexpr RawPtrTraits operator~(RawPtrTraits a) {
return static_cast<RawPtrTraits>(~static_cast<unsigned>(a));
}
kAllMask = kMayDangle | kDisableHooks | kAllowPtrArithmetic |
kExperimentalAsh | kUseCountingWrapperForTest | kDummyForTest,
};
// Template specialization to use |PA_DEFINE_OPERATORS_FOR_FLAGS| without
// |kMaxValue| declaration.
template <>
constexpr inline RawPtrTraits kAllFlags<RawPtrTraits> = RawPtrTraits::kAllMask;
PA_DEFINE_OPERATORS_FOR_FLAGS(RawPtrTraits);
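
The hand-written |, & and ~ overloads above are dropped in favor of the shared flags helpers; kAllMask plus the kAllFlags specialization is presumably what lets the generic AreValidFlags() reject unknown bits without a kMaxValue enumerator. A hedged sketch, written as if inside this namespace and assuming the operators generated by PA_DEFINE_OPERATORS_FOR_FLAGS are constexpr:

constexpr auto traits =
    RawPtrTraits::kMayDangle | RawPtrTraits::kAllowPtrArithmetic;
static_assert(ContainsFlags(traits, RawPtrTraits::kMayDangle));
static_assert(AreValidFlags(traits));  // Every set bit is inside kAllMask.
static_assert(!ContainsFlags(RemoveFlags(traits, RawPtrTraits::kMayDangle),
                             RawPtrTraits::kMayDangle));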
} // namespace partition_alloc::internal
namespace base {
using partition_alloc::internal::RawPtrTraits;
namespace raw_ptr_traits {
constexpr bool Contains(RawPtrTraits a, RawPtrTraits b) {
return (a & b) != RawPtrTraits::kEmpty;
}
constexpr RawPtrTraits Remove(RawPtrTraits a, RawPtrTraits b) {
return a & ~b;
}
constexpr bool AreValid(RawPtrTraits traits) {
return Remove(traits, RawPtrTraits::kMayDangle | RawPtrTraits::kDisableHooks |
RawPtrTraits::kAllowPtrArithmetic |
RawPtrTraits::kExperimentalAsh |
RawPtrTraits::kUseCountingWrapperForTest |
RawPtrTraits::kDummyForTest) ==
RawPtrTraits::kEmpty;
}
// IsSupportedType<T>::value answers whether raw_ptr<T> 1) compiles and 2) is
// always safe at runtime. Templates that may end up using `raw_ptr<T>` should
// use IsSupportedType to ensure that raw_ptr is not used with unsupported
@ -230,26 +215,33 @@ struct IsSupportedType<T,
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
template <RawPtrTraits Traits>
using UnderlyingImplForTraits = internal::RawPtrBackupRefImpl<
/*AllowDangling=*/Contains(Traits, RawPtrTraits::kMayDangle),
/*ExperimentalAsh=*/Contains(Traits, RawPtrTraits::kExperimentalAsh)>;
/*AllowDangling=*/ContainsFlags(Traits, RawPtrTraits::kMayDangle),
/*ExperimentalAsh=*/ContainsFlags(Traits, RawPtrTraits::kExperimentalAsh)>;
#elif BUILDFLAG(USE_ASAN_UNOWNED_PTR)
template <RawPtrTraits Traits>
using UnderlyingImplForTraits =
internal::RawPtrAsanUnownedImpl<Contains(Traits,
RawPtrTraits::kAllowPtrArithmetic),
Contains(Traits, RawPtrTraits::kMayDangle)>;
using UnderlyingImplForTraits = internal::RawPtrAsanUnownedImpl<
ContainsFlags(Traits, RawPtrTraits::kAllowPtrArithmetic),
ContainsFlags(Traits, RawPtrTraits::kMayDangle)>;
#elif BUILDFLAG(USE_HOOKABLE_RAW_PTR)
template <RawPtrTraits Traits>
using UnderlyingImplForTraits = internal::RawPtrHookableImpl<
/*EnableHooks=*/!Contains(Traits, RawPtrTraits::kDisableHooks)>;
/*EnableHooks=*/!ContainsFlags(Traits, RawPtrTraits::kDisableHooks)>;
#else
template <RawPtrTraits Traits>
using UnderlyingImplForTraits = internal::RawPtrNoOpImpl;
#endif
constexpr bool IsPtrArithmeticAllowed(RawPtrTraits Traits) {
#if BUILDFLAG(ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK)
return ContainsFlags(Traits, RawPtrTraits::kAllowPtrArithmetic);
#else
return true;
#endif
}
} // namespace raw_ptr_traits
namespace test {
@ -267,9 +259,9 @@ namespace raw_ptr_traits {
// wrapper.
template <RawPtrTraits Traits>
using ImplForTraits = std::conditional_t<
Contains(Traits, RawPtrTraits::kUseCountingWrapperForTest),
ContainsFlags(Traits, RawPtrTraits::kUseCountingWrapperForTest),
test::RawPtrCountingImplWrapperForTest<
Remove(Traits, RawPtrTraits::kUseCountingWrapperForTest)>,
RemoveFlags(Traits, RawPtrTraits::kUseCountingWrapperForTest)>,
UnderlyingImplForTraits<Traits>>;
} // namespace raw_ptr_traits
@ -297,43 +289,44 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
static_assert(std::is_same_v<Impl, internal::RawPtrNoOpImpl>);
#endif // !BUILDFLAG(USE_PARTITION_ALLOC)
static_assert(raw_ptr_traits::AreValid(Traits), "Unknown raw_ptr trait(s)");
static_assert(AreValidFlags(Traits), "Unknown raw_ptr trait(s)");
static_assert(raw_ptr_traits::IsSupportedType<T>::value,
"raw_ptr<T> doesn't work with this kind of pointee type T");
// TODO(bartekn): Turn on zeroing as much as possible, to reduce
// pointer-related UBs. In the current implementation we do it only when the
// underlying implementation needs it for correctness, for performance
// reasons. There are two scenarios where it's important:
// 1. When rewriting renderer, we don't want extra overhead get in the way of
// our perf evaluation.
// 2. The same applies to rewriting 3rd party libraries, but also we want
// RawPtrNoOpImpl to be a true no-op, in case the library is linked with
// a product other than Chromium (this can be mitigated using
// `build_with_chromium` GN variable).
static constexpr bool kZeroOnInit = Impl::kMustZeroOnInit;
static constexpr bool kZeroOnMove = Impl::kMustZeroOnMove;
static constexpr bool kZeroOnDestruct = Impl::kMustZeroOnDestruct;
static constexpr bool kZeroOnConstruct =
Impl::kMustZeroOnConstruct || BUILDFLAG(RAW_PTR_ZERO_ON_CONSTRUCT);
static constexpr bool kZeroOnMove =
Impl::kMustZeroOnMove || BUILDFLAG(RAW_PTR_ZERO_ON_MOVE);
static constexpr bool kZeroOnDestruct =
Impl::kMustZeroOnDestruct || BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT);
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
// BackupRefPtr requires a non-trivial default constructor, destructor, etc.
// A non-trivial default ctor is required for complex implementations (e.g.
// BackupRefPtr), or even for NoOpImpl when zeroing is requested.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) || \
BUILDFLAG(RAW_PTR_ZERO_ON_CONSTRUCT)
PA_ALWAYS_INLINE constexpr raw_ptr() noexcept {
if constexpr (kZeroOnInit) {
if constexpr (kZeroOnConstruct) {
wrapped_ptr_ = nullptr;
}
}
#else
// raw_ptr can be trivially default constructed (leaving |wrapped_ptr_|
// uninitialized).
PA_ALWAYS_INLINE constexpr raw_ptr() noexcept = default;
static_assert(!kZeroOnConstruct);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
// BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) ||
// BUILDFLAG(RAW_PTR_ZERO_ON_CONSTRUCT)
// A non-trivial copy ctor and assignment operator are required for complex
// implementations (e.g. BackupRefPtr). Unlike the blocks around, we don't need
// these for NoOpImpl even when zeroing is requested; better to keep them
// trivial.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
PA_ALWAYS_INLINE constexpr raw_ptr(const raw_ptr& p) noexcept
: wrapped_ptr_(Impl::Duplicate(p.wrapped_ptr_)) {}
PA_ALWAYS_INLINE constexpr raw_ptr(raw_ptr&& p) noexcept {
wrapped_ptr_ = p.wrapped_ptr_;
if constexpr (kZeroOnMove) {
p.wrapped_ptr_ = nullptr;
}
}
PA_ALWAYS_INLINE constexpr raw_ptr& operator=(const raw_ptr& p) noexcept {
// Duplicate before releasing, in case the pointer is assigned to itself.
//
@ -347,9 +340,26 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
wrapped_ptr_ = new_ptr;
return *this;
}
#else
PA_ALWAYS_INLINE raw_ptr(const raw_ptr&) noexcept = default;
PA_ALWAYS_INLINE raw_ptr& operator=(const raw_ptr&) noexcept = default;
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
// BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
// A non-trivial move ctor and assignment operator are required for complex
// implementations (e.g. BackupRefPtr), or even for NoOpImpl when zeroing is
// requested.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) || \
BUILDFLAG(RAW_PTR_ZERO_ON_MOVE)
PA_ALWAYS_INLINE constexpr raw_ptr(raw_ptr&& p) noexcept {
wrapped_ptr_ = p.wrapped_ptr_;
if constexpr (kZeroOnMove) {
p.wrapped_ptr_ = nullptr;
}
}
PA_ALWAYS_INLINE constexpr raw_ptr& operator=(raw_ptr&& p) noexcept {
// Unlike the the copy version of this operator, this branch is necessaty
// Unlike the copy version of this operator, this branch is necessary
// for correctness.
if (PA_LIKELY(this != &p)) {
Impl::ReleaseWrappedPtr(wrapped_ptr_);
@ -360,7 +370,19 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
}
return *this;
}
#else
PA_ALWAYS_INLINE raw_ptr(raw_ptr&&) noexcept = default;
PA_ALWAYS_INLINE raw_ptr& operator=(raw_ptr&&) noexcept = default;
static_assert(!kZeroOnMove);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
// BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) ||
// BUILDFLAG(RAW_PTR_ZERO_ON_MOVE)
// A non-trivial default dtor is required for complex implementations (e.g.
// BackupRefPtr), or even for NoOpImpl when zeroing is requested.
#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) || \
BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
PA_ALWAYS_INLINE PA_CONSTEXPR_DTOR ~raw_ptr() noexcept {
Impl::ReleaseWrappedPtr(wrapped_ptr_);
// Work around external issues where raw_ptr is used after destruction.
@ -368,33 +390,12 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
wrapped_ptr_ = nullptr;
}
}
#else // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
// BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
// raw_ptr can be trivially default constructed (leaving |wrapped_ptr_|
// uninitialized).
PA_ALWAYS_INLINE constexpr raw_ptr() noexcept = default;
// In addition to nullptr_t ctor above, raw_ptr needs to have these
// as |=default| or |constexpr| to avoid hitting -Wglobal-constructors in
// cases like this:
// struct SomeStruct { int int_field; raw_ptr<int> ptr_field; };
// SomeStruct g_global_var = { 123, nullptr };
PA_ALWAYS_INLINE raw_ptr(const raw_ptr&) noexcept = default;
PA_ALWAYS_INLINE raw_ptr(raw_ptr&&) noexcept = default;
PA_ALWAYS_INLINE raw_ptr& operator=(const raw_ptr&) noexcept = default;
PA_ALWAYS_INLINE raw_ptr& operator=(raw_ptr&&) noexcept = default;
#else
PA_ALWAYS_INLINE ~raw_ptr() noexcept = default;
// With default constructor, destructor and move operations, we don't have an
// opportunity to zero the underlying pointer, so ensure this isn't expected.
static_assert(!kZeroOnInit);
static_assert(!kZeroOnMove);
static_assert(!kZeroOnDestruct);
#endif // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) ||
// BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR)
// BUILDFLAG(USE_ASAN_UNOWNED_PTR) || BUILDFLAG(USE_HOOKABLE_RAW_PTR) ||
// BUILDFLAG(RAW_PTR_ZERO_ON_DESTRUCT)
// Cross-kind copy constructor.
// Move is not supported as different traits may use different ref-counts, so
@ -436,8 +437,9 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
}
// Deliberately implicit, because raw_ptr is supposed to resemble raw ptr.
// Ignore kZeroOnInit, because here the caller explicitly wishes to initialize
// with nullptr. NOLINTNEXTLINE(google-explicit-constructor)
// Ignore kZeroOnConstruct, because here the caller explicitly wishes to
// initialize with nullptr.
// NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE constexpr raw_ptr(std::nullptr_t) noexcept
: wrapped_ptr_(nullptr) {}
@ -569,12 +571,6 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
return GetForDereference();
}
// Disables `(my_raw_ptr->*pmf)(...)` as a workaround for
// the ICE in GCC parsing the code, reported at
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103455
template <typename PMF>
void operator->*(PMF) const = delete;
// Deliberately implicit, because raw_ptr is supposed to resemble raw ptr.
// NOLINTNEXTLINE(google-explicit-constructor)
PA_ALWAYS_INLINE constexpr operator T*() const { return GetForExtraction(); }
@ -586,19 +582,31 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
}
PA_ALWAYS_INLINE constexpr raw_ptr& operator++() {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, 1);
return *this;
}
PA_ALWAYS_INLINE constexpr raw_ptr& operator--() {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot decrement raw_ptr unless AllowPtrArithmetic trait is present.");
wrapped_ptr_ = Impl::Retreat(wrapped_ptr_, 1);
return *this;
}
PA_ALWAYS_INLINE constexpr raw_ptr operator++(int /* post_increment */) {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
raw_ptr result = *this;
++(*this);
return result;
}
PA_ALWAYS_INLINE constexpr raw_ptr operator--(int /* post_decrement */) {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot decrement raw_ptr unless AllowPtrArithmetic trait is present.");
raw_ptr result = *this;
--(*this);
return result;
@ -607,6 +615,9 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
typename Z,
typename = std::enable_if_t<partition_alloc::internal::is_offset_type<Z>>>
PA_ALWAYS_INLINE constexpr raw_ptr& operator+=(Z delta_elems) {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, delta_elems);
return *this;
}
@ -614,10 +625,26 @@ class PA_TRIVIAL_ABI PA_GSL_POINTER raw_ptr {
typename Z,
typename = std::enable_if_t<partition_alloc::internal::is_offset_type<Z>>>
PA_ALWAYS_INLINE constexpr raw_ptr& operator-=(Z delta_elems) {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot increment raw_ptr unless AllowPtrArithmetic trait is present.");
wrapped_ptr_ = Impl::Retreat(wrapped_ptr_, delta_elems);
return *this;
}
template <typename Z,
typename U = T,
RawPtrTraits CopyTraits = Traits,
typename Unused = std::enable_if_t<
!std::is_void<typename std::remove_cv<U>::type>::value &&
partition_alloc::internal::is_offset_type<Z>>>
U& operator[](Z delta_elems) const {
static_assert(
raw_ptr_traits::IsPtrArithmeticAllowed(Traits),
"cannot index raw_ptr unless AllowPtrArithmetic trait is present.");
return wrapped_ptr_[delta_elems];
}
// Do not disable operator+() and operator-().
// They provide OOB checks, which prevent an arbitrary value from being
// assigned to raw_ptr, which would otherwise lead BRP to modify arbitrary
// memory, thinking it's a ref-count.
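
A hedged illustration of the new trait gate, assuming BUILDFLAG(ENABLE_POINTER_ARITHMETIC_TRAIT_CHECK) is on and the usual using-declarations that expose raw_ptr and RawPtrTraits are in scope:

void ArithmeticTraitExample() {
  int buffer[4] = {0, 1, 2, 3};
  raw_ptr<int, base::RawPtrTraits::kAllowPtrArithmetic> cursor = buffer;
  ++cursor;                  // OK: the trait is present.
  int second = cursor[0];    // operator[] is gated by the same static_assert.
  (void)second;
  raw_ptr<int> plain = buffer;
  // ++plain;                // Would trip the static_assert: AllowPtrArithmetic
  //                         // is missing from the traits.
  (void)plain;
}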
@ -876,7 +903,7 @@ inline constexpr bool IsRawPtrMayDangleV = false;
template <typename T, RawPtrTraits Traits>
inline constexpr bool IsRawPtrMayDangleV<raw_ptr<T, Traits>> =
raw_ptr_traits::Contains(Traits, RawPtrTraits::kMayDangle);
ContainsFlags(Traits, RawPtrTraits::kMayDangle);
// Template helpers for working with T* or raw_ptr<T>.
template <typename T>
@ -975,6 +1002,13 @@ constexpr auto ExperimentalAsh = base::RawPtrTraits::kExperimentalAsh;
// This is not meant to be added manually. You can ignore this flag.
constexpr auto LeakedDanglingUntriaged = base::RawPtrTraits::kMayDangle;
// Temporary annotation for new pointers added during the renderer rewrite.
// TODO(crbug.com/1444624): Find pre-existing dangling pointers and remove
// this annotation.
//
// DO NOT ADD new occurrences of this.
constexpr auto ExperimentalRenderer = base::RawPtrTraits::kMayDangle;
// Public version used in callback arguments when it is known that they might
// receive dangling pointers. In any other cases, please
// use one of:

View File

@ -27,7 +27,7 @@ template <bool IsAdjustablePtr, bool MayDangle>
struct RawPtrAsanUnownedImpl {
// The first two are needed for correctness. The last one isn't technically a
// must, but better to set it.
static constexpr bool kMustZeroOnInit = true;
static constexpr bool kMustZeroOnConstruct = true;
static constexpr bool kMustZeroOnMove = true;
static constexpr bool kMustZeroOnDestruct = true;
@ -144,8 +144,6 @@ struct RawPtrAsanUnownedImpl {
// This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void
IncrementPointerToMemberOperatorCountForTest() {}
};
} // namespace base::internal

View File

@ -18,6 +18,7 @@
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/tagging.h"
#include "build/build_config.h"
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
@ -70,7 +71,7 @@ struct RawPtrBackupRefImpl {
// the first two, kMustZeroOnDestruct wouldn't be needed if raw_ptr was used
// correctly, but we already caught cases where a value is written after
// destruction.
static constexpr bool kMustZeroOnInit = true;
static constexpr bool kMustZeroOnConstruct = true;
static constexpr bool kMustZeroOnMove = true;
static constexpr bool kMustZeroOnDestruct = true;
@ -475,8 +476,6 @@ struct RawPtrBackupRefImpl {
// This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void
IncrementPointerToMemberOperatorCountForTest() {}
private:
// We've evaluated several strategies (inline nothing, various parts, or

View File

@ -20,13 +20,12 @@ namespace base::test {
template <RawPtrTraits Traits>
struct RawPtrCountingImplWrapperForTest
: public raw_ptr_traits::ImplForTraits<Traits> {
static_assert(
!raw_ptr_traits::Contains(Traits,
RawPtrTraits::kUseCountingWrapperForTest));
static_assert(!ContainsFlags(Traits,
RawPtrTraits::kUseCountingWrapperForTest));
using SuperImpl = typename raw_ptr_traits::ImplForTraits<Traits>;
static constexpr bool kMustZeroOnInit = SuperImpl::kMustZeroOnInit;
static constexpr bool kMustZeroOnConstruct = SuperImpl::kMustZeroOnConstruct;
static constexpr bool kMustZeroOnMove = SuperImpl::kMustZeroOnMove;
static constexpr bool kMustZeroOnDestruct = SuperImpl::kMustZeroOnDestruct;
@ -71,11 +70,6 @@ struct RawPtrCountingImplWrapperForTest
++wrapped_ptr_less_cnt;
}
PA_ALWAYS_INLINE static constexpr void
IncrementPointerToMemberOperatorCountForTest() {
++pointer_to_member_operator_cnt;
}
template <typename T>
PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
++wrap_raw_ptr_for_dup_cnt;

View File

@ -50,7 +50,7 @@ PA_COMPONENT_EXPORT(RAW_PTR) void ResetRawPtrHooks();
template <bool EnableHooks>
struct RawPtrHookableImpl {
// Since this Impl is used for BRP-ASan, match BRP as closely as possible.
static constexpr bool kMustZeroOnInit = true;
static constexpr bool kMustZeroOnConstruct = true;
static constexpr bool kMustZeroOnMove = true;
static constexpr bool kMustZeroOnDestruct = true;
@ -207,8 +207,6 @@ struct RawPtrHookableImpl {
// This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE static constexpr void
IncrementPointerToMemberOperatorCountForTest() {}
};
} // namespace base::internal

View File

@ -13,7 +13,7 @@
namespace base::internal {
struct RawPtrNoOpImpl {
static constexpr bool kMustZeroOnInit = false;
static constexpr bool kMustZeroOnConstruct = false;
static constexpr bool kMustZeroOnMove = false;
static constexpr bool kMustZeroOnDestruct = false;
@ -111,8 +111,6 @@ struct RawPtrNoOpImpl {
// This is for accounting only, used by unit tests.
PA_ALWAYS_INLINE constexpr static void IncrementSwapCountForTest() {}
PA_ALWAYS_INLINE constexpr static void IncrementLessCountForTest() {}
PA_ALWAYS_INLINE constexpr static void
IncrementPointerToMemberOperatorCountForTest() {}
};
} // namespace base::internal

View File

@ -222,8 +222,8 @@ void* AllocateAlignedMemory(size_t alignment, size_t size) {
PA_CHECK(partition_alloc::internal::base::bits::IsPowerOfTwo(alignment));
// TODO(bartekn): See if the compiler optimizes branches down the stack on
// Mac, where PartitionPageSize() isn't constexpr.
return Allocator()->AllocNoHooks(size,
partition_alloc::PartitionPageSize());
return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(
size);
}
return AlignedAllocator()
@ -237,15 +237,16 @@ namespace allocator_shim::internal {
void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
return Allocator()->AllocNoHooks(size, partition_alloc::PartitionPageSize());
return Allocator()->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
}
void* PartitionMallocUnchecked(const AllocatorDispatch*,
size_t size,
void* context) {
partition_alloc::ScopedDisallowAllocations guard{};
return Allocator()->AllocNoHooks<partition_alloc::AllocFlags::kReturnNull>(
size, partition_alloc::PartitionPageSize());
return Allocator()
->AllocInline<partition_alloc::AllocFlags::kReturnNull |
partition_alloc::AllocFlags::kNoHooks>(size);
}
void* PartitionCalloc(const AllocatorDispatch*,
@ -255,8 +256,9 @@ void* PartitionCalloc(const AllocatorDispatch*,
partition_alloc::ScopedDisallowAllocations guard{};
const size_t total =
partition_alloc::internal::base::CheckMul(n, size).ValueOrDie();
return Allocator()->AllocNoHooks<partition_alloc::AllocFlags::kZeroFill>(
total, partition_alloc::PartitionPageSize());
return Allocator()
->AllocInline<partition_alloc::AllocFlags::kZeroFill |
partition_alloc::AllocFlags::kNoHooks>(total);
}
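
PartitionCalloc guards the n * size multiplication with checked math before requesting zero-filled memory; ValueOrDie() terminates the process on overflow instead of silently wrapping. A hedged stand-alone sketch of the same helper:

size_t CheckedTotalExample(size_t n, size_t element_size) {
  // Crashes (rather than wraps) if n * element_size overflows size_t.
  return partition_alloc::internal::base::CheckMul(n, element_size)
      .ValueOrDie();
}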
void* PartitionMemalign(const AllocatorDispatch*,
@ -293,7 +295,8 @@ void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
} else {
// size == 0 and address != null means just "free(address)".
if (address) {
partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(address);
partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
partition_alloc::FreeFlags::kNoHooks>(address);
}
}
// The original memory block (specified by address) is unchanged if ENOMEM.
@ -307,7 +310,8 @@ void* PartitionAlignedRealloc(const AllocatorDispatch* dispatch,
size_t copy_size = usage > size ? size : usage;
memcpy(new_ptr, address, copy_size);
partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(address);
partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
partition_alloc::FreeFlags::kNoHooks>(address);
}
return new_ptr;
}
@ -367,7 +371,8 @@ void PartitionFree(const AllocatorDispatch*, void* object, void* context) {
}
#endif // BUILDFLAG(PA_IS_CAST_ANDROID)
partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(object);
partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
partition_alloc::FreeFlags::kNoHooks>(object);
}
#if BUILDFLAG(IS_APPLE)
@ -384,7 +389,8 @@ void PartitionFreeDefiniteSize(const AllocatorDispatch*,
partition_alloc::ScopedDisallowAllocations guard{};
// TODO(lizeb): Optimize PartitionAlloc to use the size information. This is
// still useful though, as we avoid double-checking that the address is owned.
partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(address);
partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
partition_alloc::FreeFlags::kNoHooks>(address);
}
#endif // BUILDFLAG(IS_APPLE)
@ -469,7 +475,8 @@ void PartitionTryFreeDefault(const AllocatorDispatch*,
return allocator_shim::TryFreeDefaultFallbackToFindZoneAndFree(address);
}
partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(address);
partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
partition_alloc::FreeFlags::kNoHooks>(address);
}
#endif // BUILDFLAG(IS_APPLE)

View File

@ -39,24 +39,25 @@ void* NonScannableAllocatorImpl<quarantinable>::Alloc(size_t size) {
// TODO(bikineev): Change to LIKELY once PCScan is enabled by default.
if (PA_UNLIKELY(pcscan_enabled_.load(std::memory_order_acquire))) {
PA_DCHECK(allocator_.get());
return allocator_->root()->AllocNoHooks(
size, partition_alloc::PartitionPageSize());
return allocator_->root()
->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
}
#endif // BUILDFLAG(USE_STARSCAN)
// Otherwise, dispatch to default partition.
return allocator_shim::internal::PartitionAllocMalloc::Allocator()
->AllocNoHooks(size, partition_alloc::PartitionPageSize());
->AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
}
template <bool quarantinable>
void NonScannableAllocatorImpl<quarantinable>::Free(void* ptr) {
#if BUILDFLAG(USE_STARSCAN)
if (PA_UNLIKELY(pcscan_enabled_.load(std::memory_order_acquire))) {
allocator_->root()->FreeNoHooks(ptr);
allocator_->root()->FreeInline<partition_alloc::FreeFlags::kNoHooks>(ptr);
return;
}
#endif // BUILDFLAG(USE_STARSCAN)
partition_alloc::PartitionRoot::FreeNoHooksInUnknownRoot(ptr);
partition_alloc::PartitionRoot::FreeInlineInUnknownRoot<
partition_alloc::FreeFlags::kNoHooks>(ptr);
}
template <bool quarantinable>

View File

@ -45,35 +45,39 @@ class MetadataAllocator {
}
value_type* allocate(size_t size) {
return static_cast<value_type*>(PCScanMetadataAllocator().AllocNoHooks(
size * sizeof(value_type), PartitionPageSize()));
return static_cast<value_type*>(
PCScanMetadataAllocator()
.AllocInline<partition_alloc::AllocFlags::kNoHooks>(
size * sizeof(value_type)));
}
void deallocate(value_type* ptr, size_t size) {
PCScanMetadataAllocator().FreeNoHooks(ptr);
PCScanMetadataAllocator().FreeInline<FreeFlags::kNoHooks>(ptr);
}
};
// Inherit from it to make a class allocated on the metadata partition.
struct AllocatedOnPCScanMetadataPartition {
static void* operator new(size_t size) {
return PCScanMetadataAllocator().AllocNoHooks(size, PartitionPageSize());
return PCScanMetadataAllocator()
.AllocInline<partition_alloc::AllocFlags::kNoHooks>(size);
}
static void operator delete(void* ptr) {
PCScanMetadataAllocator().FreeNoHooks(ptr);
PCScanMetadataAllocator().FreeInline<FreeFlags::kNoHooks>(ptr);
}
};
template <typename T, typename... Args>
T* MakePCScanMetadata(Args&&... args) {
auto* memory = static_cast<T*>(
PCScanMetadataAllocator().AllocNoHooks(sizeof(T), PartitionPageSize()));
PCScanMetadataAllocator()
.AllocInline<partition_alloc::AllocFlags::kNoHooks>(sizeof(T)));
return new (memory) T(std::forward<Args>(args)...);
}
struct PCScanMetadataDeleter final {
inline void operator()(void* ptr) const {
PCScanMetadataAllocator().FreeNoHooks(ptr);
PCScanMetadataAllocator().FreeInline<FreeFlags::kNoHooks>(ptr);
}
};
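
These helpers pair a kNoHooks allocation with placement new, and PCScanMetadataDeleter lets the result be held in a std::unique_ptr. A hypothetical usage sketch (ScanStats is an invented, trivially destructible metadata type; includes and namespace qualifiers elided; note the deleter frees the memory but does not run destructors):

struct ScanStats {
  explicit ScanStats(size_t pages) : scanned_pages(pages) {}
  size_t scanned_pages;
};

void MetadataExample() {
  // Safe here because ScanStats needs no destructor call.
  std::unique_ptr<ScanStats, PCScanMetadataDeleter> stats(
      MakePCScanMetadata<ScanStats>(128u));
  stats->scanned_pages += 1;
}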

View File

@ -8,7 +8,6 @@
// This file contains method definitions to support Armv8.5-A's memory tagging
// extension.
#include <csignal>
#include <cstddef>
#include <cstdint>
@ -17,6 +16,10 @@
#include "base/allocator/partition_allocator/partition_alloc_config.h"
#include "build/build_config.h"
#if PA_CONFIG(HAS_MEMORY_TAGGING) && BUILDFLAG(IS_ANDROID)
#include <csignal>
#endif
namespace partition_alloc {
// Enum configures Arm's MTE extension to operate in different modes

View File

@ -22,7 +22,7 @@ BASE_FEATURE(kBrowserProcessMemoryPurge,
// fresh when next used, hopefully resolving the issue.
BASE_FEATURE(kCrashBrowserOnChildMismatchIfBrowserChanged,
"CrashBrowserOnChildMismatchIfBrowserChanged",
FEATURE_DISABLED_BY_DEFAULT);
FEATURE_ENABLED_BY_DEFAULT);
// Crash the browser process if a child process is created which does not match
// the browser process regardless of whether the browser package appears to have

View File

@ -21,34 +21,87 @@ void RecordJankMetricReportingIntervalTraceEvent(
int64_t reporting_interval_start_time,
int64_t reporting_interval_duration,
uint64_t janky_frame_count,
uint64_t non_janky_frame_count) {
if (reporting_interval_start_time < 0) {
uint64_t non_janky_frame_count,
int scenario) {
if (reporting_interval_start_time <= 0) {
return;
}
// The following code does nothing if base tracing is disabled.
[[maybe_unused]] auto t =
perfetto::Track(static_cast<uint64_t>(reporting_interval_start_time));
[[maybe_unused]] auto t = perfetto::Track(
static_cast<uint64_t>(reporting_interval_start_time + scenario));
TRACE_EVENT_BEGIN(
"android_webview.timeline", "JankMetricsReportingInterval", t,
"android_webview.timeline,android.ui.jank",
"JankMetricsReportingInterval", t,
base::TimeTicks::FromUptimeMillis(reporting_interval_start_time),
"janky_frames", janky_frame_count, "non_janky_frames",
non_janky_frame_count);
non_janky_frame_count, "scenario", scenario);
TRACE_EVENT_END(
"android_webview.timeline", t,
"android_webview.timeline,android.ui.jank", t,
base::TimeTicks::FromUptimeMillis(
(reporting_interval_start_time + reporting_interval_duration)));
}
} // namespace
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
enum class FrameJankStatus {
kJanky = 0,
kNonJanky = 1,
kMaxValue = kNonJanky,
};
const char* GetAndroidFrameTimelineJankHistogramName(JankScenario scenario) {
#define HISTOGRAM_NAME(x) "Android.FrameTimelineJank.FrameJankStatus." #x
switch (scenario) {
case JankScenario::PERIODIC_REPORTING:
return HISTOGRAM_NAME(Total);
case JankScenario::OMNIBOX_FOCUS:
return HISTOGRAM_NAME(OmniboxFocus);
case JankScenario::NEW_TAB_PAGE:
return HISTOGRAM_NAME(NewTabPage);
case JankScenario::STARTUP:
return HISTOGRAM_NAME(Startup);
case JankScenario::TAB_SWITCHER:
return HISTOGRAM_NAME(TabSwitcher);
case JankScenario::OPEN_LINK_IN_NEW_TAB:
return HISTOGRAM_NAME(OpenLinkInNewTab);
case JankScenario::START_SURFACE_HOMEPAGE:
return HISTOGRAM_NAME(StartSurfaceHomepage);
case JankScenario::START_SURFACE_TAB_SWITCHER:
return HISTOGRAM_NAME(StartSurfaceTabSwitcher);
case JankScenario::FEED_SCROLLING:
return HISTOGRAM_NAME(FeedScrolling);
case JankScenario::WEBVIEW_SCROLLING:
return HISTOGRAM_NAME(WebviewScrolling);
default:
return HISTOGRAM_NAME(UNKNOWN);
}
#undef HISTOGRAM_NAME
}
const char* GetAndroidFrameTimelineDurationHistogramName(
JankScenario scenario) {
#define HISTOGRAM_NAME(x) "Android.FrameTimelineJank.Duration." #x
switch (scenario) {
case JankScenario::PERIODIC_REPORTING:
return HISTOGRAM_NAME(Total);
case JankScenario::OMNIBOX_FOCUS:
return HISTOGRAM_NAME(OmniboxFocus);
case JankScenario::NEW_TAB_PAGE:
return HISTOGRAM_NAME(NewTabPage);
case JankScenario::STARTUP:
return HISTOGRAM_NAME(Startup);
case JankScenario::TAB_SWITCHER:
return HISTOGRAM_NAME(TabSwitcher);
case JankScenario::OPEN_LINK_IN_NEW_TAB:
return HISTOGRAM_NAME(OpenLinkInNewTab);
case JankScenario::START_SURFACE_HOMEPAGE:
return HISTOGRAM_NAME(StartSurfaceHomepage);
case JankScenario::START_SURFACE_TAB_SWITCHER:
return HISTOGRAM_NAME(StartSurfaceTabSwitcher);
case JankScenario::FEED_SCROLLING:
return HISTOGRAM_NAME(FeedScrolling);
case JankScenario::WEBVIEW_SCROLLING:
return HISTOGRAM_NAME(WebviewScrolling);
default:
return HISTOGRAM_NAME(UNKNOWN);
}
#undef HISTOGRAM_NAME
}
// This function is called from Java with JNI, it's declared in
// base/base_jni/JankMetricUMARecorder_jni.h which is an autogenerated
@ -59,10 +112,11 @@ void JNI_JankMetricUMARecorder_RecordJankMetrics(
const base::android::JavaParamRef<jlongArray>& java_durations_ns,
const base::android::JavaParamRef<jbooleanArray>& java_jank_status,
jlong java_reporting_interval_start_time,
jlong java_reporting_interval_duration) {
jlong java_reporting_interval_duration,
jint java_scenario_enum) {
RecordJankMetrics(env, java_durations_ns, java_jank_status,
java_reporting_interval_start_time,
java_reporting_interval_duration);
java_reporting_interval_duration, java_scenario_enum);
}
void RecordJankMetrics(
@ -70,15 +124,20 @@ void RecordJankMetrics(
const base::android::JavaParamRef<jlongArray>& java_durations_ns,
const base::android::JavaParamRef<jbooleanArray>& java_jank_status,
jlong java_reporting_interval_start_time,
jlong java_reporting_interval_duration) {
jlong java_reporting_interval_duration,
jint java_scenario_enum) {
std::vector<int64_t> durations_ns;
JavaLongArrayToInt64Vector(env, java_durations_ns, &durations_ns);
std::vector<bool> jank_status;
JavaBooleanArrayToBoolVector(env, java_jank_status, &jank_status);
std::string frame_duration_histogram_name = "Android.Jank.FrameDuration";
std::string janky_frames_histogram_name = "Android.Jank.FrameJankStatus";
JankScenario scenario = static_cast<JankScenario>(java_scenario_enum);
const char* frame_duration_histogram_name =
GetAndroidFrameTimelineDurationHistogramName(scenario);
const char* janky_frames_per_scenario_histogram_name =
GetAndroidFrameTimelineJankHistogramName(scenario);
for (const int64_t frame_duration_ns : durations_ns) {
base::UmaHistogramTimes(frame_duration_histogram_name,
@ -89,7 +148,7 @@ void RecordJankMetrics(
for (bool is_janky : jank_status) {
base::UmaHistogramEnumeration(
janky_frames_histogram_name,
janky_frames_per_scenario_histogram_name,
is_janky ? FrameJankStatus::kJanky : FrameJankStatus::kNonJanky);
if (is_janky) {
++janky_frame_count;
@ -98,7 +157,8 @@ void RecordJankMetrics(
RecordJankMetricReportingIntervalTraceEvent(
java_reporting_interval_start_time, java_reporting_interval_duration,
janky_frame_count, jank_status.size() - janky_frame_count);
janky_frame_count, jank_status.size() - janky_frame_count,
java_scenario_enum);
}
} // namespace base::android
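A minimal sketch of how the per-scenario histogram names introduced above resolve; the scenario choice and the logging call site are illustrative.
#include "base/android/jank_metric_uma_recorder.h"
#include "base/logging.h"
void LogHistogramNamesForScenario(base::android::JankScenario scenario) {
  // For JankScenario::WEBVIEW_SCROLLING this logs
  // "Android.FrameTimelineJank.FrameJankStatus.WebviewScrolling" and
  // "Android.FrameTimelineJank.Duration.WebviewScrolling", per the switch
  // statements above.
  LOG(INFO) << base::android::GetAndroidFrameTimelineJankHistogramName(scenario)
            << " / "
            << base::android::GetAndroidFrameTimelineDurationHistogramName(
                   scenario);
}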

View File

@ -7,14 +7,49 @@
#include "base/android/jni_android.h"
#include "base/base_export.h"
#include "base/feature_list.h"
namespace base::android {
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
enum class FrameJankStatus {
kJanky = 0,
kNonJanky = 1,
kMaxValue = kNonJanky,
};
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
enum class JankScenario {
PERIODIC_REPORTING = 1,
OMNIBOX_FOCUS = 2,
NEW_TAB_PAGE = 3,
STARTUP = 4,
TAB_SWITCHER = 5,
OPEN_LINK_IN_NEW_TAB = 6,
START_SURFACE_HOMEPAGE = 7,
START_SURFACE_TAB_SWITCHER = 8,
FEED_SCROLLING = 9,
WEBVIEW_SCROLLING = 10,
// This value should always be last and is not persisted to logs, exposed only
// for testing.
MAX_VALUE = WEBVIEW_SCROLLING + 1
};
// Resolves the above name to a histogram value.
BASE_EXPORT const char* GetAndroidFrameTimelineJankHistogramName(
JankScenario scenario);
// Resolves the above name to a histogram value.
BASE_EXPORT const char* GetAndroidFrameTimelineDurationHistogramName(
JankScenario scenario);
BASE_EXPORT void RecordJankMetrics(
JNIEnv* env,
const base::android::JavaParamRef<jlongArray>& java_durations_ns,
const base::android::JavaParamRef<jbooleanArray>& java_jank_status,
jlong java_reporting_interval_start_time,
jlong java_reporting_interval_duration);
jlong java_reporting_interval_duration,
jint java_scenario_enum);
} // namespace base::android
#endif // BASE_ANDROID_JANK_METRIC_UMA_RECORDER_H_

View File

@ -10,6 +10,7 @@
#include "base/android/java_exception_reporter.h"
#include "base/android/jni_string.h"
#include "base/android/jni_utils.h"
#include "base/android_runtime_jni_headers/Throwable_jni.h"
#include "base/base_jni/PiiElider_jni.h"
#include "base/debug/debugging_buildflags.h"
#include "base/logging.h"
@ -24,10 +25,6 @@ JavaVM* g_jvm = nullptr;
jobject g_class_loader = nullptr;
jmethodID g_class_loader_load_class_method_id = 0;
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
ABSL_CONST_INIT thread_local void* stack_frame_pointer = nullptr;
#endif
bool g_fatal_exception_occurred = false;
ScopedJavaLocalRef<jclass> GetClassInternal(JNIEnv* env,
@ -129,6 +126,10 @@ JavaVM* GetVM() {
return g_jvm;
}
void DisableJvmForTesting() {
g_jvm = nullptr;
}
void InitGlobalClassLoader(JNIEnv* env) {
DCHECK(g_class_loader == nullptr);
@ -267,8 +268,8 @@ void CheckException(JNIEnv* env) {
if (!HasException(env))
return;
jthrowable java_throwable = env->ExceptionOccurred();
if (java_throwable) {
ScopedJavaLocalRef<jthrowable> throwable(env, env->ExceptionOccurred());
if (throwable) {
// Clear the pending exception, since a local reference is now held.
env->ExceptionDescribe();
env->ExceptionClear();
@ -281,7 +282,7 @@ void CheckException(JNIEnv* env) {
g_fatal_exception_occurred = true;
// RVO should avoid any extra copies of the exception string.
base::android::SetJavaException(
GetJavaExceptionInfo(env, java_throwable).c_str());
GetJavaExceptionInfo(env, throwable).c_str());
}
}
@ -289,26 +290,37 @@ void CheckException(JNIEnv* env) {
LOG(FATAL) << "Please include Java exception stack in crash report";
}
std::string GetJavaExceptionInfo(JNIEnv* env, jthrowable java_throwable) {
std::string GetJavaExceptionInfo(JNIEnv* env,
const JavaRef<jthrowable>& throwable) {
ScopedJavaLocalRef<jstring> sanitized_exception_string =
Java_PiiElider_getSanitizedStacktrace(
env, ScopedJavaLocalRef(env, java_throwable));
Java_PiiElider_getSanitizedStacktrace(env, throwable);
return ConvertJavaStringToUTF8(sanitized_exception_string);
}
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
JNIStackFrameSaver::JNIStackFrameSaver(void* current_fp)
: resetter_(&stack_frame_pointer, current_fp) {}
JNIStackFrameSaver::~JNIStackFrameSaver() = default;
void* JNIStackFrameSaver::SavedFrame() {
return stack_frame_pointer;
std::string GetJavaStackTraceIfPresent() {
JNIEnv* env = nullptr;
if (g_jvm) {
g_jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_2);
}
if (!env) {
// JNI has not been initialized on this thread.
return {};
}
ScopedJavaLocalRef<jthrowable> throwable =
JNI_Throwable::Java_Throwable_Constructor(env);
std::string ret = GetJavaExceptionInfo(env, throwable);
// Strip the exception message and leave only the "at" lines. Example:
// java.lang.Throwable:
// {tab}at Clazz.method(Clazz.java:111)
// {tab}at ...
size_t newline_idx = ret.find('\n');
if (newline_idx == std::string::npos) {
// There are no java frames.
return {};
}
return ret.substr(newline_idx + 1);
}
#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
} // namespace android
} // namespace base
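A hedged sketch of consuming the new GetJavaStackTraceIfPresent(); the call site is illustrative.
#include <string>
#include "base/android/jni_android.h"
#include "base/logging.h"
void LogJavaStackIfAny() {
  // Empty when JNI is not initialized on this thread or there are no Java
  // frames, per the implementation above.
  std::string java_stack = base::android::GetJavaStackTraceIfPresent();
  if (!java_stack.empty()) {
    LOG(ERROR) << "Java stack:\n" << java_stack;
  }
}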

View File

@ -18,37 +18,6 @@
#include "base/debug/debugging_buildflags.h"
#include "base/debug/stack_trace.h"
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
// When profiling is enabled (enable_profiling=true) this macro is added to
// all generated JNI stubs so that it becomes the last thing that runs before
// control goes into Java.
//
// This macro saves stack frame pointer of the current function. Saved value
// used later by JNI_LINK_SAVED_FRAME_POINTER.
#define JNI_SAVE_FRAME_POINTER \
base::android::JNIStackFrameSaver jni_frame_saver(__builtin_frame_address(0))
// When profiling is enabled (enable_profiling=true) this macro is added to
// all generated JNI callbacks so that it becomes the first thing that runs
// after control returns from Java.
//
// This macro links stack frame of the current function to the stack frame
// saved by JNI_SAVE_FRAME_POINTER, allowing frame-based unwinding
// (used by the heap profiler) to produce complete traces.
#define JNI_LINK_SAVED_FRAME_POINTER \
base::debug::ScopedStackFrameLinker jni_frame_linker( \
__builtin_frame_address(0), \
base::android::JNIStackFrameSaver::SavedFrame())
#else
// Frame-based stack unwinding is not supported, do nothing.
#define JNI_SAVE_FRAME_POINTER
#define JNI_LINK_SAVED_FRAME_POINTER
#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
namespace base {
namespace android {
@ -83,6 +52,12 @@ BASE_EXPORT bool IsVMInitialized();
// Returns the global JVM, or nullptr if it has not been initialized.
BASE_EXPORT JavaVM* GetVM();
// Do not allow any future native->java calls.
// This is necessary in gtest DEATH_TESTS to prevent
// GetJavaStackTraceIfPresent() from accessing a defunct JVM (due to fork()).
// https://crbug.com/1484834
BASE_EXPORT void DisableJvmForTesting();
// Initializes the global ClassLoader used by the GetClass and LazyGetClass
// methods. This is needed because JNI will use the base ClassLoader when there
// is no Java code on the stack. The base ClassLoader doesn't know about any of
@ -154,28 +129,11 @@ BASE_EXPORT bool ClearException(JNIEnv* env);
BASE_EXPORT void CheckException(JNIEnv* env);
// This returns a string representation of the java stack trace.
BASE_EXPORT std::string GetJavaExceptionInfo(JNIEnv* env,
jthrowable java_throwable);
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
// Saves caller's PC and stack frame in a thread-local variable.
// Implemented only when profiling is enabled (enable_profiling=true).
class BASE_EXPORT JNIStackFrameSaver {
public:
JNIStackFrameSaver(void* current_fp);
JNIStackFrameSaver(const JNIStackFrameSaver&) = delete;
JNIStackFrameSaver& operator=(const JNIStackFrameSaver&) = delete;
~JNIStackFrameSaver();
static void* SavedFrame();
private:
const AutoReset<void*> resetter_;
};
#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
BASE_EXPORT std::string GetJavaExceptionInfo(
JNIEnv* env,
const JavaRef<jthrowable>& throwable);
// This returns a string representation of the java stack trace.
BASE_EXPORT std::string GetJavaStackTraceIfPresent();
} // namespace android
} // namespace base
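A sketch of the death-test pattern the DisableJvmForTesting() comment above describes; the test itself is hypothetical and only illustrates the ordering.
#include "base/android/jni_android.h"
#include "base/immediate_crash.h"
#include "testing/gtest/include/gtest/gtest.h"
TEST(ExampleDeathTest, DoesNotTouchDefunctJvmInForkedChild) {
  // Hypothetical: keep GetJavaStackTraceIfPresent() away from the JVM, which
  // does not survive the fork() performed by the death test.
  base::android::DisableJvmForTesting();
  EXPECT_DEATH(base::ImmediateCrash(), "");
}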

View File

@ -11,12 +11,6 @@
namespace base {
namespace android {
namespace {
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
ABSL_CONST_INIT thread_local void* stack_frame_pointer = nullptr;
#endif
} // namespace
JNIEnv* AttachCurrentThread() {
return nullptr;
}
@ -35,13 +29,19 @@ bool IsVMInitialized() {
return false;
}
void InitReplacementClassLoader(JNIEnv* env,
const JavaRef<jobject>& class_loader) {
JavaVM* GetVM() {
return nullptr;
}
void DisableJvmForTesting() {
}
void InitGlobalClassLoader(JNIEnv* env) {
}
ScopedJavaLocalRef<jclass> GetClass(JNIEnv* env,
const char* class_name,
const std::string& split_name) {
const char* split_name) {
return nullptr;
}
@ -53,7 +53,7 @@ ScopedJavaLocalRef<jclass> GetClass(JNIEnv* env, const char* class_name) {
// sensitive.
jclass LazyGetClass(JNIEnv* env,
const char* class_name,
const std::string& split_name,
const char* split_name,
std::atomic<jclass>* atomic_class_id) {
return nullptr;
}
@ -114,22 +114,14 @@ bool ClearException(JNIEnv* env) {
void CheckException(JNIEnv* env) {
}
std::string GetJavaExceptionInfo(JNIEnv* env, jthrowable java_throwable) {
std::string GetJavaExceptionInfo(JNIEnv* env,
const JavaRef<jthrowable>& java_throwable) {
return {};
}
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
JNIStackFrameSaver::JNIStackFrameSaver(void* current_fp)
: resetter_(&stack_frame_pointer, current_fp) {}
JNIStackFrameSaver::~JNIStackFrameSaver() = default;
void* JNIStackFrameSaver::SavedFrame() {
return stack_frame_pointer;
std::string GetJavaStackTraceIfPresent() {
return {};
}
#endif // BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
} // namespace android
} // namespace base

View File

@ -174,7 +174,7 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaLocalRef<jobject>> v,
ScopedJavaLocalRef<jclass> type) {
const JavaRef<jclass>& type) {
jobjectArray joa =
env->NewObjectArray(checked_cast<jsize>(v.size()), type.obj(), nullptr);
CheckException(env);
@ -188,7 +188,7 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaGlobalRef<jobject>> v,
ScopedJavaLocalRef<jclass> type) {
const JavaRef<jclass>& type) {
jobjectArray joa =
env->NewObjectArray(checked_cast<jsize>(v.size()), type.obj(), nullptr);
CheckException(env);

View File

@ -101,11 +101,11 @@ BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfObjects(
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaLocalRef<jobject>> v,
ScopedJavaLocalRef<jclass> type);
const JavaRef<jclass>& type);
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToTypedJavaArrayOfObjects(
JNIEnv* env,
base::span<const ScopedJavaGlobalRef<jobject>> v,
ScopedJavaLocalRef<jclass> type);
const JavaRef<jclass>& type);
// Returns a array of Java byte array converted from |v|.
BASE_EXPORT ScopedJavaLocalRef<jobjectArray> ToJavaArrayOfByteArray(

View File

@ -29,12 +29,8 @@ enum LibraryProcessType {
PROCESS_WEBVIEW = 3,
// Shared library is running in child process as part of webview.
PROCESS_WEBVIEW_CHILD = 4,
// Shared library is running in the app that uses weblayer.
PROCESS_WEBLAYER = 5,
// Shared library is running in child process as part of weblayer.
PROCESS_WEBLAYER_CHILD = 6,
// Shared library is running in a non-embedded WebView process.
PROCESS_WEBVIEW_NONEMBEDDED = 7,
PROCESS_WEBVIEW_NONEMBEDDED = 5,
};
// Returns the library process type this library was loaded for.

View File

@ -53,7 +53,7 @@ bool MeminfoDumpProvider::OnMemoryDump(
// would confuse data in UMA. In particular, the background/foreground session
// filter would no longer be accurate.
if (stale_data && args.level_of_detail !=
base::trace_event::MemoryDumpLevelOfDetail::DETAILED) {
base::trace_event::MemoryDumpLevelOfDetail::kDetailed) {
return true;
}

View File

@ -61,3 +61,8 @@
-assumenosideeffects class android.**, java.** {
static <fields>;
}
# Keep the names of exception types, to make it easier to understand stack
# traces in contexts where it's not trivial to deobfuscate them - for example
# when reported to app developers who are using WebView.
-keepnames class ** extends java.lang.Throwable {}

View File

@ -0,0 +1,77 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/android/shared_preferences/shared_preferences_manager.h"
#include "base/android/jni_android.h"
#include "base/android/jni_string.h"
#include "base/base_shared_preferences_jni/SharedPreferencesManager_jni.h"
#include "base/check.h"
namespace base::android {
SharedPreferencesManager::SharedPreferencesManager(const JavaRef<jobject>& jobj,
JNIEnv* env)
: java_obj_(jobj), env_(env) {}
SharedPreferencesManager::SharedPreferencesManager(
const SharedPreferencesManager& other)
: java_obj_(other.java_obj_), env_(other.env_) {}
SharedPreferencesManager::~SharedPreferencesManager() {}
void SharedPreferencesManager::RemoveKey(
const std::string& shared_preference_key) {
ScopedJavaLocalRef<jstring> jkey =
ConvertUTF8ToJavaString(env_, shared_preference_key);
Java_SharedPreferencesManager_removeKey(env_, java_obj_, jkey);
}
bool SharedPreferencesManager::ContainsKey(
const std::string& shared_preference_key) {
ScopedJavaLocalRef<jstring> jkey =
ConvertUTF8ToJavaString(env_, shared_preference_key);
return Java_SharedPreferencesManager_contains(env_, java_obj_, jkey);
}
bool SharedPreferencesManager::ReadBoolean(
const std::string& shared_preference_key,
bool default_value) {
ScopedJavaLocalRef<jstring> jkey =
ConvertUTF8ToJavaString(env_, shared_preference_key);
return Java_SharedPreferencesManager_readBoolean(env_, java_obj_, jkey,
default_value);
}
int SharedPreferencesManager::ReadInt(const std::string& shared_preference_key,
int default_value) {
ScopedJavaLocalRef<jstring> jkey =
ConvertUTF8ToJavaString(env_, shared_preference_key);
return Java_SharedPreferencesManager_readInt(env_, java_obj_, jkey,
default_value);
}
std::string SharedPreferencesManager::ReadString(
const std::string& shared_preference_key,
const std::string& default_value) {
ScopedJavaLocalRef<jstring> jkey =
ConvertUTF8ToJavaString(env_, shared_preference_key);
ScopedJavaLocalRef<jstring> jdefault_value =
ConvertUTF8ToJavaString(env_, default_value);
ScopedJavaLocalRef<jstring> java_result =
Java_SharedPreferencesManager_readString(env_, java_obj_, jkey,
jdefault_value);
return ConvertJavaStringToUTF8(java_result);
}
void SharedPreferencesManager::WriteString(
const std::string& shared_preference_key,
const std::string& value) {
ScopedJavaLocalRef<jstring> jkey =
ConvertUTF8ToJavaString(env_, shared_preference_key);
ScopedJavaLocalRef<jstring> jvalue = ConvertUTF8ToJavaString(env_, value);
Java_SharedPreferencesManager_writeString(env_, java_obj_, jkey, jvalue);
}
} // namespace base::android

View File

@ -0,0 +1,40 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_ANDROID_SHARED_PREFERENCES_SHARED_PREFERENCES_MANAGER_H_
#define BASE_ANDROID_SHARED_PREFERENCES_SHARED_PREFERENCES_MANAGER_H_
#include "base/base_export.h"
#include "base/android/jni_android.h"
namespace base::android {
// A SharedPreferencesManager that provides access to Android SharedPreferences
// with uniqueness key checking.
class BASE_EXPORT SharedPreferencesManager {
public:
explicit SharedPreferencesManager(const JavaRef<jobject>& jobj, JNIEnv* env);
SharedPreferencesManager(const SharedPreferencesManager&);
SharedPreferencesManager& operator=(const SharedPreferencesManager&) = delete;
~SharedPreferencesManager();
void RemoveKey(const std::string& shared_preference_key);
bool ContainsKey(const std::string& shared_preference_key);
bool ReadBoolean(const std::string& shared_preference_key,
bool default_value);
int ReadInt(const std::string& shared_preference_key, int default_value);
std::string ReadString(const std::string& shared_preference_key,
const std::string& default_value);
void WriteString(const std::string& shared_preference_key,
const std::string& value);
private:
ScopedJavaLocalRef<jobject> java_obj_;
raw_ptr<JNIEnv> env_;
};
} // namespace base::android
#endif // BASE_ANDROID_SHARED_PREFERENCES_SHARED_PREFERENCES_MANAGER_H_
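A hedged usage sketch for the new SharedPreferencesManager wrapper; the key name and how the manager instance is obtained are illustrative.
#include <string>
#include "base/android/shared_preferences/shared_preferences_manager.h"
std::string ReadOrSeedPref(base::android::SharedPreferencesManager& prefs) {
  if (!prefs.ContainsKey("example.key")) {  // Hypothetical key.
    prefs.WriteString("example.key", "default");
  }
  return prefs.ReadString("example.key", /*default_value=*/"");
}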

View File

@ -1,34 +0,0 @@
// Copyright 2010 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/apple/scoped_nsautorelease_pool.h"
// Note that this uses the direct runtime interface to the autorelease pool.
// https://clang.llvm.org/docs/AutomaticReferenceCounting.html#runtime-support
// This is so this can work when compiled for ARC.
extern "C" {
void* objc_autoreleasePoolPush(void);
void objc_autoreleasePoolPop(void* pool);
}
namespace base::apple {
ScopedNSAutoreleasePool::ScopedNSAutoreleasePool()
: autorelease_pool_(objc_autoreleasePoolPush()) {}
ScopedNSAutoreleasePool::~ScopedNSAutoreleasePool() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
objc_autoreleasePoolPop(autorelease_pool_);
}
// Cycle the internal pool, allowing everything there to get cleaned up and
// start anew.
void ScopedNSAutoreleasePool::Recycle() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
objc_autoreleasePoolPop(autorelease_pool_);
autorelease_pool_ = objc_autoreleasePoolPush();
}
} // namespace base::apple

View File

@ -6,7 +6,10 @@
#define BASE_APPLE_SCOPED_NSAUTORELEASE_POOL_H_
#include "base/base_export.h"
#include "base/dcheck_is_on.h"
#include "base/memory/raw_ptr_exclusion.h"
#include "base/memory/stack_allocated.h"
#include "base/thread_annotations.h"
#include "base/threading/thread_checker.h"
namespace base::apple {
@ -20,12 +23,11 @@ namespace base::apple {
// pool lower on the stack destroys all pools higher on the stack, which does
// not mesh well with the existence of C++ objects for each pool.
//
// TODO(https://crbug.com/1424190): Enforce stack-only use via the
// STACK_ALLOCATED annotation.
//
// Use this class only in C++ code; use @autoreleasepool in Obj-C(++) code.
class BASE_EXPORT ScopedNSAutoreleasePool {
STACK_ALLOCATED();
public:
ScopedNSAutoreleasePool();
@ -43,11 +45,21 @@ class BASE_EXPORT ScopedNSAutoreleasePool {
void Recycle();
private:
// Pushes the autorelease pool and does all required verification.
void PushImpl() VALID_CONTEXT_REQUIRED(thread_checker_);
// Pops the autorelease pool and does all required verification.
void PopImpl() VALID_CONTEXT_REQUIRED(thread_checker_);
// This field is not a raw_ptr<> because it is a pointer to an Objective-C
// object.
RAW_PTR_EXCLUSION void* autorelease_pool_ GUARDED_BY_CONTEXT(thread_checker_);
THREAD_CHECKER(thread_checker_);
#if DCHECK_IS_ON()
unsigned long level_ = 0;
#endif
};
} // namespace base::apple
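A minimal sketch of the C++-only usage the header comment describes, recycling the pool inside a loop; the loop body is illustrative.
#include "base/apple/scoped_nsautorelease_pool.h"
void DrainPerIteration(int iterations) {
  base::apple::ScopedNSAutoreleasePool pool;
  for (int i = 0; i < iterations; ++i) {
    // ... work that may autorelease Objective-C objects (illustrative) ...
    pool.Recycle();  // Drop autoreleased objects before the next iteration.
  }
}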

View File

@ -0,0 +1,140 @@
// Copyright 2010 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/apple/scoped_nsautorelease_pool.h"
#include "base/dcheck_is_on.h"
#if DCHECK_IS_ON()
#import <Foundation/Foundation.h>
#include "base/debug/crash_logging.h"
#include "base/debug/stack_trace.h"
#include "base/immediate_crash.h"
#include "base/strings/sys_string_conversions.h"
#endif
// Note that this uses the direct runtime interface to the autorelease pool.
// https://clang.llvm.org/docs/AutomaticReferenceCounting.html#runtime-support
// This is so this can work when compiled for ARC.
extern "C" {
void* objc_autoreleasePoolPush(void);
void objc_autoreleasePoolPop(void* pool);
}
namespace base::apple {
#if DCHECK_IS_ON()
namespace {
using BlockReturningStackTrace = debug::StackTrace (^)();
// Because //base is not allowed to define Objective-C classes, which would be
// the most reasonable way to wrap a C++ object like base::debug::StackTrace, do
// it in a much more absurd, yet not completely unreasonable, way.
//
// This uses a default argument for the stack trace so that the creation of the
// stack trace is attributed to the parent function.
BlockReturningStackTrace MakeBlockReturningStackTrace(
debug::StackTrace stack_trace = debug::StackTrace()) {
// Return a block that references the stack trace. That will cause a copy of
// the stack trace to be made by the block, and because blocks are effectively
// Objective-C objects, they can be used in the NSThread thread dictionary.
return ^() {
return stack_trace;
};
}
// For each NSThread, maintain an array of stack traces, one for the state of
// the stack for each invocation of an autorelease pool push. Even though one is
// allowed to clear out an entire stack of autorelease pools by releasing one
// near the bottom, because the stack abstraction is mapped to C++ classes, this
// cannot be allowed.
NSMutableArray<BlockReturningStackTrace>* GetLevelStackTraces() {
NSMutableArray* traces =
NSThread.currentThread
.threadDictionary[@"CrScopedNSAutoreleasePoolTraces"];
if (traces) {
return traces;
}
traces = [NSMutableArray array];
NSThread.currentThread.threadDictionary[@"CrScopedNSAutoreleasePoolTraces"] =
traces;
return traces;
}
} // namespace
#endif
ScopedNSAutoreleasePool::ScopedNSAutoreleasePool() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
PushImpl();
}
ScopedNSAutoreleasePool::~ScopedNSAutoreleasePool() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
PopImpl();
}
void ScopedNSAutoreleasePool::Recycle() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
// Cycle the internal pool, allowing everything there to get cleaned up and
// start anew.
PopImpl();
PushImpl();
}
void ScopedNSAutoreleasePool::PushImpl() {
#if DCHECK_IS_ON()
[GetLevelStackTraces() addObject:MakeBlockReturningStackTrace()];
level_ = GetLevelStackTraces().count;
#endif
autorelease_pool_ = objc_autoreleasePoolPush();
}
void ScopedNSAutoreleasePool::PopImpl() {
#if DCHECK_IS_ON()
auto level_count = GetLevelStackTraces().count;
if (level_ != level_count) {
NSLog(@"Popping autorelease pool at level %lu while pools exist through "
@"level %lu",
level_, level_count);
if (level_ < level_count) {
NSLog(@"WARNING: This abandons ScopedNSAutoreleasePool objects which now "
@"have no corresponding implementation.");
} else {
NSLog(@"ERROR: This is an abandoned ScopedNSAutoreleasePool that cannot "
@"release; expect the autorelease machinery to crash.");
}
NSLog(@"====================");
NSString* current_stack = SysUTF8ToNSString(debug::StackTrace().ToString());
NSLog(@"Pop:\n%@", current_stack);
[GetLevelStackTraces()
enumerateObjectsWithOptions:NSEnumerationReverse
usingBlock:^(BlockReturningStackTrace obj,
NSUInteger idx, BOOL* stop) {
NSLog(@"====================");
NSLog(@"Autorelease pool level %lu was pushed:\n%@",
idx + 1, SysUTF8ToNSString(obj().ToString()));
}];
// Assume an interactive use of Chromium where crashing immediately is
// desirable, and die. When investigating a failing automated test that dies
// here, remove these crash keys and call to ImmediateCrash() to reveal
// where the abandoned ScopedNSAutoreleasePool was expected to be released.
SCOPED_CRASH_KEY_NUMBER("ScopedNSAutoreleasePool", "currentlevel", level_);
SCOPED_CRASH_KEY_NUMBER("ScopedNSAutoreleasePool", "levelcount",
level_count);
SCOPED_CRASH_KEY_STRING1024("ScopedNSAutoreleasePool", "currentstack",
SysNSStringToUTF8(current_stack));
SCOPED_CRASH_KEY_STRING1024("ScopedNSAutoreleasePool", "recentstack",
GetLevelStackTraces().lastObject().ToString());
ImmediateCrash();
}
[GetLevelStackTraces() removeLastObject];
#endif
objc_autoreleasePoolPop(autorelease_pool_);
}
} // namespace base::apple

View File

@ -50,6 +50,10 @@ void Base64Encode(StringPiece input, std::string* output) {
*output = Base64Encode(base::as_bytes(base::make_span(input)));
}
std::string Base64Encode(StringPiece input) {
return Base64Encode(base::as_bytes(base::make_span(input)));
}
bool Base64Decode(StringPiece input,
std::string* output,
Base64DecodePolicy policy) {

View File

@ -25,8 +25,13 @@ BASE_EXPORT void Base64EncodeAppend(span<const uint8_t> input,
std::string* output);
// Encodes the input string in base64.
// DEPRECATED, use `std::string Base64Encode(StringPiece input)` instead.
// TODO(crbug.com/1486214): Remove this.
BASE_EXPORT void Base64Encode(StringPiece input, std::string* output);
// Encodes the input string in base64.
BASE_EXPORT std::string Base64Encode(StringPiece input);
// Decodes the base64 input string. Returns true if successful and false
// otherwise. The output string is only modified if successful. The decoding can
// be done in-place.
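A small sketch of the value-returning overload that the deprecation note above points to; the round trip assumes the default decode policy.
#include <string>
#include "base/base64.h"
bool Base64RoundTripSketch() {
  std::string encoded = base::Base64Encode("hello");  // "aGVsbG8="
  std::string decoded;
  return base::Base64Decode(encoded, &decoded) && decoded == "hello";
}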

View File

@ -22,6 +22,7 @@
#include "base/nix/xdg_util.h"
#include "base/notreached.h"
#include "base/path_service.h"
#include "base/posix/sysctl.h"
#include "base/process/process_metrics.h"
#include "build/build_config.h"
@ -48,16 +49,12 @@ bool PathProviderPosix(int key, FilePath* result) {
return true;
#elif BUILDFLAG(IS_FREEBSD)
int name[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
char bin_dir[PATH_MAX + 1];
size_t length = sizeof(bin_dir);
// Upon return, |length| is the number of bytes written to |bin_dir|
// including the string terminator.
int error = sysctl(name, 4, bin_dir, &length, NULL, 0);
if (error < 0 || length <= 1) {
absl::optional<std::string> bin_dir = StringSysctl(name, std::size(name));
if (!bin_dir.has_value() || bin_dir.value().length() <= 1) {
NOTREACHED() << "Unable to resolve path.";
return false;
}
*result = FilePath(FilePath::StringType(bin_dir, length - 1));
*result = FilePath(bin_dir.value());
return true;
#elif BUILDFLAG(IS_SOLARIS)
char bin_dir[PATH_MAX + 1];

View File

@ -0,0 +1,49 @@
// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This is a "No Compile Test" suite.
// http://dev.chromium.org/developers/testing/no-compile-tests
#include "base/callback_list.h"
#include <memory>
#include <utility>
#include "base/functional/bind.h"
#include "base/functional/callback_helpers.h"
namespace base {
class Foo {
public:
Foo() {}
~Foo() {}
};
class FooListener {
public:
FooListener() = default;
FooListener(const FooListener&) = delete;
FooListener& operator=(const FooListener&) = delete;
void GotAScopedFoo(std::unique_ptr<Foo> f) { foo_ = std::move(f); }
std::unique_ptr<Foo> foo_;
};
// Callbacks run with a move-only typed parameter.
//
// CallbackList does not support move-only typed parameters. Notify() is
// designed to take zero or more parameters, and run each registered callback
// with them. With move-only types, the parameter will be set to NULL after the
// first callback has been run.
void WontCompile() {
FooListener f;
RepeatingCallbackList<void(std::unique_ptr<Foo>)> c1;
CallbackListSubscription sub =
c1.Add(BindRepeating(&FooListener::GotAScopedFoo, Unretained(&f)));
c1.Notify(std::unique_ptr<Foo>(new Foo())); // expected-error@*:* {{call to implicitly-deleted copy constructor of 'std::unique_ptr<base::Foo>'}}
}
} // namespace base

View File

@ -40,8 +40,13 @@ struct IsStringIterImpl
// `static_assert(is_trivial_v<value_type>)` inside libc++'s std::basic_string.
template <typename T>
struct IsStringIter
: std::conjunction<std::is_trivial<iter_value_t<T>>, IsStringIterImpl<T>> {
};
: std::conjunction<
std::disjunction<std::is_same<iter_value_t<T>, char>,
std::is_same<iter_value_t<T>, wchar_t>,
std::is_same<iter_value_t<T>, char8_t>,
std::is_same<iter_value_t<T>, char16_t>,
std::is_same<iter_value_t<T>, char32_t>>,
IsStringIterImpl<T>> {};
// An iterator to std::array is contiguous.
// Reference: https://wg21.link/array.overview#1

View File

@ -5,6 +5,8 @@
#ifndef BASE_CONTAINERS_CXX20_ERASE_INTERNAL_H_
#define BASE_CONTAINERS_CXX20_ERASE_INTERNAL_H_
#include <cstddef>
// Internal portion of base/containers/cxx20_erase_*.h. Please include those
// headers instead of including this directly.

View File

@ -152,7 +152,7 @@ class EnumSet {
return i;
}
const raw_ptr<const EnumBitSet, DanglingUntriaged> enums_;
const raw_ptr<const EnumBitSet> enums_;
size_t i_;
};

View File

@ -85,21 +85,21 @@ BASE_EXPORT size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size);
} // namespace base
// Convenience macro that copies the null-terminated string from |c_str| into a
// stack-allocated char array named |var_name| that holds up to |char_count|
// Convenience macro that copies the null-terminated string from `c_str` into a
// stack-allocated char array named `var_name` that holds up to `array_size - 1`
// characters and should be preserved in memory dumps.
#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, char_count) \
char var_name[char_count]; \
::base::strlcpy(var_name, (c_str), sizeof(var_name)); \
#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, array_size) \
char var_name[array_size] = {}; \
::base::strlcpy(var_name, (c_str), std::size(var_name)); \
::base::debug::Alias(var_name)
#define DEBUG_ALIAS_FOR_U16CSTR(var_name, c_str, char_count) \
char16_t var_name[char_count]; \
#define DEBUG_ALIAS_FOR_U16CSTR(var_name, c_str, array_size) \
char16_t var_name[array_size] = {}; \
::base::u16cstrlcpy(var_name, (c_str), std::size(var_name)); \
::base::debug::Alias(var_name)
#define DEBUG_ALIAS_FOR_WCHARCSTR(var_name, c_str, char_count) \
wchar_t var_name[char_count]; \
#define DEBUG_ALIAS_FOR_WCHARCSTR(var_name, c_str, array_size) \
wchar_t var_name[array_size] = {}; \
::base::wcslcpy(var_name, (c_str), std::size(var_name)); \
::base::debug::Alias(var_name)
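A hedged sketch of how the zero-initialized, std::size-bounded macros above are typically used to keep a string visible in crash dumps; the surrounding function and the dump call are illustrative.
#include <string>
#include "base/debug/alias.h"
#include "base/debug/dump_without_crashing.h"
void ReportUnexpectedState(const std::string& reason) {
  // Copies at most 127 characters plus the terminator into a zeroed stack
  // buffer that the compiler must keep alive for the minidump.
  DEBUG_ALIAS_FOR_CSTR(reason_copy, reason.c_str(), 128);
  base::debug::DumpWithoutCrashing();
}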

View File

@ -14,7 +14,8 @@ declare_args() {
#
# Although it should work on other platforms as well, for the above reasons,
# we currently enable it only for Android when compiling for Arm64.
build_allocation_stack_trace_recorder = false
build_allocation_stack_trace_recorder =
!is_official_build && current_cpu == "arm64" && is_android
}
declare_args() {

View File

@ -1267,7 +1267,7 @@ void PopulateCompileUnitOffsets(int fd,
} // namespace
bool GetDwarfSourceLineNumber(void* pc,
bool GetDwarfSourceLineNumber(const void* pc,
uintptr_t cu_offset,
char* out,
size_t out_size) {
@ -1291,7 +1291,7 @@ bool GetDwarfSourceLineNumber(void* pc,
return true;
}
void GetDwarfCompileUnitOffsets(void* const* trace,
void GetDwarfCompileUnitOffsets(const void* const* trace,
uint64_t* cu_offsets,
size_t num_frames) {
// Ensure `cu_offsets` always has a known state.

View File

@ -15,7 +15,7 @@ namespace debug {
//
// Expects `trace` and `cu_offsets` to be `num_frames` in size. If a frame
// cannot be found, the corresponding value stored in `cu_offsets` is 0.
void GetDwarfCompileUnitOffsets(void* const* trace,
void GetDwarfCompileUnitOffsets(const void* const* trace,
uint64_t* cu_offsets,
size_t num_frames);
@ -29,7 +29,7 @@ void GetDwarfCompileUnitOffsets(void* const* trace,
// ../../base/debug/stack_trace_unittest.cc:120,16
//
// This means `pc` was from line 120, column 16, of stack_trace_unittest.cc.
bool GetDwarfSourceLineNumber(void* pc,
bool GetDwarfSourceLineNumber(const void* pc,
uint64_t cu_offsets,
char* out,
size_t out_size);

View File

@ -137,7 +137,7 @@ class BASE_EXPORT StackTrace {
void InitTrace(const _CONTEXT* context_record);
#endif
void* trace_[kMaxTraces];
const void* trace_[kMaxTraces];
// The number of valid frames in |trace_|.
size_t count_;
@ -148,7 +148,7 @@ BASE_EXPORT std::ostream& operator<<(std::ostream& os, const StackTrace& s);
// Record a stack trace with up to |count| frames into |trace|. Returns the
// number of frames read.
BASE_EXPORT size_t CollectStackTrace(void** trace, size_t count);
BASE_EXPORT size_t CollectStackTrace(const void** trace, size_t count);
#if BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)

View File

@ -31,7 +31,7 @@ struct StackCrawlState {
max_depth(max_depth),
have_skipped_self(false) {}
raw_ptr<uintptr_t> frames;
raw_ptr<uintptr_t, AllowPtrArithmetic> frames;
size_t frame_count;
size_t max_depth;
bool have_skipped_self;
@ -75,7 +75,7 @@ bool EnableInProcessStackDumping() {
return (sigaction(SIGPIPE, &action, NULL) == 0);
}
size_t CollectStackTrace(void** trace, size_t count) {
size_t CollectStackTrace(const void** trace, size_t count) {
StackCrawlState state(reinterpret_cast<uintptr_t*>(trace), count);
_Unwind_Backtrace(&TraceStackFrame, &state);
return state.frame_count;

View File

@ -29,7 +29,7 @@ namespace debug {
namespace {
struct BacktraceData {
void** trace_array;
const void** trace_array;
size_t* count;
size_t max;
};
@ -203,7 +203,7 @@ void SymbolMap::Populate() {
// Returns true if |address| is contained by any of the memory regions
// mapped for |module_entry|.
bool ModuleContainsFrameAddress(void* address,
bool ModuleContainsFrameAddress(const void* address,
const SymbolMap::Module& module_entry) {
for (size_t i = 0; i < module_entry.segment_count; ++i) {
const SymbolMap::Segment& segment = module_entry.segments[i];
@ -229,7 +229,7 @@ bool EnableInProcessStackDumping() {
return true;
}
size_t CollectStackTrace(void** trace, size_t count) {
size_t CollectStackTrace(const void** trace, size_t count) {
size_t frame_count = 0;
BacktraceData data = {trace, &frame_count, count};
_Unwind_Backtrace(&UnwindStore, &data);

View File

@ -60,6 +60,8 @@
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#include <sys/prctl.h>
#include "base/debug/proc_maps_linux.h"
#endif
@ -161,7 +163,7 @@ class BacktraceOutputHandler {
};
#if defined(HAVE_BACKTRACE)
void OutputPointer(void* pointer, BacktraceOutputHandler* handler) {
void OutputPointer(const void* pointer, BacktraceOutputHandler* handler) {
// This should be more than enough to store a 64-bit number in hex:
// 16 hex digits + 1 for null-terminator.
char buf[17] = { '\0' };
@ -189,7 +191,7 @@ void OutputFrameId(size_t frame_id, BacktraceOutputHandler* handler) {
}
#endif // defined(USE_SYMBOLIZE)
void ProcessBacktrace(void* const* trace,
void ProcessBacktrace(const void* const* trace,
size_t size,
const char* prefix_string,
BacktraceOutputHandler* handler) {
@ -216,8 +218,8 @@ void ProcessBacktrace(void* const* trace,
// Subtract by one as return address of function may be in the next
// function when a function is annotated as noreturn.
void* address = static_cast<char*>(trace[i]) - 1;
if (google::Symbolize(address, buf, sizeof(buf))) {
const void* address = static_cast<const char*>(trace[i]) - 1;
if (google::Symbolize(const_cast<void*>(address), buf, sizeof(buf))) {
handler->HandleOutput(buf);
#if BUILDFLAG(ENABLE_STACK_TRACE_LINE_NUMBERS)
// Only output the source line number if the offset was found. Otherwise,
@ -266,8 +268,8 @@ void ProcessBacktrace(void* const* trace,
}
printed = true;
#else // defined(HAVE_DLADDR)
std::unique_ptr<char*, FreeDeleter> trace_symbols(
backtrace_symbols(trace, static_cast<int>(size)));
std::unique_ptr<char*, FreeDeleter> trace_symbols(backtrace_symbols(
const_cast<void* const*>(trace), static_cast<int>(size)));
if (trace_symbols.get()) {
for (size_t i = 0; i < size; ++i) {
std::string trace_symbol = trace_symbols.get()[i];
@ -762,13 +764,51 @@ class SandboxSymbolizeHelper {
return -1;
}
// This class is copied from
// third_party/crashpad/crashpad/util/linux/scoped_pr_set_dumpable.h.
// It aims at ensuring the process is dumpable before opening /proc/self/mem.
// If the process is already dumpable, this class doesn't do anything.
class ScopedPrSetDumpable {
public:
// Uses `PR_SET_DUMPABLE` to make the current process dumpable.
//
// Restores the dumpable flag to its original value on destruction. If the
// original value couldn't be determined, the destructor attempts to
// restore the flag to 0 (non-dumpable).
explicit ScopedPrSetDumpable() {
int result = prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
was_dumpable_ = result > 0;
if (!was_dumpable_) {
std::ignore = prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
}
}
ScopedPrSetDumpable(const ScopedPrSetDumpable&) = delete;
ScopedPrSetDumpable& operator=(const ScopedPrSetDumpable&) = delete;
~ScopedPrSetDumpable() {
if (!was_dumpable_) {
std::ignore = prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
}
}
private:
bool was_dumpable_;
};
// Set the base address for each memory region by reading ELF headers in
// process memory.
void SetBaseAddressesForMemoryRegions() {
base::ScopedFD mem_fd(
HANDLE_EINTR(open("/proc/self/mem", O_RDONLY | O_CLOEXEC)));
if (!mem_fd.is_valid())
return;
base::ScopedFD mem_fd;
{
ScopedPrSetDumpable s;
mem_fd = base::ScopedFD(
HANDLE_EINTR(open("/proc/self/mem", O_RDONLY | O_CLOEXEC)));
if (!mem_fd.is_valid()) {
return;
}
}
auto safe_memcpy = [&mem_fd](void* dst, uintptr_t src, size_t size) {
return HANDLE_EINTR(pread(mem_fd.get(), dst, size,
@ -986,19 +1026,18 @@ bool SetStackDumpFirstChanceCallback(bool (*handler)(int, siginfo_t*, void*)) {
}
#endif
size_t CollectStackTrace(void** trace, size_t count) {
size_t CollectStackTrace(const void** trace, size_t count) {
// NOTE: This code MUST be async-signal safe (it's used by in-process
// stack dumping signal handler). NO malloc or stdio is allowed here.
#if defined(NO_UNWIND_TABLES) && BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
// If we do not have unwind tables, then try tracing using frame pointers.
return base::debug::TraceStackFramePointers(const_cast<const void**>(trace),
count, 0);
return base::debug::TraceStackFramePointers(trace, count, 0);
#elif defined(HAVE_BACKTRACE)
// Though the backtrace API man page does not list any possible negative
// return values, we take no chance.
return base::saturated_cast<size_t>(
backtrace(trace, base::saturated_cast<int>(count)));
backtrace(const_cast<void**>(trace), base::saturated_cast<int>(count)));
#else
return 0;
#endif

View File

@ -17,8 +17,8 @@
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/memory/singleton.h"
#include "base/strings/strcat_win.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
@ -201,8 +201,8 @@ bool InitializeSymbols() {
return false;
}
std::wstring new_path = StringPrintf(L"%ls;%ls", symbols_path,
GetExePath().DirName().value().c_str());
std::wstring new_path =
StrCat({symbols_path, L";", GetExePath().DirName().value()});
if (!SymSetSearchPathW(GetCurrentProcess(), new_path.c_str())) {
g_init_error = GetLastError();
DLOG(WARNING) << "SymSetSearchPath failed." << g_init_error;
@ -324,9 +324,9 @@ bool EnableInProcessStackDumping() {
return InitializeSymbols();
}
NOINLINE size_t CollectStackTrace(void** trace, size_t count) {
NOINLINE size_t CollectStackTrace(const void** trace, size_t count) {
// When walking our own stack, use CaptureStackBackTrace().
return CaptureStackBackTrace(0, count, trace, NULL);
return CaptureStackBackTrace(0, count, const_cast<void**>(trace), NULL);
}
StackTrace::StackTrace(EXCEPTION_POINTERS* exception_pointers) {

View File

@ -87,13 +87,8 @@ class EarlyFeatureAccessTracker {
SCOPED_CRASH_KEY_STRING256("FeatureList", "feature-accessed-too-early",
feature->name);
#endif // !BUILDFLAG(IS_NACL)
// Fail if DCHECKs are enabled.
DCHECK(!feature) << "Accessed feature " << feature->name
<< " before FeatureList registration.";
// TODO(crbug.com/1383852): When we believe that all early accesses have
// been fixed, remove this base::debug::DumpWithoutCrashing() and change the
// above DCHECK to a CHECK.
base::debug::DumpWithoutCrashing();
CHECK(!feature) << "Accessed feature " << feature->name
<< " before FeatureList registration.";
#endif // !BUILDFLAG(IS_IOS) && !BUILDFLAG(IS_ANDROID) &&
// !BUILDFLAG(IS_CHROMEOS)
}

View File

@ -32,6 +32,12 @@ BASE_FEATURE(kSupportsUserDataFlatHashMap,
"SupportsUserDataFlatHashMap",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kUseRustJsonParser,
"UseRustJsonParser",
FEATURE_DISABLED_BY_DEFAULT);
BASE_FEATURE(kJsonNegativeZero, "JsonNegativeZero", FEATURE_ENABLED_BY_DEFAULT);
#if BUILDFLAG(IS_ANDROID)
// Force to enable LowEndDeviceMode partially on Android mid-range devices.
// Such devices aren't considered low-end, but we'd like experiment with
@ -46,6 +52,10 @@ BASE_FEATURE(kPartialLowEndModeOnMidRangeDevices,
"PartialLowEndModeOnMidRangeDevices",
base::FEATURE_ENABLED_BY_DEFAULT);
// Whether to report frame metrics to the Android.FrameTimeline.* histograms.
BASE_FEATURE(kCollectAndroidFrameTimelineMetrics,
"CollectAndroidFrameTimelineMetrics",
base::FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(IS_ANDROID)
} // namespace base::features
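A minimal sketch of gating on the new Android frame-timeline feature declared above; the call site is illustrative.
#include "base/feature_list.h"
#include "base/features.h"
#include "build/build_config.h"
bool ShouldReportFrameTimeline() {
#if BUILDFLAG(IS_ANDROID)
  return base::FeatureList::IsEnabled(
      base::features::kCollectAndroidFrameTimelineMetrics);
#else
  return false;
#endif
}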

View File

@ -23,10 +23,16 @@ BASE_EXPORT BASE_DECLARE_FEATURE(kOptimizeDataUrls);
BASE_EXPORT BASE_DECLARE_FEATURE(kSupportsUserDataFlatHashMap);
BASE_EXPORT BASE_DECLARE_FEATURE(kUseRustJsonParser);
BASE_EXPORT BASE_DECLARE_FEATURE(kJsonNegativeZero);
#if BUILDFLAG(IS_ANDROID)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartialLowEndModeOnMidRangeDevices);
extern const BASE_EXPORT FeatureParam<bool>
kPartialLowEndModeExcludeLowEndBackgroundCleanup;
BASE_EXPORT BASE_DECLARE_FEATURE(kCollectAndroidFrameTimelineMetrics);
#endif
} // namespace base::features

View File

@ -0,0 +1,66 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/files/block_tests_writing_to_special_dirs.h"
#include <utility>
#include "base/files/file_path.h"
#include "base/no_destructor.h"
#include "base/path_service.h"
namespace base {
// static
absl::optional<BlockTestsWritingToSpecialDirs>&
BlockTestsWritingToSpecialDirs::Get() {
static NoDestructor<absl::optional<BlockTestsWritingToSpecialDirs>>
block_tests_writing_to_special_dirs;
return *block_tests_writing_to_special_dirs;
}
// static
bool BlockTestsWritingToSpecialDirs::CanWriteToPath(const FilePath& path) {
auto& dir_blocker = Get();
if (!dir_blocker.has_value()) {
return true;
}
if (!dir_blocker->blocked_dirs_.empty()) {
// `blocked_paths_` needs to be initialized lazily because PathService::Get
// can't be called from the test harness code before the individual tests
// run. On Windows, calling PathService::Get in the test harness startup
// code causes user32.dll to get loaded, which breaks delayload_unittests.
// On the Mac, it triggers a change in `AmIBundled`.
for (const int dir_key : dir_blocker->blocked_dirs_) {
// If test infrastructure has overridden `dir_key` already, there is no
// need to block writes to it. Android tests apparently do this.
if (PathService::IsOverriddenForTesting(dir_key)) {
continue;
}
FilePath path_to_block;
// Sandbox can make PathService::Get fail.
if (PathService::Get(dir_key, &path_to_block)) {
dir_blocker->blocked_paths_.insert(std::move(path_to_block));
}
}
dir_blocker->blocked_dirs_.clear();
}
for (const auto& path_to_block : dir_blocker->blocked_paths_) {
if (path_to_block.IsParent(path)) {
(*dir_blocker->failure_callback_)(path);
return false;
}
}
return true;
}
BlockTestsWritingToSpecialDirs::BlockTestsWritingToSpecialDirs(
std::vector<int> blocked_dirs,
FileWriteBlockedForTestingFunctionPtr failure_callback)
: blocked_dirs_(std::move(blocked_dirs)),
failure_callback_(failure_callback) {}
BlockTestsWritingToSpecialDirs::~BlockTestsWritingToSpecialDirs() = default;
} // namespace base

View File

@ -0,0 +1,56 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_FILES_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
#define BASE_FILES_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
#include <set>
#include <vector>
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
namespace base {
class FilePath;
using FileWriteBlockedForTestingFunctionPtr = void (*)(const FilePath&);
// Utility class for production code to check if writing to special directories
// is blocked for tests.
class BASE_EXPORT BlockTestsWritingToSpecialDirs {
public:
static bool CanWriteToPath(const FilePath& path);
BlockTestsWritingToSpecialDirs(
std::vector<int> blocked_dirs,
FileWriteBlockedForTestingFunctionPtr failure_callback);
BlockTestsWritingToSpecialDirs(
const BlockTestsWritingToSpecialDirs& blocker) = delete;
BlockTestsWritingToSpecialDirs& operator=(
const BlockTestsWritingToSpecialDirs&) = delete;
~BlockTestsWritingToSpecialDirs();
private:
friend class BlockTestsWritingToSpecialDirsTest;
friend class ScopedBlockTestsWritingToSpecialDirs;
// This private method is used by `ScopedBlockTestsWritingToSpecialDirs` to
// create an object of this class stored in a function static object.
// `CanWriteToPath` above checks the paths stored in that object, if it is
// set. Thus, only ScopedBlockTestsWritingToSpecialDirs should be able to
// block tests writing to special dirs.
static absl::optional<BlockTestsWritingToSpecialDirs>& Get();
// `blocked_paths_` will be initialized lazily, from `blocked_dirs_`.
std::set<FilePath> blocked_paths_;
std::vector<int> blocked_dirs_;
FileWriteBlockedForTestingFunctionPtr failure_callback_ = nullptr;
};
} // namespace base
#endif // BASE_FILES_BLOCK_TESTS_WRITING_TO_SPECIAL_DIRS_H_
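A hedged sketch of the production-side check that CanWriteToPath() enables; the helper and its call pattern are illustrative.
#include "base/containers/span.h"
#include "base/files/block_tests_writing_to_special_dirs.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
bool WriteIfAllowed(const base::FilePath& path, base::span<const uint8_t> data) {
  // In a test that installed a blocker, this reports the violation through the
  // registered callback and returns false.
  if (!base::BlockTestsWritingToSpecialDirs::CanWriteToPath(path)) {
    return false;
  }
  return base::WriteFile(path, data);
}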

View File

@ -781,6 +781,7 @@ bool CreateDirectoryAndGetError(const FilePath& full_path,
if (!DirectoryExists(subpath)) {
if (error)
*error = File::OSErrorToFileError(saved_errno);
errno = saved_errno;
return false;
}
}

View File

@ -0,0 +1,60 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/files/scoped_temp_file.h"
#include <utility>
#include "base/check.h"
#include "base/files/file_util.h"
#include "base/logging.h"
namespace base {
ScopedTempFile::ScopedTempFile() = default;
ScopedTempFile::ScopedTempFile(ScopedTempFile&& other) noexcept
: path_(std::move(other.path_)) {}
ScopedTempFile& ScopedTempFile::operator=(ScopedTempFile&& other) noexcept {
if (!path_.empty()) {
CHECK_NE(path_, other.path_);
}
if (!Delete()) {
DLOG(WARNING) << "Could not delete temp dir in operator=().";
}
path_ = std::move(other.path_);
return *this;
}
ScopedTempFile::~ScopedTempFile() {
if (!Delete()) {
DLOG(WARNING) << "Could not delete temp dir in destructor.";
}
}
bool ScopedTempFile::Create() {
CHECK(path_.empty());
return base::CreateTemporaryFile(&path_);
}
bool ScopedTempFile::Delete() {
if (path_.empty()) {
return true;
}
if (DeleteFile(path_)) {
path_.clear();
return true;
}
return false;
}
void ScopedTempFile::Reset() {
if (!Delete()) {
DLOG(WARNING) << "Could not delete temp dir in Reset().";
}
path_.clear();
}
} // namespace base

View File

@ -0,0 +1,47 @@
// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_FILES_SCOPED_TEMP_FILE_H_
#define BASE_FILES_SCOPED_TEMP_FILE_H_
#include "base/base_export.h"
#include "base/files/file_path.h"
namespace base {
// An owned FilePath that's deleted when this object goes out of scope.
// Deletion is attempted on destruction, but is not guaranteed.
class BASE_EXPORT ScopedTempFile {
public:
// No file is owned/created initially.
ScopedTempFile();
ScopedTempFile(ScopedTempFile&&) noexcept;
ScopedTempFile& operator=(ScopedTempFile&&) noexcept;
~ScopedTempFile();
// The owned path must be empty before calling Create().
// Returns true on success.
[[nodiscard]] bool Create();
// Returns true on success or if the file was never created.
[[nodiscard]] bool Delete();
// Attempts to delete the file. The managed path is reset regardless of
// whether the deletion was successful.
void Reset();
[[nodiscard]] const base::FilePath& path() const { return path_; }
// NOLINTNEXTLINE(google-explicit-constructor)
operator bool() const { return !path_.empty(); }
private:
FilePath path_;
};
} // namespace base
#endif // BASE_FILES_SCOPED_TEMP_FILE_H_
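A short usage sketch for the new ScopedTempFile; the payload is illustrative.
#include "base/files/file_util.h"
#include "base/files/scoped_temp_file.h"
void TempFileSketch() {
  base::ScopedTempFile temp;
  if (!temp.Create()) {
    return;
  }
  base::WriteFile(temp.path(), "scratch data");
}  // The file is deleted when `temp` goes out of scope.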

View File

@ -5,8 +5,8 @@
library base.testfidl;
@discoverable
protocol TestInterface {
Add(struct {
closed protocol TestInterface {
strict Add(struct {
a int32;
b int32;
}) -> (struct {

View File

@ -149,6 +149,7 @@ class TRIVIAL_ABI OnceCallback<R(Args...)> {
internal::BindStateHolder holder = std::move(holder_);
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(holder.polymorphic_invoke());
CHECK(f);
return f(holder.bind_state().get(), std::forward<Args>(args)...);
}
@ -330,6 +331,7 @@ class TRIVIAL_ABI RepeatingCallback<R(Args...)> {
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(holder_.polymorphic_invoke());
CHECK(f);
return f(bind_state.get(), std::forward<Args>(args)...);
}
@ -345,6 +347,7 @@ class TRIVIAL_ABI RepeatingCallback<R(Args...)> {
internal::BindStateHolder holder = std::move(holder_);
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(holder.polymorphic_invoke());
CHECK(f);
return f(holder.bind_state().get(), std::forward<Args>(args)...);
}

View File

@ -96,8 +96,33 @@ class OnceCallbackHolder final {
const bool ignore_extra_runs_;
};
template <typename... Args>
void ForwardRepeatingCallbacksImpl(
std::vector<RepeatingCallback<void(Args...)>> cbs,
Args... args) {
for (auto& cb : cbs) {
if (cb) {
cb.Run(std::forward<Args>(args)...);
}
}
}
} // namespace internal
// Wraps the given RepeatingCallbacks and returns one RepeatingCallback with an
// identical signature. On invocation of this callback, all the given
// RepeatingCallbacks will be called with the same arguments. Unbound arguments
// must be copyable.
template <typename... Args>
RepeatingCallback<void(Args...)> ForwardRepeatingCallbacks(
std::initializer_list<RepeatingCallback<void(Args...)>>&& cbs) {
std::vector<RepeatingCallback<void(Args...)>> v(
std::forward<std::initializer_list<RepeatingCallback<void(Args...)>>>(
cbs));
return BindRepeating(&internal::ForwardRepeatingCallbacksImpl<Args...>,
std::move(v));
}
// Wraps the given OnceCallback and returns two OnceCallbacks with an identical
// signature. On first invocation of either returned callback, the original
// callback is invoked. Invoking the remaining callback results in a crash.
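A hedged sketch of fanning one invocation out to several callbacks with the ForwardRepeatingCallbacks() helper added above; the callbacks themselves are illustrative.
#include "base/functional/bind.h"
#include "base/functional/callback_helpers.h"
#include "base/logging.h"
void FanOutSketch() {
  auto log_cb = base::BindRepeating([](int value) { LOG(INFO) << value; });
  auto count_cb = base::BindRepeating([](int) { /* e.g. bump a counter */ });
  base::RepeatingCallback<void(int)> fan_out =
      base::ForwardRepeatingCallbacks({log_cb, count_cb});
  fan_out.Run(42);  // Runs both callbacks with 42.
}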

View File

@ -4,6 +4,8 @@
#include "base/hash/hash.h"
#include <string_view>
#include "base/check_op.h"
#include "base/notreached.h"
#include "base/rand_util.h"
@ -130,10 +132,6 @@ uint32_t Hash(const std::string& str) {
return PersistentHash(as_bytes(make_span(str)));
}
uint32_t Hash(const std::u16string& str) {
return PersistentHash(as_bytes(make_span(str)));
}
uint32_t PersistentHash(span<const uint8_t> data) {
// This hash function must not change, since it is designed to be persistable
// to disk.
@ -149,8 +147,8 @@ uint32_t PersistentHash(const void* data, size_t length) {
return PersistentHash(make_span(static_cast<const uint8_t*>(data), length));
}
uint32_t PersistentHash(const std::string& str) {
return PersistentHash(str.data(), str.size());
uint32_t PersistentHash(std::string_view str) {
return PersistentHash(as_bytes(make_span(str)));
}
size_t HashInts32(uint32_t value1, uint32_t value2) {

View File

@ -10,6 +10,7 @@
#include <limits>
#include <string>
#include <string_view>
#include <utility>
#include "base/base_export.h"
@ -27,7 +28,6 @@ namespace base {
// TODO(https://crbug.com/1025358): Migrate client code to new hash function.
BASE_EXPORT uint32_t Hash(const void* data, size_t length);
BASE_EXPORT uint32_t Hash(const std::string& str);
BASE_EXPORT uint32_t Hash(const std::u16string& str);
// Really *fast* and high quality hash.
// Recommended hash function for general use, we pick the best performant
@ -48,7 +48,7 @@ inline size_t FastHash(StringPiece str) {
// WARNING: This hash function should not be used for any cryptographic purpose.
BASE_EXPORT uint32_t PersistentHash(base::span<const uint8_t> data);
BASE_EXPORT uint32_t PersistentHash(const void* data, size_t length);
BASE_EXPORT uint32_t PersistentHash(const std::string& str);
BASE_EXPORT uint32_t PersistentHash(std::string_view str);
// Hash pairs of 32-bit or 64-bit numbers.
BASE_EXPORT size_t HashInts32(uint32_t value1, uint32_t value2);
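A small sketch of the string_view overload that replaces the std::string one above; the key is illustrative.
#include <string>
#include <string_view>
#include "base/hash/hash.h"
bool PersistentHashSketch() {
  constexpr std::string_view kKey = "stable-key";
  // The std::string argument converts to std::string_view, so both calls hash
  // the same bytes and return the same value.
  return base::PersistentHash(kKey) == base::PersistentHash(std::string(kKey));
}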

Some files were not shown because too many files have changed in this diff.