naiveproxy/base/test/test_shared_memory_util.cc

// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/test/test_shared_memory_util.h"

#include <gtest/gtest.h>
#include <stddef.h>
#include <stdint.h>

#include "base/logging.h"
#include "build/build_config.h"

#if defined(OS_POSIX) && !defined(OS_NACL)
#include <errno.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#endif

#if defined(OS_FUCHSIA)
#include <zircon/process.h>
#include <zircon/rights.h>
#include <zircon/syscalls.h>
#endif

#if defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach_vm.h>
#endif

#if defined(OS_WIN)
#include <aclapi.h>
#endif

namespace base {

#if !defined(OS_NACL)

static const size_t kDataSize = 1024;

// Common routine used with Posix file descriptors. Check that shared memory
// file descriptor |fd| does not allow writable mappings. Return true on
// success, false otherwise.
#if defined(OS_POSIX)
static bool CheckReadOnlySharedMemoryFdPosix(int fd) {
// Note that the error on Android is EPERM, unlike other platforms where
// it will be EACCES.
#if defined(OS_ANDROID)
  const int kExpectedErrno = EPERM;
#else
  const int kExpectedErrno = EACCES;
#endif
  errno = 0;
  void* address =
      mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
  const bool success = (address != nullptr) && (address != MAP_FAILED);
  if (success) {
    LOG(ERROR) << "mmap() should have failed!";
    munmap(address, kDataSize);  // Cleanup.
    return false;
  }
  if (errno != kExpectedErrno) {
    LOG(ERROR) << "Expected mmap() to fail with errno " << kExpectedErrno
               << " but errno is " << errno << ": " << strerror(errno) << "\n";
    return false;
  }
  return true;
}
#endif  // defined(OS_POSIX)

#if defined(OS_FUCHSIA)
// Fuchsia-specific implementation: tries to map |handle| read/write and
// expects the call to fail with ZX_ERR_ACCESS_DENIED.
bool CheckReadOnlySharedMemoryFuchsiaHandle(zx_handle_t handle) {
  const uint32_t flags = ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE;
  uintptr_t addr;
  const zx_handle_t root = zx_vmar_root_self();
  const zx_status_t status =
      zx_vmar_map_old(root, 0, handle, 0U, kDataSize, flags, &addr);
  if (status == ZX_OK) {
    LOG(ERROR) << "zx_vmar_map_old() should have failed!";
    zx_vmar_unmap(root, addr, kDataSize);
    return false;
  }
  if (status != ZX_ERR_ACCESS_DENIED) {
    LOG(ERROR) << "Expected zx_vmar_map_old() to return "
               << ZX_ERR_ACCESS_DENIED << " (ZX_ERR_ACCESS_DENIED) but it "
               << "returned " << status << "\n";
    return false;
  }
  return true;
}
#elif defined(OS_MACOSX) && !defined(OS_IOS)
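// Attempts to map |memory_object| with read/write protection via
// mach_vm_map() and expects the call to fail for a read-only memory object.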
bool CheckReadOnlySharedMemoryMachPort(mach_port_t memory_object) {
  mach_vm_address_t memory;
  const kern_return_t kr = mach_vm_map(
      mach_task_self(), &memory, kDataSize, 0, VM_FLAGS_ANYWHERE, memory_object,
      0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
      VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK, VM_INHERIT_NONE);
  if (kr == KERN_SUCCESS) {
    LOG(ERROR) << "mach_vm_map() should have failed!";
    mach_vm_deallocate(mach_task_self(), memory, kDataSize);  // Cleanup.
    return false;
  }
  return true;
}
#elif defined(OS_WIN)
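// Attempts to map |handle| with FILE_MAP_WRITE access via MapViewOfFile() and
// expects the call to fail for a read-only section handle.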
bool CheckReadOnlySharedMemoryWindowsHandle(HANDLE handle) {
  void* memory =
      MapViewOfFile(handle, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, kDataSize);
  if (memory != nullptr) {
    LOG(ERROR) << "MapViewOfFile() should have failed!";
    UnmapViewOfFile(memory);
    return false;
  }
  return true;
}
#endif
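
// Checks that |handle| does not allow writable mappings, using the
// platform-appropriate routine above.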
bool CheckReadOnlySharedMemoryHandleForTesting(SharedMemoryHandle handle) {
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // For OSX, the code has to deal with both POSIX and MACH handles.
  if (handle.type_ == SharedMemoryHandle::POSIX)
    return CheckReadOnlySharedMemoryFdPosix(handle.file_descriptor_.fd);
  else
    return CheckReadOnlySharedMemoryMachPort(handle.memory_object_);
#elif defined(OS_FUCHSIA)
  return CheckReadOnlySharedMemoryFuchsiaHandle(handle.GetHandle());
#elif defined(OS_WIN)
  return CheckReadOnlySharedMemoryWindowsHandle(handle.GetHandle());
#else
  return CheckReadOnlySharedMemoryFdPosix(handle.GetHandle());
#endif
}
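
// Checks that |region| was created read-only: its mode must be kReadOnly and
// its platform handle must reject writable mappings.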
bool CheckReadOnlyPlatformSharedMemoryRegionForTesting(
    subtle::PlatformSharedMemoryRegion region) {
  if (region.GetMode() != subtle::PlatformSharedMemoryRegion::Mode::kReadOnly) {
    LOG(ERROR) << "Expected region mode is "
               << static_cast<int>(
                      subtle::PlatformSharedMemoryRegion::Mode::kReadOnly)
               << " but actual is " << static_cast<int>(region.GetMode());
    return false;
  }
#if defined(OS_MACOSX) && !defined(OS_IOS)
  return CheckReadOnlySharedMemoryMachPort(region.GetPlatformHandle());
#elif defined(OS_FUCHSIA)
  return CheckReadOnlySharedMemoryFuchsiaHandle(region.GetPlatformHandle());
#elif defined(OS_WIN)
  return CheckReadOnlySharedMemoryWindowsHandle(region.GetPlatformHandle());
#elif defined(OS_ANDROID)
  return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle());
#else
  return CheckReadOnlySharedMemoryFdPosix(region.GetPlatformHandle().fd);
#endif
}
#endif // !OS_NACL
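
// Maps the whole of |region| into the current process and wraps the result in
// a WritableSharedMemoryMapping for test use.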
WritableSharedMemoryMapping MapForTesting(
    subtle::PlatformSharedMemoryRegion* region) {
  return MapAtForTesting(region, 0, region->GetSize());
}
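
// Maps |size| bytes of |region| starting at |offset|; returns an invalid
// (default-constructed) mapping if MapAt() fails.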
WritableSharedMemoryMapping MapAtForTesting(
    subtle::PlatformSharedMemoryRegion* region,
    off_t offset,
    size_t size) {
  void* memory = nullptr;
  size_t mapped_size = 0;
  if (!region->MapAt(offset, size, &memory, &mapped_size))
    return {};
  return WritableSharedMemoryMapping(memory, size, mapped_size,
                                     region->GetGUID());
}
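
// Specialization that creates a read-only shared memory region of |size|
// bytes along with a writable mapping of it.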
template <>
std::pair<ReadOnlySharedMemoryRegion, WritableSharedMemoryMapping>
CreateMappedRegion(size_t size) {
  MappedReadOnlyRegion mapped_region = ReadOnlySharedMemoryRegion::Create(size);
  return {std::move(mapped_region.region), std::move(mapped_region.mapping)};
}

} // namespace base