Update PartitionAlloc from Chromium at r709322.
This merges in the following list of PartitionAlloc CLs:
- https://crrev.com/635426
- https://crrev.com/638413
- https://crrev.com/646910
- https://crrev.com/659314
- https://crrev.com/659941
- https://crrev.com/664574
- https://crrev.com/684173
- https://crrev.com/688217
- https://crrev.com/690723
- https://crrev.com/692015
- https://crrev.com/704306
- https://crrev.com/705884
- https://crrev.com/709322
But skips these CLs:
- https://crrev.com/646977 N/A
- https://crrev.com/648434 N/A
- https://crrev.com/660935 No base::mac::IsAtLeastOS10_14()
- https://crrev.com/663992 N/A
- https://crrev.com/664559 N/A
- https://crrev.com/665991 N/A
- https://crrev.com/669614 Memory reclaimer uses too many base/ bits
- https://crrev.com/677822 N/A
- https://crrev.com/679933 No Memory reclaimer
- https://crrev.com/680794 N/A (No Memory reclaimer)
- https://crrev.com/688086 N/A
- https://crrev.com/689794 N/A
- https://crrev.com/690701 N/A
- https://crrev.com/693060 N/A
Change-Id: Iddefd185701535c50270a3630967b0c45e877068
Reviewed-on: https://pdfium-review.googlesource.com/c/pdfium/+/63110
Reviewed-by: Tom Sepez <tsepez@chromium.org>
Commit-Queue: Lei Zhang <thestig@chromium.org>
diff --git a/third_party/BUILD.gn b/third_party/BUILD.gn
index 9f90d96..e856c8d 100644
--- a/third_party/BUILD.gn
+++ b/third_party/BUILD.gn
@@ -615,6 +615,8 @@
"base/allocator/partition_allocator/partition_page.h",
"base/allocator/partition_allocator/partition_root_base.cc",
"base/allocator/partition_allocator/partition_root_base.h",
+ "base/allocator/partition_allocator/random.cc",
+ "base/allocator/partition_allocator/random.h",
"base/allocator/partition_allocator/spin_lock.cc",
"base/allocator/partition_allocator/spin_lock.h",
"base/base_export.h",
diff --git a/third_party/base/allocator/partition_allocator/address_space_randomization.cc b/third_party/base/allocator/partition_allocator/address_space_randomization.cc
index 135c67d..c6f268f 100644
--- a/third_party/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/third_party/base/allocator/partition_allocator/address_space_randomization.cc
@@ -6,6 +6,7 @@
#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/allocator/partition_allocator/random.h"
#include "third_party/base/allocator/partition_allocator/spin_lock.h"
#include "third_party/base/logging.h"
@@ -13,95 +14,17 @@
#include <windows.h> // Must be in front of other Windows header files.
#include <VersionHelpers.h>
-#else
-#include <sys/time.h>
-#include <unistd.h>
#endif
namespace pdfium {
namespace base {
-namespace {
-
-// This is the same PRNG as used by tcmalloc for mapping address randomness;
-// see http://burtleburtle.net/bob/rand/smallprng.html
-struct RandomContext {
- subtle::SpinLock lock;
- bool initialized;
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
-};
-
-RandomContext* GetRandomContext() {
- static RandomContext* s_RandomContext = nullptr;
- if (!s_RandomContext)
- s_RandomContext = new RandomContext();
- return s_RandomContext;
-}
-
-#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
-
-uint32_t RandomValueInternal(RandomContext* x) {
- uint32_t e = x->a - rot(x->b, 27);
- x->a = x->b ^ rot(x->c, 17);
- x->b = x->c + x->d;
- x->c = x->d + e;
- x->d = e + x->a;
- return x->d;
-}
-
-#undef rot
-
-uint32_t RandomValue(RandomContext* x) {
- subtle::SpinLock::Guard guard(x->lock);
- if (UNLIKELY(!x->initialized)) {
- x->initialized = true;
- char c;
- uint32_t seed = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&c));
- uint32_t pid;
- uint32_t usec;
-#if defined(OS_WIN)
- pid = GetCurrentProcessId();
- SYSTEMTIME st;
- GetSystemTime(&st);
- usec = static_cast<uint32_t>(st.wMilliseconds * 1000);
-#else
- pid = static_cast<uint32_t>(getpid());
- struct timeval tv;
- gettimeofday(&tv, 0);
- usec = static_cast<uint32_t>(tv.tv_usec);
-#endif
- seed ^= pid;
- seed ^= usec;
- x->a = 0xf1ea5eed;
- x->b = x->c = x->d = seed;
- for (int i = 0; i < 20; ++i) {
- RandomValueInternal(x);
- }
- }
-
- return RandomValueInternal(x);
-}
-
-} // namespace
-
-void SetRandomPageBaseSeed(int64_t seed) {
- RandomContext* x = GetRandomContext();
- subtle::SpinLock::Guard guard(x->lock);
- // Set RNG to initial state.
- x->initialized = true;
- x->a = x->b = static_cast<uint32_t>(seed);
- x->c = x->d = static_cast<uint32_t>(seed >> 32);
-}
-
void* GetRandomPageBase() {
- uintptr_t random = static_cast<uintptr_t>(RandomValue(GetRandomContext()));
+ uintptr_t random = static_cast<uintptr_t>(RandomValue());
#if defined(ARCH_CPU_64_BITS)
random <<= 32ULL;
- random |= static_cast<uintptr_t>(RandomValue(GetRandomContext()));
+ random |= static_cast<uintptr_t>(RandomValue());
// The kASLRMask and kASLROffset constants will be suitable for the
// OS and build configuration.
@@ -128,7 +51,7 @@
// On win32 host systems the randomization plus huge alignment causes
// excessive fragmentation. Plus most of these systems lack ASLR, so the
// randomization isn't buying anything. In that case we just skip it.
- // TODO(jschuh): Just dump the randomization when HE-ASLR is present.
+ // TODO(palmer): Just dump the randomization when HE-ASLR is present.
static BOOL is_wow64 = -1;
if (is_wow64 == -1 && !IsWow64Process(GetCurrentProcess(), &is_wow64))
is_wow64 = FALSE;
diff --git a/third_party/base/allocator/partition_allocator/address_space_randomization.h b/third_party/base/allocator/partition_allocator/address_space_randomization.h
index efad668..5cb2ccc 100644
--- a/third_party/base/allocator/partition_allocator/address_space_randomization.h
+++ b/third_party/base/allocator/partition_allocator/address_space_randomization.h
@@ -12,11 +12,6 @@
namespace pdfium {
namespace base {
-// Sets the seed for the random number generator used by GetRandomPageBase in
-// order to generate a predictable sequence of addresses. May be called multiple
-// times.
-BASE_EXPORT void SetRandomPageBaseSeed(int64_t seed);
-
// Calculates a random preferred mapping address. In calculating an address, we
// balance good ASLR against not fragmenting the address space too badly.
BASE_EXPORT void* GetRandomPageBase();
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.cc b/third_party/base/allocator/partition_allocator/page_allocator.cc
index 683632d..14e9379 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.cc
+++ b/third_party/base/allocator/partition_allocator/page_allocator.cc
@@ -117,18 +117,6 @@
uintptr_t align_base_mask = ~align_offset_mask;
DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
-#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
- // On 64 bit Linux, we may need to adjust the address space limit for
- // guarded allocations.
- if (length >= kMinimumGuardedMemorySize) {
- CHECK_EQ(PageInaccessible, accessibility);
- CHECK(!commit);
- if (!AdjustAddressSpaceLimit(base::checked_cast<int64_t>(length))) {
- // Fall through. Try the allocation, since we may have a reserve.
- }
- }
-#endif
-
// If the client passed null as the address, choose a good one.
if (address == nullptr) {
address = GetRandomPageBase();
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.h b/third_party/base/allocator/partition_allocator/page_allocator.h
index a48a841..9b076f9 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.h
+++ b/third_party/base/allocator/partition_allocator/page_allocator.h
@@ -27,12 +27,15 @@
PageReadWriteExecute,
};
-// Mac OSX supports tagged memory regions, to help in debugging.
+// macOS supports tagged memory regions, to help in debugging. On Android,
+// these tags are used to name anonymous mappings.
enum class PageTag {
- kFirst = 240, // Minimum tag value.
- kChromium = 254, // Chromium page, including off-heap V8 ArrayBuffers.
- kV8 = 255, // V8 heap pages.
- kLast = kV8 // Maximum tag value.
+ kFirst = 240, // Minimum tag value.
+ kBlinkGC = 252, // Blink GC pages.
+ kPartitionAlloc = 253, // PartitionAlloc, no matter the partition.
+ kChromium = 254, // Chromium page.
+ kV8 = 255, // V8 heap pages.
+ kLast = kV8 // Maximum tag value.
};
// Allocate one or more pages.
@@ -47,13 +50,15 @@
// automatically.
//
// |page_accessibility| controls the permission of the allocated pages.
+// |page_tag| is used on some platforms to identify the source of the
+// allocation. Use PageTag::kChromium as a catch-all category.
//
// This call will return null if the allocation cannot be satisfied.
BASE_EXPORT void* AllocPages(void* address,
size_t length,
size_t align,
PageAccessibilityConfiguration page_accessibility,
- PageTag tag = PageTag::kChromium,
+ PageTag tag,
bool commit = true);
// Free one or more pages starting at |address| and continuing for |length|
diff --git a/third_party/base/allocator/partition_allocator/page_allocator_constants.h b/third_party/base/allocator/partition_allocator/page_allocator_constants.h
index 945273b..567e3a3 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator_constants.h
+++ b/third_party/base/allocator/partition_allocator/page_allocator_constants.h
@@ -11,7 +11,7 @@
namespace pdfium {
namespace base {
-#if defined(OS_WIN)
+#if defined(OS_WIN) || defined(ARCH_CPU_PPC64)
static constexpr size_t kPageAllocationGranularityShift = 16; // 64KB
#elif defined(_MIPS_ARCH_LOONGSON)
static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB
@@ -27,6 +27,12 @@
#if defined(_MIPS_ARCH_LOONGSON)
static constexpr size_t kSystemPageSize = 16384;
+#elif defined(ARCH_CPU_PPC64)
+// Modern ppc64 systems support 4KB and 64KB page sizes.
+// Since 64KB is the de-facto standard on the platform
+// and binaries compiled for 64KB are likely to work on 4KB systems,
+// 64KB is a good choice here.
+static constexpr size_t kSystemPageSize = 65536;
#else
static constexpr size_t kSystemPageSize = 4096;
#endif
diff --git a/third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h b/third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h
index 026d10d..2d62cb8 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h
+++ b/third_party/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -13,12 +13,17 @@
#if defined(OS_MACOSX)
#include <mach/mach.h>
#endif
+#if defined(OS_ANDROID)
+#include <sys/prctl.h>
+#endif
#if defined(OS_LINUX)
#include <sys/resource.h>
#include <algorithm>
#endif
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
@@ -26,6 +31,32 @@
namespace pdfium {
namespace base {
+#if defined(OS_ANDROID)
+namespace {
+const char* PageTagToName(PageTag tag) {
+ // Important: All the names should be string literals. As per prctl.h in
+ // //third_party/android_ndk the kernel keeps a pointer to the name instead
+ // of copying it.
+ //
+ // Having the name in .rodata ensures that the pointer remains valid as
+ // long as the mapping is alive.
+ switch (tag) {
+ case PageTag::kBlinkGC:
+ return "blink_gc";
+ case PageTag::kPartitionAlloc:
+ return "partition_alloc";
+ case PageTag::kChromium:
+ return "chromium";
+ case PageTag::kV8:
+ return "v8";
+ default:
+ DCHECK(false);
+ return "";
+ }
+}
+} // namespace
+#endif // defined(OS_ANDROID)
+
// |mmap| uses a nearby address if the hint address is blocked.
constexpr bool kHintIsAdvisory = true;
std::atomic<int32_t> s_allocPageErrorCode{0};
@@ -48,28 +79,6 @@
}
}
-#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
-
-// Multiple guarded memory regions may exceed the process address space limit.
-// This function will raise or lower the limit by |amount|.
-bool AdjustAddressSpaceLimit(int64_t amount) {
- struct rlimit old_rlimit;
- if (getrlimit(RLIMIT_AS, &old_rlimit))
- return false;
- const rlim_t new_limit =
- CheckAdd(old_rlimit.rlim_cur, amount).ValueOrDefault(old_rlimit.rlim_max);
- const struct rlimit new_rlimit = {std::min(new_limit, old_rlimit.rlim_max),
- old_rlimit.rlim_max};
- // setrlimit will fail if limit > old_rlimit.rlim_max.
- return setrlimit(RLIMIT_AS, &new_rlimit) == 0;
-}
-
-// Current WASM guarded memory regions have 8 GiB of address space. There are
-// schemes that reduce that to 4 GiB.
-constexpr size_t kMinimumGuardedMemorySize = 1ULL << 32; // 4 GiB
-
-#endif // defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
-
void* SystemAllocPagesInternal(void* hint,
size_t length,
PageAccessibilityConfiguration accessibility,
@@ -86,12 +95,32 @@
#endif
int access_flag = GetAccessFlags(accessibility);
- void* ret =
- mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
+ int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
+
+ // TODO(https://crbug.com/927411): Remove once Fuchsia uses a native page
+ // allocator, rather than relying on POSIX compatibility.
+#if defined(OS_FUCHSIA)
+ if (page_tag == PageTag::kV8) {
+ map_flags |= MAP_JIT;
+ }
+#endif
+
+ void* ret = mmap(hint, length, access_flag, map_flags, fd, 0);
if (ret == MAP_FAILED) {
s_allocPageErrorCode = errno;
ret = nullptr;
}
+
+#if defined(OS_ANDROID)
+ // On Android, anonymous mappings can have a name attached to them. This is
+ // useful for debugging, and double-checking memory attribution.
+ if (ret) {
+ // No error checking on purpose, testing only.
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ret, length,
+ PageTagToName(page_tag));
+ }
+#endif
+
return ret;
}
@@ -133,13 +162,6 @@
void FreePagesInternal(void* address, size_t length) {
CHECK(!munmap(address, length));
-
-#if defined(OS_LINUX) && defined(ARCH_CPU_64_BITS)
- // Restore the address space limit.
- if (length >= kMinimumGuardedMemorySize) {
- CHECK(AdjustAddressSpaceLimit(-base::checked_cast<int64_t>(length)));
- }
-#endif
}
void DecommitSystemPagesInternal(void* address, size_t length) {
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.cc b/third_party/base/allocator/partition_allocator/partition_alloc.cc
index e0bae85..a6acfc1 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.cc
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.cc
@@ -198,13 +198,9 @@
this->num_buckets = num_buckets;
this->max_allocation = max_allocation;
- size_t i;
- for (i = 0; i < this->num_buckets; ++i) {
+ for (size_t i = 0; i < this->num_buckets; ++i) {
internal::PartitionBucket* bucket = &this->buckets()[i];
- if (!i)
- bucket->Init(kAllocationGranularity);
- else
- bucket->Init(i << kBucketShift);
+ bucket->Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
}
}
@@ -360,10 +356,7 @@
size_t new_size,
const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- // Make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max size
- // as other alloc code.
- if (new_size > kGenericMaxDirectMapped)
- return nullptr;
+ CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
void* result = realloc(ptr, new_size);
CHECK(result || flags & PartitionAllocReturnNull);
return result;
@@ -501,14 +494,14 @@
size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
DCHECK(slot_index < num_slots);
slot_usage[slot_index] = 0;
- entry = internal::PartitionFreelistEntry::Transform(entry->next);
+ entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
#if !defined(OS_WIN)
// If we have a slot where the masked freelist entry is 0, we can actually
// discard that freelist entry because touching a discarded page is
// guaranteed to return original content or 0. (Note that this optimization
// won't fire on big-endian machines because the masking function is
// negation.)
- if (!internal::PartitionFreelistEntry::Transform(entry))
+ if (!internal::PartitionFreelistEntry::Encode(entry))
last_slot = slot_index;
#endif
}
@@ -542,25 +535,33 @@
DCHECK(truncated_slots > 0);
size_t num_new_entries = 0;
page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
+
// Rewrite the freelist.
- internal::PartitionFreelistEntry** entry_ptr = &page->freelist_head;
+ internal::PartitionFreelistEntry* head = nullptr;
+ internal::PartitionFreelistEntry* back = head;
for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
if (slot_usage[slot_index])
continue;
+
auto* entry = reinterpret_cast<internal::PartitionFreelistEntry*>(
ptr + (slot_size * slot_index));
- *entry_ptr = internal::PartitionFreelistEntry::Transform(entry);
- entry_ptr = reinterpret_cast<internal::PartitionFreelistEntry**>(entry);
+ if (!head) {
+ head = entry;
+ back = entry;
+ } else {
+ back->next = internal::PartitionFreelistEntry::Encode(entry);
+ back = entry;
+ }
num_new_entries++;
#if !defined(OS_WIN)
last_slot = slot_index;
#endif
}
- // Terminate the freelist chain.
- *entry_ptr = nullptr;
- // The freelist head is stored unmasked.
- page->freelist_head =
- internal::PartitionFreelistEntry::Transform(page->freelist_head);
+
+ page->freelist_head = head;
+ if (back)
+ back->next = internal::PartitionFreelistEntry::Encode(nullptr);
+
DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
// Discard the memory.
DiscardSystemPages(begin_ptr, unprovisioned_bytes);
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.h b/third_party/base/allocator/partition_allocator/partition_alloc.h
index 88f37bc..bd6505c 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.h
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.h
@@ -82,6 +82,16 @@
#include <stdlib.h>
#endif
+// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
+// size as other alloc code.
+#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
+ if (size > kGenericMaxDirectMapped) { \
+ if (flags & PartitionAllocReturnNull) { \
+ return nullptr; \
+ } \
+ CHECK(false); \
+ }
+
namespace pdfium {
namespace base {
@@ -117,7 +127,7 @@
ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
- void PurgeMemory(int flags);
+ void PurgeMemory(int flags) override;
void DumpStats(const char* partition_name,
bool is_light_dump,
@@ -157,7 +167,7 @@
ALWAYS_INLINE size_t ActualSize(size_t size);
- void PurgeMemory(int flags);
+ void PurgeMemory(int flags) override;
void DumpStats(const char* partition_name,
bool is_light_dump,
@@ -290,14 +300,12 @@
size_t size,
const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- // Make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max size
- // as other alloc code.
- if (size > kGenericMaxDirectMapped)
- return nullptr;
+ CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
void* result = malloc(size);
CHECK(result);
return result;
#else
+ DCHECK(max_allocation == 0 || size <= max_allocation);
void* result;
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
if (UNLIKELY(hooks_enabled)) {
@@ -389,16 +397,15 @@
DCHECK(flags < PartitionAllocLastFlag << 1);
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- // Make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max size
- // as other alloc code.
- if (size > kGenericMaxDirectMapped)
- return nullptr;
+ CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
const bool zero_fill = flags & PartitionAllocZeroFill;
void* result = zero_fill ? calloc(1, size) : malloc(size);
CHECK(result || flags & PartitionAllocReturnNull);
return result;
#else
DCHECK(root->initialized);
+ // Only SizeSpecificPartitionAllocator should use max_allocation.
+ DCHECK(root->max_allocation == 0);
void* result;
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
if (UNLIKELY(hooks_enabled)) {
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc_constants.h b/third_party/base/allocator/partition_allocator/partition_alloc_constants.h
index 437e4df..8ebc4f5 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc_constants.h
+++ b/third_party/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -7,6 +7,7 @@
#include <limits.h>
+#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/page_allocator_constants.h"
#include "third_party/base/logging.h"
@@ -36,6 +37,8 @@
#if defined(_MIPS_ARCH_LOONGSON)
static const size_t kPartitionPageShift = 16; // 64 KiB
+#elif defined(ARCH_CPU_PPC64)
+static const size_t kPartitionPageShift = 18; // 256 KiB
#else
static const size_t kPartitionPageShift = 14; // 16 KiB
#endif
diff --git a/third_party/base/allocator/partition_allocator/partition_bucket.cc b/third_party/base/allocator/partition_allocator/partition_bucket.cc
index ae1cf75..54acfde 100644
--- a/third_party/base/allocator/partition_allocator/partition_bucket.cc
+++ b/third_party/base/allocator/partition_allocator/partition_bucket.cc
@@ -39,8 +39,9 @@
map_size += kPageAllocationGranularityOffsetMask;
map_size &= kPageAllocationGranularityBaseMask;
- char* ptr = reinterpret_cast<char*>(
- AllocPages(nullptr, map_size, kSuperPageSize, PageReadWrite));
+ char* ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size,
+ kSuperPageSize, PageReadWrite,
+ PageTag::kPartitionAlloc));
if (UNLIKELY(!ptr))
return nullptr;
@@ -78,7 +79,7 @@
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(slot);
- next_entry->next = PartitionFreelistEntry::Transform(nullptr);
+ next_entry->next = PartitionFreelistEntry::Encode(nullptr);
DCHECK(!bucket->active_pages_head);
DCHECK(!bucket->empty_pages_head);
@@ -219,8 +220,9 @@
// page table bloat and not fragmenting address spaces in 32 bit
// architectures.
char* requested_address = root->next_super_page;
- char* super_page = reinterpret_cast<char*>(AllocPages(
- requested_address, kSuperPageSize, kSuperPageSize, PageReadWrite));
+ char* super_page = reinterpret_cast<char*>(
+ AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
+ PageReadWrite, PageTag::kPartitionAlloc));
if (UNLIKELY(!super_page))
return nullptr;
@@ -392,10 +394,10 @@
freelist_pointer += size;
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
- entry->next = PartitionFreelistEntry::Transform(next_entry);
+ entry->next = PartitionFreelistEntry::Encode(next_entry);
entry = next_entry;
}
- entry->next = PartitionFreelistEntry::Transform(nullptr);
+ entry->next = PartitionFreelistEntry::Encode(nullptr);
} else {
page->freelist_head = nullptr;
}
@@ -553,7 +555,7 @@
if (LIKELY(new_page->freelist_head != nullptr)) {
PartitionFreelistEntry* entry = new_page->freelist_head;
PartitionFreelistEntry* new_head =
- PartitionFreelistEntry::Transform(entry->next);
+ EncodedPartitionFreelistEntry::Decode(entry->next);
new_page->freelist_head = new_head;
new_page->num_allocated_slots++;
return entry;
diff --git a/third_party/base/allocator/partition_allocator/partition_freelist_entry.h b/third_party/base/allocator/partition_allocator/partition_freelist_entry.h
index e9f2284..5d46f0f 100644
--- a/third_party/base/allocator/partition_allocator/partition_freelist_entry.h
+++ b/third_party/base/allocator/partition_allocator/partition_freelist_entry.h
@@ -16,33 +16,56 @@
namespace base {
namespace internal {
-// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace
-// Transform() with Encode()/Decode() such that the API provides some static
-// type safety.
-//
-// https://crbug.com/787153
-struct PartitionFreelistEntry {
- PartitionFreelistEntry* next;
+struct EncodedPartitionFreelistEntry;
- static ALWAYS_INLINE PartitionFreelistEntry* Transform(
+struct PartitionFreelistEntry {
+ EncodedPartitionFreelistEntry* next;
+
+ PartitionFreelistEntry() = delete;
+ ~PartitionFreelistEntry() = delete;
+
+ ALWAYS_INLINE static EncodedPartitionFreelistEntry* Encode(
PartitionFreelistEntry* ptr) {
-// We use bswap on little endian as a fast mask for two reasons:
-// 1) If an object is freed and its vtable used where the attacker doesn't
-// get the chance to run allocations between the free and use, the vtable
-// dereference is likely to fault.
-// 2) If the attacker has a linear buffer overflow and elects to try and
-// corrupt a freelist pointer, partial pointer overwrite attacks are
-// thwarted.
-// For big endian, similar guarantees are arrived at with a negation.
+ return reinterpret_cast<EncodedPartitionFreelistEntry*>(Transform(ptr));
+ }
+
+ private:
+ friend struct EncodedPartitionFreelistEntry;
+ static ALWAYS_INLINE void* Transform(void* ptr) {
+ // We use bswap on little endian as a fast mask for two reasons:
+ // 1) If an object is freed and its vtable used where the attacker doesn't
+ // get the chance to run allocations between the free and use, the vtable
+ // dereference is likely to fault.
+ // 2) If the attacker has a linear buffer overflow and elects to try and
+ // corrupt a freelist pointer, partial pointer overwrite attacks are
+ // thwarted.
+ // For big endian, similar guarantees are arrived at with a negation.
#if defined(ARCH_CPU_BIG_ENDIAN)
uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
#else
uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
#endif
- return reinterpret_cast<PartitionFreelistEntry*>(masked);
+ return reinterpret_cast<void*>(masked);
}
};
+struct EncodedPartitionFreelistEntry {
+ char scrambled[sizeof(PartitionFreelistEntry*)];
+
+ EncodedPartitionFreelistEntry() = delete;
+ ~EncodedPartitionFreelistEntry() = delete;
+
+ ALWAYS_INLINE static PartitionFreelistEntry* Decode(
+ EncodedPartitionFreelistEntry* ptr) {
+ return reinterpret_cast<PartitionFreelistEntry*>(
+ PartitionFreelistEntry::Transform(ptr));
+ }
+};
+
+static_assert(sizeof(PartitionFreelistEntry) ==
+ sizeof(EncodedPartitionFreelistEntry),
+ "Should not have padding");
+
} // namespace internal
} // namespace base
} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/partition_page.h b/third_party/base/allocator/partition_allocator/partition_page.h
index f5ae1e4..a4aa3ac 100644
--- a/third_party/base/allocator/partition_allocator/partition_page.h
+++ b/third_party/base/allocator/partition_allocator/partition_page.h
@@ -11,6 +11,7 @@
#include "third_party/base/allocator/partition_allocator/partition_bucket.h"
#include "third_party/base/allocator/partition_allocator/partition_cookie.h"
#include "third_party/base/allocator/partition_allocator/partition_freelist_entry.h"
+#include "third_party/base/allocator/partition_allocator/random.h"
namespace pdfium {
namespace base {
@@ -217,17 +218,14 @@
#endif
DCHECK(this->num_allocated_slots);
- // TODO(palmer): See if we can afford to make this a CHECK.
- // FIX FIX FIX
- // DCHECK(!freelist_head || PartitionRootBase::IsValidPage(
- // PartitionPage::FromPointer(freelist_head)));
- CHECK(ptr != freelist_head); // Catches an immediate double free.
+ // Catches an immediate double free.
+ CHECK(ptr != freelist_head);
// Look for double free one level deeper in debug.
- DCHECK(!freelist_head || ptr != internal::PartitionFreelistEntry::Transform(
- freelist_head->next));
+ DCHECK(!freelist_head ||
+ ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
internal::PartitionFreelistEntry* entry =
static_cast<internal::PartitionFreelistEntry*>(ptr);
- entry->next = internal::PartitionFreelistEntry::Transform(freelist_head);
+ entry->next = internal::PartitionFreelistEntry::Encode(freelist_head);
freelist_head = entry;
--this->num_allocated_slots;
if (UNLIKELY(this->num_allocated_slots <= 0)) {
diff --git a/third_party/base/allocator/partition_allocator/partition_root_base.h b/third_party/base/allocator/partition_allocator/partition_root_base.h
index e4f7228..2f4b70e 100644
--- a/third_party/base/allocator/partition_allocator/partition_root_base.h
+++ b/third_party/base/allocator/partition_allocator/partition_root_base.h
@@ -82,6 +82,9 @@
ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length);
ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length);
+ // Frees memory from this partition, if possible, by decommitting pages.
+ // |flags| is an OR of base::PartitionPurgeFlags.
+ virtual void PurgeMemory(int flags) = 0;
void DecommitEmptyPages();
};
@@ -104,8 +107,8 @@
// the size metadata.
DCHECK(page->get_raw_size() == 0);
internal::PartitionFreelistEntry* new_head =
- internal::PartitionFreelistEntry::Transform(
- static_cast<internal::PartitionFreelistEntry*>(ret)->next);
+ internal::EncodedPartitionFreelistEntry::Decode(
+ page->freelist_head->next);
page->freelist_head = new_head;
page->num_allocated_slots++;
} else {
diff --git a/third_party/base/allocator/partition_allocator/random.cc b/third_party/base/allocator/partition_allocator/random.cc
new file mode 100644
index 0000000..02309c3
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/random.cc
@@ -0,0 +1,96 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/random.h"
+
+#include "build/build_config.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#else
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+namespace pdfium {
+namespace base {
+
+// This is the same PRNG as used by tcmalloc for mapping address randomness;
+// see http://burtleburtle.net/bob/rand/smallprng.html.
+struct RandomContext {
+ subtle::SpinLock lock;
+ bool initialized;
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+};
+
+namespace {
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
+uint32_t RandomValueInternal(RandomContext* x) {
+ uint32_t e = x->a - rot(x->b, 27);
+ x->a = x->b ^ rot(x->c, 17);
+ x->b = x->c + x->d;
+ x->c = x->d + e;
+ x->d = e + x->a;
+ return x->d;
+}
+
+#undef rot
+
+RandomContext* GetRandomContext() {
+ static RandomContext* s_RandomContext = nullptr;
+ if (!s_RandomContext)
+ s_RandomContext = new RandomContext();
+ return s_RandomContext;
+}
+
+} // namespace
+
+uint32_t RandomValue() {
+ RandomContext* x = GetRandomContext();
+ subtle::SpinLock::Guard guard(x->lock);
+ if (UNLIKELY(!x->initialized)) {
+ x->initialized = true;
+ char c;
+ uint32_t seed = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&c));
+ uint32_t pid;
+ uint32_t usec;
+#if defined(OS_WIN)
+ pid = GetCurrentProcessId();
+ SYSTEMTIME st;
+ GetSystemTime(&st);
+ usec = static_cast<uint32_t>(st.wMilliseconds * 1000);
+#else
+ pid = static_cast<uint32_t>(getpid());
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ usec = static_cast<uint32_t>(tv.tv_usec);
+#endif
+ seed ^= pid;
+ seed ^= usec;
+ x->a = 0xf1ea5eed;
+ x->b = x->c = x->d = seed;
+ for (int i = 0; i < 20; ++i) {
+ RandomValueInternal(x);
+ }
+ }
+
+ return RandomValueInternal(x);
+}
+
+void SetMmapSeedForTesting(int64_t seed) {
+ RandomContext* x = GetRandomContext();
+ subtle::SpinLock::Guard guard(x->lock);
+ x->a = x->b = static_cast<uint32_t>(seed);
+ x->c = x->d = static_cast<uint32_t>(seed >> 32);
+ x->initialized = true;
+}
+
+} // namespace base
+} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/random.h b/third_party/base/allocator/partition_allocator/random.h
new file mode 100644
index 0000000..d5c0ce3
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/random.h
@@ -0,0 +1,27 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_
+
+#include <stdint.h>
+
+#include "third_party/base/base_export.h"
+
+namespace pdfium {
+namespace base {
+
+BASE_EXPORT uint32_t RandomValue();
+
+// TODO(crbug.com/984742): Rename this to `SetRandomSeedForTesting`.
+//
+// Sets the seed for the random number generator to a known value, to cause the
+// RNG to generate a predictable sequence of outputs. May be called multiple
+// times.
+BASE_EXPORT void SetRandomPageBaseSeed(int64_t seed);
+
+} // namespace base
+} // namespace pdfium
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_RANDOM_H_