Update PartitionAlloc to r760877.
- r731754 [windows] Release the Win32 address space reservation on OOM
- r733968 Record failed allocation sizes in crash reports
- r737206 Revert "Speculatively turn off the `PartitionAllocZeroFill`"
- r755954 Move unmapping of large pages out of the spin-lock
- r758198 Remove a Lock from PartitionAllocator
- r760877 Add missing locking in PartitionAlloc
Make FX_OutOfMemoryTerminate() compatible with r733968 by adding a size
parameter.
Change-Id: I04b3d4b0c570e351cec2d09aed78ca0e924fc4c0
Reviewed-on: https://pdfium-review.googlesource.com/c/pdfium/+/68910
Reviewed-by: Tom Sepez <tsepez@chromium.org>
Commit-Queue: Lei Zhang <thestig@chromium.org>
diff --git a/core/fxcrt/fx_memory.cpp b/core/fxcrt/fx_memory.cpp
index 22862b2..183adde 100644
--- a/core/fxcrt/fx_memory.cpp
+++ b/core/fxcrt/fx_memory.cpp
@@ -62,7 +62,7 @@
pdfium::base::PartitionFree(pointer);
}
-NOINLINE void FX_OutOfMemoryTerminate() {
+NOINLINE void FX_OutOfMemoryTerminate(size_t size) {
// Convince the linker this should not be folded with similar functions using
// Identical Code Folding.
static int make_this_function_aliased = 0xbd;
@@ -89,14 +89,14 @@
void* AllocOrDie(size_t num_members, size_t member_size) {
void* result = Alloc(num_members, member_size);
if (!result)
- FX_OutOfMemoryTerminate(); // Never returns.
+ FX_OutOfMemoryTerminate(0); // Never returns.
return result;
}
void* AllocOrDie2D(size_t w, size_t h, size_t member_size) {
if (w >= std::numeric_limits<size_t>::max() / h)
- FX_OutOfMemoryTerminate(); // Never returns.
+ FX_OutOfMemoryTerminate(0); // Never returns.
return AllocOrDie(w * h, member_size);
}
@@ -129,14 +129,14 @@
void* CallocOrDie(size_t num_members, size_t member_size) {
void* result = Calloc(num_members, member_size);
if (!result)
- FX_OutOfMemoryTerminate(); // Never returns.
+ FX_OutOfMemoryTerminate(0); // Never returns.
return result;
}
void* CallocOrDie2D(size_t w, size_t h, size_t member_size) {
if (w >= std::numeric_limits<size_t>::max() / h)
- FX_OutOfMemoryTerminate(); // Never returns.
+ FX_OutOfMemoryTerminate(0); // Never returns.
return CallocOrDie(w * h, member_size);
}
@@ -144,7 +144,7 @@
void* ReallocOrDie(void* ptr, size_t num_members, size_t member_size) {
void* result = Realloc(ptr, num_members, member_size);
if (!result)
- FX_OutOfMemoryTerminate(); // Never returns.
+ FX_OutOfMemoryTerminate(0); // Never returns.
return result;
}
diff --git a/core/fxcrt/fx_memory.h b/core/fxcrt/fx_memory.h
index ca9a08a..31d7d9e 100644
--- a/core/fxcrt/fx_memory.h
+++ b/core/fxcrt/fx_memory.h
@@ -35,7 +35,7 @@
pdfium::base::PartitionAllocatorGeneric& GetStringPartitionAllocator();
void FXMEM_InitializePartitionAlloc();
-NOINLINE void FX_OutOfMemoryTerminate();
+NOINLINE void FX_OutOfMemoryTerminate(size_t size);
// These never return nullptr, and must return cleared memory.
#define FX_Alloc(type, size) \
diff --git a/third_party/base/allocator/partition_allocator/oom.h b/third_party/base/allocator/partition_allocator/oom.h
index bbd1ead..8f3e9c6 100644
--- a/third_party/base/allocator/partition_allocator/oom.h
+++ b/third_party/base/allocator/partition_allocator/oom.h
@@ -21,7 +21,7 @@
// OOM_CRASH() - Specialization of IMMEDIATE_CRASH which will raise a custom
// exception on Windows to signal this is OOM and not a normal assert.
#if defined(OS_WIN)
-#define OOM_CRASH() \
+#define OOM_CRASH(size) \
do { \
OOM_CRASH_PREVENT_ICF(); \
base::internal::RunPartitionAllocOomCallback(); \
@@ -29,7 +29,7 @@
IMMEDIATE_CRASH(); \
} while (0)
#else
-#define OOM_CRASH() \
+#define OOM_CRASH(size) \
do { \
base::internal::RunPartitionAllocOomCallback(); \
OOM_CRASH_PREVENT_ICF(); \
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.cc b/third_party/base/allocator/partition_allocator/page_allocator.cc
index 14e9379..e158bd1 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.cc
+++ b/third_party/base/allocator/partition_allocator/page_allocator.cc
@@ -239,14 +239,16 @@
return false;
}
-void ReleaseReservation() {
+bool ReleaseReservation() {
// To avoid deadlock, call only FreePages.
subtle::SpinLock::Guard guard(*GetReserveLock());
- if (s_reservation_address != nullptr) {
- FreePages(s_reservation_address, s_reservation_size);
- s_reservation_address = nullptr;
- s_reservation_size = 0;
- }
+ if (!s_reservation_address)
+ return false;
+
+ FreePages(s_reservation_address, s_reservation_size);
+ s_reservation_address = nullptr;
+ s_reservation_size = 0;
+ return true;
}
uint32_t GetAllocPageErrorCode() {
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.h b/third_party/base/allocator/partition_allocator/page_allocator.h
index 9b076f9..c50b908 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.h
+++ b/third_party/base/allocator/partition_allocator/page_allocator.h
@@ -183,7 +183,9 @@
// Releases any reserved address space. |AllocPages| calls this automatically on
// an allocation failure. External allocators may also call this on failure.
-BASE_EXPORT void ReleaseReservation();
+//
+// Returns true when an existing reservation was released.
+BASE_EXPORT bool ReleaseReservation();
// Returns |errno| (POSIX) or the result of |GetLastError| (Windows) when |mmap|
// (POSIX) or |VirtualAlloc| (Windows) fails.
diff --git a/third_party/base/allocator/partition_allocator/page_allocator_internals_win.h b/third_party/base/allocator/partition_allocator/page_allocator_internals_win.h
index 2ba0a25..9174ee8 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator_internals_win.h
+++ b/third_party/base/allocator/partition_allocator/page_allocator_internals_win.h
@@ -91,7 +91,7 @@
GetAccessFlags(accessibility))) {
int32_t error = GetLastError();
if (error == ERROR_COMMITMENT_LIMIT)
- OOM_CRASH();
+ OOM_CRASH(length);
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
CHECK_EQ(ERROR_SUCCESS, error);
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.cc b/third_party/base/allocator/partition_allocator/partition_alloc.cc
index 19ce17b..910891b 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.cc
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.cc
@@ -17,6 +17,18 @@
namespace pdfium {
namespace base {
+namespace {
+
+bool InitializeOnce() {
+ // We mark the sentinel bucket/page as free to make sure it is skipped by our
+ // logic to find a new active page.
+ internal::PartitionBucket::get_sentinel_bucket()->active_pages_head =
+ internal::PartitionPage::get_sentinel_page();
+ return true;
+}
+
+} // namespace
+
// Two partition pages are used as guard / metadata page so make sure the super
// page size is bigger.
static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size");
@@ -63,9 +75,7 @@
return s_initialized_lock;
}
-static bool g_initialized = false;
-
-void (*internal::PartitionRootBase::gOomHandlingFunction)() = nullptr;
+OomFunction internal::PartitionRootBase::g_oom_handling_function = nullptr;
std::atomic<bool> PartitionAllocHooks::hooks_enabled_(false);
subtle::SpinLock PartitionAllocHooks::set_hooks_lock_;
std::atomic<PartitionAllocHooks::AllocationObserverHook*>
@@ -171,26 +181,17 @@
static void PartitionAllocBaseInit(internal::PartitionRootBase* root) {
DCHECK(!root->initialized);
- {
- subtle::SpinLock::Guard guard(*GetLock());
- if (!g_initialized) {
- g_initialized = true;
- // We mark the sentinel bucket/page as free to make sure it is skipped by
- // our logic to find a new active page.
- internal::PartitionBucket::get_sentinel_bucket()->active_pages_head =
- internal::PartitionPage::get_sentinel_page();
- }
- }
-
- root->initialized = true;
+ static bool initialized = InitializeOnce();
+ static_cast<void>(initialized);
// This is a "magic" value so we can test if a root pointer is valid.
root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
+ root->initialized = true;
}
-void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
- DCHECK(oom_handling_function);
- internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
+void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
+ DCHECK(on_out_of_memory);
+ internal::PartitionRootBase::g_oom_handling_function = on_out_of_memory;
}
void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
@@ -371,7 +372,7 @@
if (new_size > kGenericMaxDirectMapped) {
if (flags & PartitionAllocReturnNull)
return nullptr;
- internal::PartitionExcessiveAllocationSize();
+ internal::PartitionExcessiveAllocationSize(new_size);
}
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
@@ -384,21 +385,26 @@
if (LIKELY(!overridden)) {
internal::PartitionPage* page = internal::PartitionPage::FromPointer(
internal::PartitionCookieFreePointerAdjust(ptr));
- // TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(root->IsValidPage(page));
+ bool success = false;
+ {
+ subtle::SpinLock::Guard guard{root->lock};
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ DCHECK(root->IsValidPage(page));
- if (UNLIKELY(page->bucket->is_direct_mapped())) {
- // We may be able to perform the realloc in place by changing the
- // accessibility of memory pages and, if reducing the size, decommitting
- // them.
- if (PartitionReallocDirectMappedInPlace(root, page, new_size)) {
- if (UNLIKELY(hooks_enabled)) {
- PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size,
- type_name);
- }
- return ptr;
+ if (UNLIKELY(page->bucket->is_direct_mapped())) {
+ // We may be able to perform the realloc in place by changing the
+ // accessibility of memory pages and, if reducing the size, decommitting
+ // them.
+ success = PartitionReallocDirectMappedInPlace(root, page, new_size);
}
}
+ if (success) {
+ if (UNLIKELY(hooks_enabled)) {
+ PartitionAllocHooks::ReallocObserverHookIfEnabled(ptr, ptr, new_size,
+ type_name);
+ }
+ return ptr;
+ }
const size_t actual_new_size = root->ActualSize(new_size);
actual_old_size = PartitionAllocGetSize(ptr);
@@ -426,7 +432,7 @@
if (!ret) {
if (flags & PartitionAllocReturnNull)
return nullptr;
- internal::PartitionExcessiveAllocationSize();
+ internal::PartitionExcessiveAllocationSize(new_size);
}
size_t copy_size = actual_old_size;
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.h b/third_party/base/allocator/partition_allocator/partition_alloc.h
index e3ce36c..2dc62b6 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.h
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.h
@@ -220,7 +220,7 @@
const PartitionBucketMemoryStats*) = 0;
};
-BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
+BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
// PartitionAlloc supports setting hooks to observe allocations/frees as they
// occur as well as 'override' hooks that allow overriding those operations.
@@ -368,7 +368,8 @@
internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(internal::PartitionRootBase::IsValidPage(page));
- page->Free(ptr);
+ internal::DeferredUnmap deferred_unmap = page->Free(ptr);
+ deferred_unmap.Run();
#endif
}
@@ -462,10 +463,12 @@
internal::PartitionPage* page = internal::PartitionPage::FromPointer(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
DCHECK(IsValidPage(page));
+ internal::DeferredUnmap deferred_unmap;
{
subtle::SpinLock::Guard guard(lock);
- page->Free(ptr);
+ deferred_unmap = page->Free(ptr);
}
+ deferred_unmap.Run();
#endif
}
diff --git a/third_party/base/allocator/partition_allocator/partition_bucket.cc b/third_party/base/allocator/partition_allocator/partition_bucket.cc
index 7b24c90..066f40c 100644
--- a/third_party/base/allocator/partition_allocator/partition_bucket.cc
+++ b/third_party/base/allocator/partition_allocator/partition_bucket.cc
@@ -185,7 +185,7 @@
}
NOINLINE void PartitionBucket::OnFull() {
- OOM_CRASH();
+ OOM_CRASH(0);
}
ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
@@ -478,13 +478,10 @@
if (size > kGenericMaxDirectMapped) {
if (return_null)
return nullptr;
- PartitionExcessiveAllocationSize();
+ PartitionExcessiveAllocationSize(size);
}
new_page = PartitionDirectMap(root, flags, size);
-#if !defined(OS_MACOSX)
- // Turn off the optimization to see if it helps https://crbug.com/892550.
*is_already_zeroed = true;
-#endif
} else if (LIKELY(SetNewActivePage())) {
// First, did we find an active page in the active pages list?
new_page = active_pages_head;
@@ -538,7 +535,7 @@
DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
if (return_null)
return nullptr;
- root->OutOfMemory();
+ root->OutOfMemory(size);
}
// TODO(ajwong): Is there a way to avoid the reading of bucket here?
diff --git a/third_party/base/allocator/partition_allocator/partition_oom.cc b/third_party/base/allocator/partition_allocator/partition_oom.cc
index a4052d1..f93dae7 100644
--- a/third_party/base/allocator/partition_allocator/partition_oom.cc
+++ b/third_party/base/allocator/partition_allocator/partition_oom.cc
@@ -11,13 +11,13 @@
namespace base {
namespace internal {
-void NOINLINE PartitionExcessiveAllocationSize() {
- OOM_CRASH();
+void NOINLINE PartitionExcessiveAllocationSize(size_t size) {
+ OOM_CRASH(size);
}
#if !defined(ARCH_CPU_64_BITS)
-NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages() {
- OOM_CRASH();
+NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size) {
+ OOM_CRASH(size);
}
#endif
diff --git a/third_party/base/allocator/partition_allocator/partition_oom.h b/third_party/base/allocator/partition_allocator/partition_oom.h
index be43ff3..683358e 100644
--- a/third_party/base/allocator/partition_allocator/partition_oom.h
+++ b/third_party/base/allocator/partition_allocator/partition_oom.h
@@ -8,6 +8,8 @@
#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_OOM_H_
+#include <stddef.h>
+
#include "build/build_config.h"
#include "third_party/base/compiler_specific.h"
@@ -15,10 +17,10 @@
namespace base {
namespace internal {
-NOINLINE void PartitionExcessiveAllocationSize();
+NOINLINE void PartitionExcessiveAllocationSize(size_t size);
#if !defined(ARCH_CPU_64_BITS)
-NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages();
+NOINLINE void PartitionOutOfMemoryWithLotsOfUncommitedPages(size_t size);
#endif
} // namespace internal
diff --git a/third_party/base/allocator/partition_allocator/partition_page.cc b/third_party/base/allocator/partition_allocator/partition_page.cc
index 8b507c3..0ddfe12 100644
--- a/third_party/base/allocator/partition_allocator/partition_page.cc
+++ b/third_party/base/allocator/partition_allocator/partition_page.cc
@@ -13,7 +13,7 @@
namespace {
-ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
+ALWAYS_INLINE DeferredUnmap PartitionDirectUnmap(PartitionPage* page) {
PartitionRootBase* root = PartitionRootBase::FromPage(page);
const PartitionDirectMapExtent* extent =
PartitionDirectMapExtent::FromPage(page);
@@ -46,8 +46,7 @@
// Account for the mapping starting a partition page before the actual
// allocation address.
ptr -= kPartitionPageSize;
-
- FreePages(ptr, unmap_size);
+ return {ptr, unmap_size};
}
ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
@@ -90,13 +89,12 @@
return &sentinel_page_;
}
-void PartitionPage::FreeSlowPath() {
+DeferredUnmap PartitionPage::FreeSlowPath() {
DCHECK(this != get_sentinel_page());
if (LIKELY(num_allocated_slots == 0)) {
// Page became fully unused.
if (UNLIKELY(bucket->is_direct_mapped())) {
- PartitionDirectUnmap(this);
- return;
+ return PartitionDirectUnmap(this);
}
// If it's the current active page, change it. We bounce the page to
// the empty list as a force towards defragmentation.
@@ -130,8 +128,9 @@
// Special case: for a partition page with just a single slot, it may
// now be empty and we want to run it through the empty logic.
if (UNLIKELY(num_allocated_slots == 0))
- FreeSlowPath();
+ return FreeSlowPath();
}
+ return {};
}
void PartitionPage::Decommit(PartitionRootBase* root) {
@@ -160,6 +159,10 @@
Decommit(root);
}
+void DeferredUnmap::Unmap() {
+ FreePages(ptr, size);
+}
+
} // namespace internal
} // namespace base
} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/partition_page.h b/third_party/base/allocator/partition_allocator/partition_page.h
index 4bbb76b..049ff26 100644
--- a/third_party/base/allocator/partition_allocator/partition_page.h
+++ b/third_party/base/allocator/partition_allocator/partition_page.h
@@ -19,6 +19,20 @@
struct PartitionRootBase;
+// PartitionPage::Free() defers unmapping a large page until the lock is
+// released. Callers of PartitionPage::Free() must invoke Run().
+// TODO(1061437): Reconsider once the new locking mechanism is implemented.
+struct DeferredUnmap {
+ void* ptr = nullptr;
+ size_t size = 0;
+ // In most cases there is no page to unmap and ptr == nullptr. This function
+ // is inlined to avoid the overhead of a function call in the common case.
+ ALWAYS_INLINE void Run();
+
+ private:
+ BASE_EXPORT NOINLINE void Unmap();
+};
+
// Some notes on page states. A page can be in one of four major states:
// 1) Active.
// 2) Full.
@@ -62,8 +76,9 @@
// Public API
// Note the matching Alloc() functions are in PartitionPage.
- BASE_EXPORT NOINLINE void FreeSlowPath();
- ALWAYS_INLINE void Free(void* ptr);
+ // Callers must invoke DeferredUnmap::Run() after releasing the lock.
+ BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT;
+ ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT;
void Decommit(PartitionRootBase* root);
void DecommitIfPossible(PartitionRootBase* root);
@@ -201,7 +216,7 @@
return 0;
}
-ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
+ALWAYS_INLINE DeferredUnmap PartitionPage::Free(void* ptr) {
#if DCHECK_IS_ON()
size_t slot_size = bucket->slot_size;
const size_t raw_size = get_raw_size();
@@ -229,12 +244,13 @@
freelist_head = entry;
--num_allocated_slots;
if (UNLIKELY(num_allocated_slots <= 0)) {
- FreeSlowPath();
+ return FreeSlowPath();
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
DCHECK(get_raw_size() == 0);
}
+ return {};
}
ALWAYS_INLINE bool PartitionPage::is_active() const {
@@ -287,6 +303,12 @@
next_page = nullptr;
}
+ALWAYS_INLINE void DeferredUnmap::Run() {
+ if (UNLIKELY(ptr)) {
+ Unmap();
+ }
+}
+
} // namespace internal
} // namespace base
} // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/partition_root_base.cc b/third_party/base/allocator/partition_allocator/partition_root_base.cc
index 3136584..5434347 100644
--- a/third_party/base/allocator/partition_allocator/partition_root_base.cc
+++ b/third_party/base/allocator/partition_allocator/partition_root_base.cc
@@ -13,19 +13,19 @@
namespace base {
namespace internal {
-NOINLINE void PartitionRootBase::OutOfMemory() {
+NOINLINE void PartitionRootBase::OutOfMemory(size_t size) {
#if !defined(ARCH_CPU_64_BITS)
// Check whether this OOM is due to a lot of super pages that are allocated
// but not committed, probably due to http://crbug.com/421387.
if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
total_size_of_committed_pages >
kReasonableSizeOfUnusedPages) {
- PartitionOutOfMemoryWithLotsOfUncommitedPages();
+ PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
}
#endif
- if (PartitionRootBase::gOomHandlingFunction)
- (*PartitionRootBase::gOomHandlingFunction)();
- OOM_CRASH();
+ if (PartitionRootBase::g_oom_handling_function)
+ (*PartitionRootBase::g_oom_handling_function)(size);
+ OOM_CRASH(size);
}
void PartitionRootBase::DecommitEmptyPages() {
diff --git a/third_party/base/allocator/partition_allocator/partition_root_base.h b/third_party/base/allocator/partition_allocator/partition_root_base.h
index 2f4b70e..5d692b2 100644
--- a/third_party/base/allocator/partition_allocator/partition_root_base.h
+++ b/third_party/base/allocator/partition_allocator/partition_root_base.h
@@ -14,6 +14,9 @@
namespace pdfium {
namespace base {
+
+typedef void (*OomFunction)(size_t);
+
namespace internal {
struct PartitionPage;
@@ -73,9 +76,9 @@
ALWAYS_INLINE static bool IsValidPage(PartitionPage* page);
ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
- // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
- static void (*gOomHandlingFunction)();
- NOINLINE void OutOfMemory();
+ // g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
+ static OomFunction g_oom_handling_function;
+ NOINLINE void OutOfMemory(size_t size);
ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
ALWAYS_INLINE void DecreaseCommittedPages(size_t len);