mac: Make the system page size a run-time property

Port https://crrev.com/808610 to PDFium's copy of PartitionAlloc.

Tbr: tsepez@chromium.org
Bug: chromium:1102194
Change-Id: I098fa53ec71e9350cfc759fbb05e1b2647422cb4
Reviewed-on: https://pdfium-review.googlesource.com/c/pdfium/+/74111
Reviewed-by: Lei Zhang <thestig@chromium.org>
Commit-Queue: Lei Zhang <thestig@chromium.org>
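The pattern applied throughout this CL: each page-size constant becomes an ALWAYS_INLINE function that stays constexpr on platforms where the page size is fixed at compile time, and becomes a __attribute__((const)) run-time read of vm_page_size on Apple platforms. A minimal self-contained sketch of the idea (scaffolding simplified for illustration; plain inline stands in for ALWAYS_INLINE, and only the names match the patch):

  #include <stddef.h>

  #if defined(OS_APPLE)
  #include <mach/vm_page_size.h>
  // vm_page_size never changes after boot, so __attribute__((const)) lets the
  // compiler treat repeated calls as a single value.
  #define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
  #else
  #define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr
  #endif

  PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR inline size_t SystemPageSize() {
  #if defined(OS_APPLE)
    return vm_page_size;  // 4 KiB on Intel Macs, 16 KiB on Apple Silicon.
  #else
    return 4096;  // Simplified; the real header varies this per platform.
  #endif
  }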
diff --git a/third_party/BUILD.gn b/third_party/BUILD.gn
index 5982217..3dd2aea 100644
--- a/third_party/BUILD.gn
+++ b/third_party/BUILD.gn
@@ -572,6 +572,7 @@
"base/allocator/partition_allocator/page_allocator_internal.h",
"base/allocator/partition_allocator/partition_alloc.cc",
"base/allocator/partition_allocator/partition_alloc.h",
+ "base/allocator/partition_allocator/partition_alloc_check.h",
"base/allocator/partition_allocator/partition_alloc_constants.h",
"base/allocator/partition_allocator/partition_bucket.cc",
"base/allocator/partition_allocator/partition_bucket.h",
diff --git a/third_party/base/allocator/partition_allocator/address_space_randomization.cc b/third_party/base/allocator/partition_allocator/address_space_randomization.cc
index c6f268f..18807a6 100644
--- a/third_party/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/third_party/base/allocator/partition_allocator/address_space_randomization.cc
@@ -37,14 +37,14 @@
windows_81_initialized = true;
}
if (!windows_81) {
- random &= internal::kASLRMaskBefore8_10;
+ random &= internal::ASLRMaskBefore8_10();
} else {
- random &= internal::kASLRMask;
+ random &= internal::ASLRMask();
}
- random += internal::kASLROffset;
+ random += internal::ASLROffset();
#else
- random &= internal::kASLRMask;
- random += internal::kASLROffset;
+ random &= internal::ASLRMask();
+ random += internal::ASLROffset();
#endif // defined(OS_WIN) && !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#else // defined(ARCH_CPU_32_BITS)
#if defined(OS_WIN)
@@ -58,11 +58,11 @@
if (!is_wow64)
return nullptr;
#endif // defined(OS_WIN)
- random &= internal::kASLRMask;
- random += internal::kASLROffset;
+ random &= internal::ASLRMask();
+ random += internal::ASLROffset();
#endif // defined(ARCH_CPU_32_BITS)
- DCHECK_EQ(0ULL, (random & kPageAllocationGranularityOffsetMask));
+ DCHECK_EQ(0ULL, (random & PageAllocationGranularityOffsetMask()));
return reinterpret_cast<void*>(random);
}
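A hypothetical worked example of what the rewritten arithmetic computes at run time, using the OS_APPLE branch with 4 KiB pages (values invented for illustration):

  // random (raw bits)        = 0x0000001f2e3d4c5b
  // random &= ASLRMask();    // AslrMask(38) == 0x0000003ffffff000
  //                          //   -> 0x0000001f2e3d4000, low 12 bits cleared
  // random += ASLROffset();  // AslrAddress(0x1000000000ULL)
  //                          //   -> 0x0000002f2e3d4000
  // The trailing DCHECK_EQ holds because the mask clears the granularity bits
  // and the offset is itself granularity-aligned.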
diff --git a/third_party/base/allocator/partition_allocator/address_space_randomization.h b/third_party/base/allocator/partition_allocator/address_space_randomization.h
index c6d8ca9..28c8271 100644
--- a/third_party/base/allocator/partition_allocator/address_space_randomization.h
+++ b/third_party/base/allocator/partition_allocator/address_space_randomization.h
@@ -8,6 +8,7 @@
#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/page_allocator.h"
#include "third_party/base/base_export.h"
+#include "third_party/base/compiler_specific.h"
namespace pdfium {
namespace base {
@@ -18,10 +19,12 @@
namespace internal {
-constexpr uintptr_t AslrAddress(uintptr_t mask) {
- return mask & kPageAllocationGranularityBaseMask;
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
+AslrAddress(uintptr_t mask) {
+  return mask & PageAllocationGranularityBaseMask();
}
-constexpr uintptr_t AslrMask(uintptr_t bits) {
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
+AslrMask(uintptr_t bits) {
return AslrAddress((1ULL << bits) - 1ULL);
}
@@ -41,19 +44,31 @@
// hard-coded in those tools, bad things happen. This address range is
// copied from TSAN source but works with all tools. See
// https://crbug.com/539863.
- constexpr uintptr_t kASLRMask = AslrAddress(0x007fffffffffULL);
- constexpr uintptr_t kASLROffset = AslrAddress(0x7e8000000000ULL);
+ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
+ ASLRMask() {
+ return AslrAddress(0x007fffffffffULL);
+ }
+ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
+ ASLROffset() {
+ return AslrAddress(0x7e8000000000ULL);
+ }
#elif defined(OS_WIN)
// Windows 8.10 and newer support the full 48 bit address range. Older
- // versions of Windows only support 44 bits. Since kASLROffset is non-zero
+ // versions of Windows only support 44 bits. Since ASLROffset() is non-zero
// and may cause a carry, use 47 and 43 bit masks. See
// http://www.alex-ionescu.com/?p=246
- constexpr uintptr_t kASLRMask = AslrMask(47);
- constexpr uintptr_t kASLRMaskBefore8_10 = AslrMask(43);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(47);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLRMaskBefore8_10() {
+ return AslrMask(43);
+ }
// Try not to map pages into the range where Windows loads DLLs by default.
- constexpr uintptr_t kASLROffset = 0x80000000ULL;
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return 0x80000000ULL;
+ }
#elif defined(OS_APPLE)
@@ -70,8 +85,14 @@
//
// TODO(crbug.com/738925): Remove this limitation if/when the macOS behavior
// changes.
- constexpr uintptr_t kASLRMask = AslrMask(38);
- constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
+ ASLRMask() {
+ return AslrMask(38);
+ }
+ PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
+ ASLROffset() {
+ return AslrAddress(0x1000000000ULL);
+ }
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
@@ -79,8 +100,12 @@
// Linux (and macOS) support the full 47-bit user space of x64 processors.
// Use only 46 to allow the kernel a chance to fulfill the request.
- constexpr uintptr_t kASLRMask = AslrMask(46);
- constexpr uintptr_t kASLROffset = AslrAddress(0);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(46);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0);
+ }
#elif defined(ARCH_CPU_ARM64)
@@ -88,15 +113,23 @@
// Restrict the address range on Android to avoid a large performance
// regression in single-process WebViews. See https://crbug.com/837640.
- constexpr uintptr_t kASLRMask = AslrMask(30);
- constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(30);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0x20000000ULL);
+ }
#else
// ARM64 on Linux has 39-bit user space. Use 38 bits since kASLROffset
// could cause a carry.
- constexpr uintptr_t kASLRMask = AslrMask(38);
- constexpr uintptr_t kASLROffset = AslrAddress(0x1000000000ULL);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(38);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0x1000000000ULL);
+ }
#endif
@@ -107,20 +140,32 @@
// AIX has 64 bits of virtual addressing, but we limit the address range
// to (a) minimize segment lookaside buffer (SLB) misses; and (b) use
// extra address space to isolate the mmap regions.
- constexpr uintptr_t kASLRMask = AslrMask(30);
- constexpr uintptr_t kASLROffset = AslrAddress(0x400000000000ULL);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(30);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0x400000000000ULL);
+ }
#elif defined(ARCH_CPU_BIG_ENDIAN)
// Big-endian Linux PPC has 44 bits of virtual addressing. Use 42.
- constexpr uintptr_t kASLRMask = AslrMask(42);
- constexpr uintptr_t kASLROffset = AslrAddress(0);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(42);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0);
+ }
#else // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
// Little-endian Linux PPC has 48 bits of virtual addressing. Use 46.
- constexpr uintptr_t kASLRMask = AslrMask(46);
- constexpr uintptr_t kASLROffset = AslrAddress(0);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(46);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0);
+ }
#endif // !defined(OS_AIX) && !defined(ARCH_CPU_BIG_ENDIAN)
@@ -129,21 +174,31 @@
// Linux on Z uses bits 22 - 32 for Region Indexing, which translates to
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel a
// chance to fulfill the request.
- constexpr uintptr_t kASLRMask = AslrMask(40);
- constexpr uintptr_t kASLROffset = AslrAddress(0);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(40);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0);
+ }
#elif defined(ARCH_CPU_S390)
// 31 bits of virtual addressing. Truncate to 29 bits to allow the kernel
// a chance to fulfill the request.
- constexpr uintptr_t kASLRMask = AslrMask(29);
- constexpr uintptr_t kASLROffset = AslrAddress(0);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(29);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0);
+ }
#else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_PPC64) &&
// !defined(ARCH_CPU_S390X) && !defined(ARCH_CPU_S390)
// For all other POSIX variants, use 30 bits.
- constexpr uintptr_t kASLRMask = AslrMask(30);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(30);
+ }
#if defined(OS_SOLARIS)
@@ -157,20 +212,26 @@
// fails allocate as if there were no hint at all. The high hint
// prevents the break from getting hemmed in at low values, ceding half
// of the address space to the system heap.
- constexpr uintptr_t kASLROffset = AslrAddress(0x80000000ULL);
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0x80000000ULL);
+ }
#elif defined(OS_AIX)
// The range 0x30000000 - 0xD0000000 is available on AIX; choose the
// upper range.
- constexpr uintptr_t kASLROffset = AslrAddress(0x90000000ULL);
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0x90000000ULL);
+ }
#else // !defined(OS_SOLARIS) && !defined(OS_AIX)
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macOS
// 10.6 and 10.7.
- constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0x20000000ULL);
+ }
#endif // !defined(OS_SOLARIS) && !defined(OS_AIX)
@@ -184,8 +245,12 @@
// This is a good range on 32-bit Windows and Android (the only platforms on
// which we support 32-bitness). Allocates in the 0.5 - 1.5 GiB region. There
// is no issue with carries here.
- constexpr uintptr_t kASLRMask = AslrMask(30);
- constexpr uintptr_t kASLROffset = AslrAddress(0x20000000ULL);
+ constexpr ALWAYS_INLINE uintptr_t ASLRMask() {
+ return AslrMask(30);
+ }
+ constexpr ALWAYS_INLINE uintptr_t ASLROffset() {
+ return AslrAddress(0x20000000ULL);
+ }
#else
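Note the split in this header: branches reachable from Apple builds (the MEMORY_TOOL_REPLACES_ALLOCATOR and OS_APPLE blocks) use PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR, while the Windows- and non-Apple-POSIX-only branches keep a plain constexpr and therefore remain usable in constant expressions on those platforms, e.g. (hypothetical check, not from the patch):

  static_assert(internal::ASLRMask() != 0, "mask must be non-zero");

The __attribute__((const)) form cannot appear in a static_assert, which is the limitation the new partition_alloc_check.h below works around.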
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.cc b/third_party/base/allocator/partition_allocator/page_allocator.cc
index e158bd1..91d00d2 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.cc
+++ b/third_party/base/allocator/partition_allocator/page_allocator.cc
@@ -92,9 +92,9 @@
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
bool commit) {
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ DCHECK(!(length & PageAllocationGranularityOffsetMask()));
DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
- kPageAllocationGranularityOffsetMask));
+ PageAllocationGranularityOffsetMask()));
DCHECK(commit || accessibility == PageInaccessible);
return SystemAllocPagesInternal(hint, length, accessibility, page_tag,
commit);
@@ -106,13 +106,13 @@
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
bool commit) {
- DCHECK(length >= kPageAllocationGranularity);
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
- DCHECK(align >= kPageAllocationGranularity);
+ DCHECK(length >= PageAllocationGranularity());
+ DCHECK(!(length & PageAllocationGranularityOffsetMask()));
+ DCHECK(align >= PageAllocationGranularity());
// Alignment must be power of 2 for masking math to work.
DCHECK(pdfium::base::bits::IsPowerOfTwo(align));
DCHECK(!(reinterpret_cast<uintptr_t>(address) &
- kPageAllocationGranularityOffsetMask));
+ PageAllocationGranularityOffsetMask()));
uintptr_t align_offset_mask = align - 1;
uintptr_t align_base_mask = ~align_offset_mask;
DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
@@ -164,7 +164,7 @@
}
// Make a larger allocation so we can force alignment.
- size_t try_length = length + (align - kPageAllocationGranularity);
+ size_t try_length = length + (align - PageAllocationGranularity());
CHECK(try_length >= length);
void* ret;
@@ -184,40 +184,40 @@
void FreePages(void* address, size_t length) {
DCHECK(!(reinterpret_cast<uintptr_t>(address) &
- kPageAllocationGranularityOffsetMask));
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ PageAllocationGranularityOffsetMask()));
+ DCHECK(!(length & PageAllocationGranularityOffsetMask()));
FreePagesInternal(address, length);
}
bool TrySetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ DCHECK(!(length & SystemPageOffsetMask()));
return TrySetSystemPagesAccessInternal(address, length, accessibility);
}
void SetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ DCHECK(!(length & SystemPageOffsetMask()));
SetSystemPagesAccessInternal(address, length, accessibility);
}
void DecommitSystemPages(void* address, size_t length) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DCHECK_EQ(0UL, length & SystemPageOffsetMask());
DecommitSystemPagesInternal(address, length);
}
bool RecommitSystemPages(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DCHECK_EQ(0UL, length & SystemPageOffsetMask());
DCHECK(PageInaccessible != accessibility);
return RecommitSystemPagesInternal(address, length, accessibility);
}
void DiscardSystemPages(void* address, size_t length) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ DCHECK_EQ(0UL, length & SystemPageOffsetMask());
DiscardSystemPagesInternal(address, length);
}
@@ -230,7 +230,7 @@
if (mem != nullptr) {
// We guarantee this alignment when reserving address space.
DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
- kPageAllocationGranularityOffsetMask));
+ PageAllocationGranularityOffsetMask()));
s_reservation_address = mem;
s_reservation_size = size;
return true;
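Worked example for the try_length over-allocation above (hypothetical numbers, 4 KiB granularity): requesting length = 64 KiB at align = 64 KiB yields try_length = 64 KiB + (64 KiB - 4 KiB) = 124 KiB. Mappings start granularity-aligned, so the start is at most align - PageAllocationGranularity() = 60 KiB short of the next 64 KiB boundary, and a 124 KiB reservation therefore always contains an aligned 64 KiB span.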
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.h b/third_party/base/allocator/partition_allocator/page_allocator.h
index c50b908..b2eb7f6 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator.h
+++ b/third_party/base/allocator/partition_allocator/page_allocator.h
@@ -42,9 +42,9 @@
//
// The requested |address| is just a hint; the actual address returned may
// differ. The returned address will be aligned at least to |align| bytes.
-// |length| is in bytes, and must be a multiple of |kPageAllocationGranularity|.
-// |align| is in bytes, and must be a power-of-two multiple of
-// |kPageAllocationGranularity|.
+// |length| is in bytes, and must be a multiple of
+// |PageAllocationGranularity()|. |align| is in bytes, and must be a
+// power-of-two multiple of |PageAllocationGranularity()|.
//
// If |address| is null, then a suitable and randomized address will be chosen
// automatically.
@@ -65,12 +65,13 @@
// bytes.
//
// |address| and |length| must match a previous call to |AllocPages|. Therefore,
-// |address| must be aligned to |kPageAllocationGranularity| bytes, and |length|
-// must be a multiple of |kPageAllocationGranularity|.
+// |address| must be aligned to |PageAllocationGranularity()| bytes, and
+// |length| must be a multiple of |PageAllocationGranularity()|.
BASE_EXPORT void FreePages(void* address, size_t length);
// Mark one or more system pages, starting at |address| with the given
-// |page_accessibility|. |length| must be a multiple of |kSystemPageSize| bytes.
+// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
+// bytes.
//
// Returns true if the permission change succeeded. In most cases you must
// |CHECK| the result.
@@ -80,7 +81,8 @@
PageAccessibilityConfiguration page_accessibility);
// Mark one or more system pages, starting at |address| with the given
-// |page_accessibility|. |length| must be a multiple of |kSystemPageSize| bytes.
+// |page_accessibility|. |length| must be a multiple of |SystemPageSize()|
+// bytes.
//
// Performs a CHECK that the operation succeeds.
BASE_EXPORT void SetSystemPagesAccess(
@@ -89,7 +91,7 @@
PageAccessibilityConfiguration page_accessibility);
// Decommit one or more system pages starting at |address| and continuing for
-// |length| bytes. |length| must be a multiple of |kSystemPageSize|.
+// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
//
// Decommitted means that physical resources (RAM or swap) backing the allocated
// virtual address range are released back to the system, but the address space
@@ -113,7 +115,7 @@
// Recommit one or more system pages, starting at |address| and continuing for
// |length| bytes with the given |page_accessibility|. |length| must be a
-// multiple of |kSystemPageSize|.
+// multiple of |SystemPageSize()|.
//
// Decommitted system pages must be recommitted with their original permissions
// before they are used again.
@@ -126,7 +128,7 @@
PageAccessibilityConfiguration page_accessibility);
// Discard one or more system pages starting at |address| and continuing for
-// |length| bytes. |length| must be a multiple of |kSystemPageSize|.
+// |length| bytes. |length| must be a multiple of |SystemPageSize()|.
//
// Discarding is a hint to the system that the page is no longer required. The
// hint may:
@@ -148,35 +150,37 @@
// based on the original page content, or a page of zeroes.
BASE_EXPORT void DiscardSystemPages(void* address, size_t length);
-// Rounds up |address| to the next multiple of |kSystemPageSize|. Returns
+// Rounds up |address| to the next multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
-constexpr ALWAYS_INLINE uintptr_t RoundUpToSystemPage(uintptr_t address) {
- return (address + kSystemPageOffsetMask) & kSystemPageBaseMask;
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
+RoundUpToSystemPage(uintptr_t address) {
+ return (address + SystemPageOffsetMask()) & SystemPageBaseMask();
}
-// Rounds down |address| to the previous multiple of |kSystemPageSize|. Returns
+// Rounds down |address| to the previous multiple of |SystemPageSize()|. Returns
// 0 for an |address| of 0.
-constexpr ALWAYS_INLINE uintptr_t RoundDownToSystemPage(uintptr_t address) {
- return address & kSystemPageBaseMask;
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
+RoundDownToSystemPage(uintptr_t address) {
+ return address & SystemPageBaseMask();
}
-// Rounds up |address| to the next multiple of |kPageAllocationGranularity|.
+// Rounds up |address| to the next multiple of |PageAllocationGranularity()|.
// Returns 0 for an |address| of 0.
-constexpr ALWAYS_INLINE uintptr_t
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundUpToPageAllocationGranularity(uintptr_t address) {
- return (address + kPageAllocationGranularityOffsetMask) &
- kPageAllocationGranularityBaseMask;
+ return (address + PageAllocationGranularityOffsetMask()) &
+ PageAllocationGranularityBaseMask();
}
// Rounds down |address| to the previous multiple of
-// |kPageAllocationGranularity|. Returns 0 for an |address| of 0.
-constexpr ALWAYS_INLINE uintptr_t
+// |PageAllocationGranularity()|. Returns 0 for an |address| of 0.
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE uintptr_t
RoundDownToPageAllocationGranularity(uintptr_t address) {
- return address & kPageAllocationGranularityBaseMask;
+ return address & PageAllocationGranularityBaseMask();
}
// Reserves (at least) |size| bytes of address space, aligned to
-// |kPageAllocationGranularity|. This can be called early on to make it more
+// |PageAllocationGranularity()|. This can be called early on to make it more
// likely that large allocations will succeed. Returns true if the reservation
// succeeded, false if the reservation failed or a reservation was already made.
BASE_EXPORT bool ReserveAddressSpace(size_t size);
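Hypothetical values for the rounding helpers, assuming SystemPageSize() == 16384 (Apple Silicon):

  // RoundUpToSystemPage(1)       == 16384
  // RoundUpToSystemPage(16384)   == 16384
  // RoundDownToSystemPage(16385) == 16384
  // RoundUpToSystemPage(0)       == 0   (the documented edge case)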
diff --git a/third_party/base/allocator/partition_allocator/page_allocator_constants.h b/third_party/base/allocator/partition_allocator/page_allocator_constants.h
index 77c065a..fdc65ac 100644
--- a/third_party/base/allocator/partition_allocator/page_allocator_constants.h
+++ b/third_party/base/allocator/partition_allocator/page_allocator_constants.h
@@ -8,46 +8,109 @@
#include <stddef.h>
#include "build/build_config.h"
+#include "third_party/base/compiler_specific.h"
+
+#if defined(OS_APPLE)
+
+#include <mach/vm_page_size.h>
+
+// Although page allocator constants are not constexpr, they are run-time
+// constant. Because the underlying variables they access, such as vm_page_size,
+// are not marked const, the compiler normally has no way to know that they
+// don’t change and must obtain their values whenever it can't prove that they
+// haven't been modified, even if they had already been obtained previously.
+// Attaching __attribute__((const)) to these declarations allows these redundant
+// accesses to be omitted under optimization such as common subexpression
+// elimination.
+#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR __attribute__((const))
+
+#else
+
+// When defined, page size constants are fixed at compile time. When not
+// defined, they may vary at run time.
+#define PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR 1
+
+// Use this macro to declare a function as constexpr or not based on whether
+// PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR is defined.
+#define PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR constexpr
+
+#endif
namespace pdfium {
-namespace base {
-#if defined(OS_WIN) || defined(ARCH_CPU_PPC64)
-static constexpr size_t kPageAllocationGranularityShift = 16; // 64KB
-#elif defined(_MIPS_ARCH_LOONGSON)
-static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB
-#elif defined(OS_APPLE) && defined(ARCH_CPU_ARM64)
-static constexpr size_t kPageAllocationGranularityShift = 14; // 16KB
-#else
-static constexpr size_t kPageAllocationGranularityShift = 12; // 4KB
-#endif
-static constexpr size_t kPageAllocationGranularity =
- 1 << kPageAllocationGranularityShift;
-static constexpr size_t kPageAllocationGranularityOffsetMask =
- kPageAllocationGranularity - 1;
-static constexpr size_t kPageAllocationGranularityBaseMask =
- ~kPageAllocationGranularityOffsetMask;
+namespace {
-#if defined(_MIPS_ARCH_LOONGSON)
-static constexpr size_t kSystemPageSize = 16384;
-#elif defined(ARCH_CPU_PPC64)
-// Modern ppc64 systems support 4KB and 64KB page sizes.
-// Since 64KB is the de-facto standard on the platform
-// and binaries compiled for 64KB are likely to work on 4KB systems,
-// 64KB is a good choice here.
-static constexpr size_t kSystemPageSize = 65536;
-#elif defined(OS_APPLE) && defined(ARCH_CPU_ARM64)
-static constexpr size_t kSystemPageSize = 16384;
+#if !defined(OS_APPLE)
+
+constexpr ALWAYS_INLINE int PageAllocationGranularityShift() {
+#if defined(OS_WIN) || defined(ARCH_CPU_PPC64)
+ // Modern ppc64 systems support 4kB (shift = 12) and 64kB (shift = 16) page
+ // sizes. Since 64kB is the de facto standard on the platform and binaries
+ // compiled for 64kB are likely to work on 4kB systems, 64kB is a good choice
+ // here.
+ return 16; // 64kB
+#elif defined(_MIPS_ARCH_LOONGSON)
+ return 14; // 16kB
#else
-static constexpr size_t kSystemPageSize = 4096;
+ return 12; // 4kB
#endif
-static constexpr size_t kSystemPageOffsetMask = kSystemPageSize - 1;
-static_assert((kSystemPageSize & (kSystemPageSize - 1)) == 0,
- "kSystemPageSize must be power of 2");
-static constexpr size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;
+}
+
+#endif
+
+} // namespace
+
+namespace base {
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+PageAllocationGranularity() {
+#if defined(OS_APPLE)
+ return vm_page_size;
+#else
+ return 1ULL << PageAllocationGranularityShift();
+#endif
+}
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+PageAllocationGranularityOffsetMask() {
+ return PageAllocationGranularity() - 1;
+}
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+PageAllocationGranularityBaseMask() {
+ return ~PageAllocationGranularityOffsetMask();
+}
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+SystemPageSize() {
+#if defined(OS_WIN)
+ return 4096;
+#else
+ return PageAllocationGranularity();
+#endif
+}
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+SystemPageOffsetMask() {
+ return SystemPageSize() - 1;
+}
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+SystemPageBaseMask() {
+ return ~SystemPageOffsetMask();
+}
static constexpr size_t kPageMetadataShift = 5; // 32 bytes per partition page.
static constexpr size_t kPageMetadataSize = 1 << kPageMetadataShift;
+// See DecommitSystemPages(); this is not guaranteed to be synchronous on all
+// platforms.
+static constexpr bool kDecommittedPagesAreAlwaysZeroed =
+#if defined(OS_APPLE)
+ false;
+#else
+ true;
+#endif
+
} // namespace base
} // namespace pdfium
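An illustrative sketch (not from the patch) of why the __attribute__((const)) annotation matters on Apple: without it, every call below would reload vm_page_size; with it, the compiler may compute each mask once and reuse it through common subexpression elimination.

  size_t RoundUpBoth(size_t a, size_t b) {
    // Four calls into run-time constants; __attribute__((const)) lets the
    // optimizer fold them into one load each.
    a = (a + SystemPageOffsetMask()) & SystemPageBaseMask();
    b = (b + SystemPageOffsetMask()) & SystemPageBaseMask();
    return a + b;
  }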
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.cc b/third_party/base/allocator/partition_allocator/partition_alloc.cc
index 910891b..2e5e87f 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.cc
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.cc
@@ -9,6 +9,7 @@
#include <memory>
#include <type_traits>
+#include "third_party/base/allocator/partition_allocator/partition_alloc_check.h"
#include "third_party/base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "third_party/base/allocator/partition_allocator/partition_oom.h"
#include "third_party/base/allocator/partition_allocator/partition_page.h"
@@ -29,36 +30,6 @@
} // namespace
-// Two partition pages are used as guard / metadata page so make sure the super
-// page size is bigger.
-static_assert(kPartitionPageSize * 4 <= kSuperPageSize, "ok super page size");
-static_assert(!(kSuperPageSize % kPartitionPageSize), "ok super page multiple");
-// Four system pages gives us room to hack out a still-guard-paged piece
-// of metadata in the middle of a guard partition page.
-static_assert(kSystemPageSize * 4 <= kPartitionPageSize,
- "ok partition page size");
-static_assert(!(kPartitionPageSize % kSystemPageSize),
- "ok partition page multiple");
-static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize,
- "PartitionPage should not be too big");
-static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize,
- "PartitionBucket should not be too big");
-static_assert(sizeof(internal::PartitionSuperPageExtentEntry) <=
- kPageMetadataSize,
- "PartitionSuperPageExtentEntry should not be too big");
-static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
- kSystemPageSize,
- "page metadata fits in hole");
-// Limit to prevent callers accidentally overflowing an int size.
-static_assert(kGenericMaxDirectMapped <=
- (1UL << 31) + kPageAllocationGranularity,
- "maximum direct mapped allocation");
-// Check that some of our zanier calculations worked out as expected.
-static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
-static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
-static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
- "System pages per slot span must be less than 128.");
-
internal::PartitionRootBase::PartitionRootBase() = default;
internal::PartitionRootBase::~PartitionRootBase() = default;
PartitionRoot::PartitionRoot() = default;
@@ -190,6 +161,38 @@
}
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
+ // Two partition pages are used as guard / metadata page so make sure the
+ // super page size is bigger.
+ STATIC_ASSERT_OR_CHECK(PartitionPageSize() * 4 <= kSuperPageSize,
+ "ok super page size");
+ STATIC_ASSERT_OR_CHECK(!(kSuperPageSize % PartitionPageSize()),
+ "ok super page multiple");
+ // Four system pages gives us room to hack out a still-guard-paged piece
+ // of metadata in the middle of a guard partition page.
+ STATIC_ASSERT_OR_CHECK(SystemPageSize() * 4 <= PartitionPageSize(),
+ "ok partition page size");
+ STATIC_ASSERT_OR_CHECK(!(PartitionPageSize() % SystemPageSize()),
+ "ok partition page multiple");
+ static_assert(sizeof(internal::PartitionPage) <= kPageMetadataSize,
+ "PartitionPage should not be too big");
+ static_assert(sizeof(internal::PartitionBucket) <= kPageMetadataSize,
+ "PartitionBucket should not be too big");
+ static_assert(
+ sizeof(internal::PartitionSuperPageExtentEntry) <= kPageMetadataSize,
+ "PartitionSuperPageExtentEntry should not be too big");
+ STATIC_ASSERT_OR_CHECK(
+ kPageMetadataSize * NumPartitionPagesPerSuperPage() <= SystemPageSize(),
+ "page metadata fits in hole");
+ // Limit to prevent callers accidentally overflowing an int size.
+ STATIC_ASSERT_OR_CHECK(
+ GenericMaxDirectMapped() <= (1UL << 31) + PageAllocationGranularity(),
+ "maximum direct mapped allocation");
+ // Check that some of our zanier calculations worked out as expected.
+ static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
+ static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
+ STATIC_ASSERT_OR_CHECK(MaxSystemPagesPerSlotSpan() < (1 << 8),
+ "System pages per slot span must be less than 128.");
+
DCHECK(on_out_of_memory);
internal::PartitionRootBase::g_oom_handling_function = on_out_of_memory;
}
@@ -314,7 +317,7 @@
// Don't reallocate in-place if new size is less than 80 % of the full
// map size, to avoid holding on to too much unused address space.
- if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4)
+ if ((new_size / SystemPageSize()) * 5 < (map_size / SystemPageSize()) * 4)
return false;
// Shrink by decommitting unneeded pages and making them inaccessible.
@@ -369,7 +372,7 @@
return nullptr;
}
- if (new_size > kGenericMaxDirectMapped) {
+ if (new_size > GenericMaxDirectMapped()) {
if (flags & PartitionAllocReturnNull)
return nullptr;
internal::PartitionExcessiveAllocationSize(new_size);
@@ -461,7 +464,7 @@
static size_t PartitionPurgePage(internal::PartitionPage* page, bool discard) {
const internal::PartitionBucket* bucket = page->bucket;
size_t slot_size = bucket->slot_size;
- if (slot_size < kSystemPageSize || !page->num_allocated_slots)
+ if (slot_size < SystemPageSize() || !page->num_allocated_slots)
return 0;
size_t bucket_num_slots = bucket->get_slots_per_span();
@@ -480,8 +483,18 @@
return discardable_bytes;
}
+#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
constexpr size_t kMaxSlotCount =
- (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
+ (PartitionPageSize() * kMaxPartitionPagesPerSlotSpan) / SystemPageSize();
+#elif defined(OS_APPLE)
+ // It's better for slot_usage to be stack-allocated and fixed-size, which
+ // demands that its size be constexpr. On OS_APPLE, PartitionPageSize() is
+ // always SystemPageSize() << 2, so regardless of what the run time page size
+ // is, kMaxSlotCount can always be simplified to this expression.
+ constexpr size_t kMaxSlotCount = 4 * kMaxPartitionPagesPerSlotSpan;
+ CHECK(kMaxSlotCount == (PartitionPageSize() * kMaxPartitionPagesPerSlotSpan) /
+ SystemPageSize());
+#endif
DCHECK(bucket_num_slots <= kMaxSlotCount);
DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
@@ -632,7 +645,7 @@
if (flags & PartitionPurgeDiscardUnusedSystemPages) {
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
internal::PartitionBucket* bucket = &buckets[i];
- if (bucket->slot_size >= kSystemPageSize)
+ if (bucket->slot_size >= SystemPageSize())
PartitionPurgeBucket(bucket);
}
}
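Worked example of the 80% shrink rule above (hypothetical sizes, 4 KiB pages): with map_size = 100 pages, a new_size of 79 pages gives 79 * 5 = 395 < 100 * 4 = 400, so the function returns false and declines to shrink in place; at 80 pages, 400 < 400 is false and the in-place shrink proceeds.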
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.h b/third_party/base/allocator/partition_allocator/partition_alloc.h
index 2dc62b6..084a2a2 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.h
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.h
@@ -85,7 +85,7 @@
// We use this to make MEMORY_TOOL_REPLACES_ALLOCATOR behave the same for max
// size as other alloc code.
#define CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags) \
- if (size > kGenericMaxDirectMapped) { \
+ if (size > GenericMaxDirectMapped()) { \
if (flags & PartitionAllocReturnNull) { \
return nullptr; \
} \
@@ -487,7 +487,7 @@
internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
if (LIKELY(!bucket->is_direct_mapped())) {
size = bucket->slot_size;
- } else if (size > kGenericMaxDirectMapped) {
+ } else if (size > GenericMaxDirectMapped()) {
// Too large to allocate => return the size unchanged.
} else {
size = internal::PartitionBucket::get_direct_map_size(size);
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc_check.h b/third_party/base/allocator/partition_allocator/partition_alloc_check.h
new file mode 100644
index 0000000..51c23e2
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_alloc_check.h
@@ -0,0 +1,32 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
+#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
+
+#include "third_party/base/allocator/partition_allocator/page_allocator_constants.h"
+#include "third_party/base/logging.h"
+
+#if defined(PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR)
+
+// Use this macro to assert on things that are conditionally constexpr as
+// determined by PAGE_ALLOCATOR_CONSTANTS_ARE_CONSTEXPR or
+// PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR. Where fixed at compile time, this
+// is a static_assert. Where determined at run time, this is a CHECK.
+// Therefore, this macro must only be used where both a static_assert and a
+// CHECK would be viable, that is, within a function, and ideally a function
+// that executes only once, early in the program, such as during initialization.
+#define STATIC_ASSERT_OR_CHECK(condition, message) \
+ static_assert(condition, message)
+
+#else
+
+#define STATIC_ASSERT_OR_CHECK(condition, message) \
+ do { \
+ CHECK(condition); \
+ } while (false)
+
+#endif
+
+#endif // THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
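An illustrative call site (hypothetical function name): because the macro may expand to a run-time CHECK, it is only valid where a statement is, which is why the assertions in partition_alloc.cc moved from namespace scope into PartitionAllocGlobalInit().

  void HypotheticalInit() {
    // static_assert on fixed-page-size platforms; CHECK on Apple.
    STATIC_ASSERT_OR_CHECK(SystemPageSize() * 4 <= PartitionPageSize(),
                           "ok partition page size");
  }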
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc_constants.h b/third_party/base/allocator/partition_allocator/partition_alloc_constants.h
index ef39b41..71d63ba 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc_constants.h
+++ b/third_party/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -6,11 +6,16 @@
#define THIRD_PARTY_BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#include <limits.h>
+#include <stddef.h>
#include "build/build_config.h"
#include "third_party/base/allocator/partition_allocator/page_allocator_constants.h"
#include "third_party/base/logging.h"
+#if defined(OS_APPLE)
+#include <mach/vm_page_size.h>
+#endif
+
namespace pdfium {
namespace base {
@@ -36,17 +41,38 @@
// perfectly up against the end of a system page.
#if defined(_MIPS_ARCH_LOONGSON)
-static const size_t kPartitionPageShift = 16; // 64 KiB
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE int
+PartitionPageShift() {
+ return 16; // 64 KiB
+}
#elif defined(ARCH_CPU_PPC64)
-static const size_t kPartitionPageShift = 18; // 256 KiB
-#elif defined(OS_APPLE) && defined(ARCH_CPU_ARM64)
-static const size_t kPartitionPageShift = 16; // 64 KiB
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE int
+PartitionPageShift() {
+ return 18; // 256 KiB
+}
+#elif defined(OS_APPLE)
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE int
+PartitionPageShift() {
+ return vm_page_shift + 2;
+}
#else
-static const size_t kPartitionPageShift = 14; // 16 KiB
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE int
+PartitionPageShift() {
+ return 14; // 16 KiB
+}
#endif
-static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
-static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
-static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+PartitionPageSize() {
+ return 1ULL << PartitionPageShift();
+}
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+PartitionPageOffsetMask() {
+ return PartitionPageSize() - 1;
+}
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+PartitionPageBaseMask() {
+ return ~PartitionPageOffsetMask();
+}
// TODO: Should this be 1 if defined(_MIPS_ARCH_LOONGSON)?
static const size_t kMaxPartitionPagesPerSlotSpan = 4;
@@ -57,10 +83,15 @@
// dirty a private page, which is very wasteful if we never actually store
// objects there.
-static const size_t kNumSystemPagesPerPartitionPage =
- kPartitionPageSize / kSystemPageSize;
-static const size_t kMaxSystemPagesPerSlotSpan =
- kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+NumSystemPagesPerPartitionPage() {
+ return PartitionPageSize() / SystemPageSize();
+}
+
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+MaxSystemPagesPerSlotSpan() {
+ return NumSystemPagesPerPartitionPage() * kMaxPartitionPagesPerSlotSpan;
+}
// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
// These chunks are called *super pages*. We do this so that we can store
@@ -126,8 +157,10 @@
static const size_t kSuperPageSize = 1 << kSuperPageShift;
static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
-static const size_t kNumPartitionPagesPerSuperPage =
- kSuperPageSize / kPartitionPageSize;
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+NumPartitionPagesPerSuperPage() {
+ return kSuperPageSize / PartitionPageSize();
+}
// The following kGeneric* constants apply to the generic variants of the API.
// The "order" of an allocation is closely related to the power-of-1 size of the
@@ -159,8 +192,10 @@
((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
// Limit when downsizing a direct mapping using `realloc`:
static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1;
-static const size_t kGenericMaxDirectMapped =
- (1UL << 31) + kPageAllocationGranularity; // 2 GiB plus 1 more page.
+PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR ALWAYS_INLINE size_t
+GenericMaxDirectMapped() {
+ return (1UL << 31) + PageAllocationGranularity(); // 2 GiB plus 1 more page.
+}
static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
// Constant for the memory reclaim logic.
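Worked arithmetic for the OS_APPLE branch of PartitionPageShift() above (concrete values for illustration): on Intel Macs vm_page_shift == 12, so PartitionPageSize() == 1 << 14 == 16 KiB; on Apple Silicon vm_page_shift == 14, so PartitionPageSize() == 1 << 16 == 64 KiB. Either way PartitionPageSize() == 4 * SystemPageSize(), the invariant that lets partition_alloc.cc pin kMaxSlotCount to 4 * kMaxPartitionPagesPerSlotSpan on OS_APPLE.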
diff --git a/third_party/base/allocator/partition_allocator/partition_bucket.cc b/third_party/base/allocator/partition_allocator/partition_bucket.cc
index 066f40c..4a02d4b 100644
--- a/third_party/base/allocator/partition_allocator/partition_bucket.cc
+++ b/third_party/base/allocator/partition_allocator/partition_bucket.cc
@@ -31,13 +31,13 @@
// page sized clump.
// - We add a trailing guard page on 32-bit (on 64-bit we rely on the
// massive address space plus randomization instead).
- size_t map_size = size + kPartitionPageSize;
+ size_t map_size = size + PartitionPageSize();
#if !defined(ARCH_CPU_64_BITS)
- map_size += kSystemPageSize;
+ map_size += SystemPageSize();
#endif
// Round up to the allocation granularity.
- map_size += kPageAllocationGranularityOffsetMask;
- map_size &= kPageAllocationGranularityBaseMask;
+ map_size += PageAllocationGranularityOffsetMask();
+ map_size &= PageAllocationGranularityBaseMask();
char* ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size,
kSuperPageSize, PageReadWrite,
@@ -45,17 +45,17 @@
if (UNLIKELY(!ptr))
return nullptr;
- size_t committed_page_size = size + kSystemPageSize;
+ size_t committed_page_size = size + SystemPageSize();
root->total_size_of_direct_mapped_pages += committed_page_size;
root->IncreaseCommittedPages(committed_page_size);
- char* slot = ptr + kPartitionPageSize;
- SetSystemPagesAccess(ptr + (kSystemPageSize * 2),
- kPartitionPageSize - (kSystemPageSize * 2),
+ char* slot = ptr + PartitionPageSize();
+ SetSystemPagesAccess(ptr + (SystemPageSize() * 2),
+ PartitionPageSize() - (SystemPageSize() * 2),
PageInaccessible);
#if !defined(ARCH_CPU_64_BITS)
- SetSystemPagesAccess(ptr, kSystemPageSize, PageInaccessible);
- SetSystemPagesAccess(slot + size, kSystemPageSize, PageInaccessible);
+ SetSystemPagesAccess(ptr, SystemPageSize(), PageInaccessible);
+ SetSystemPagesAccess(slot + size, SystemPageSize(), PageInaccessible);
#endif
PartitionSuperPageExtentEntry* extent =
@@ -90,7 +90,7 @@
PartitionDirectMapExtent* map_extent =
PartitionDirectMapExtent::FromPage(page);
- map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
+ map_extent->map_size = map_size - PartitionPageSize() - SystemPageSize();
map_extent->bucket = bucket;
// Maintain the doubly-linked list of all direct mappings.
@@ -114,7 +114,7 @@
// TODO(ajwong): This seems to interact badly with
// get_pages_per_slot_span() which rounds the value from this up to a
-// multiple of kNumSystemPagesPerPartitionPage (aka 4) anyways.
+// multiple of NumSystemPagesPerPartitionPage() (aka 4) anyways.
// http://crbug.com/776537
//
// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
@@ -133,21 +133,21 @@
// to using fewer system pages.
double best_waste_ratio = 1.0f;
uint16_t best_pages = 0;
- if (slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+ if (slot_size > MaxSystemPagesPerSlotSpan() * SystemPageSize()) {
// TODO(ajwong): Why is there a DCHECK here for this?
// http://crbug.com/776537
- DCHECK(!(slot_size % kSystemPageSize));
- best_pages = static_cast<uint16_t>(slot_size / kSystemPageSize);
+ DCHECK(!(slot_size % SystemPageSize()));
+ best_pages = static_cast<uint16_t>(slot_size / SystemPageSize());
// TODO(ajwong): Should this be checking against
- // kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
+ // MaxSystemPagesPerSlotSpan() or numeric_limits<uint8_t>::max?
// http://crbug.com/776537
CHECK(best_pages < (1 << 8));
return static_cast<uint8_t>(best_pages);
}
- DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
- for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
- i <= kMaxSystemPagesPerSlotSpan; ++i) {
- size_t page_size = kSystemPageSize * i;
+ DCHECK(slot_size <= MaxSystemPagesPerSlotSpan() * SystemPageSize());
+ for (uint16_t i = NumSystemPagesPerPartitionPage() - 1;
+ i <= MaxSystemPagesPerSlotSpan(); ++i) {
+ size_t page_size = SystemPageSize() * i;
size_t num_slots = page_size / slot_size;
size_t waste = page_size - (num_slots * slot_size);
// Leaving a page unfaulted is not free; the page will occupy an empty page
@@ -157,10 +157,10 @@
// regardless of whether or not they are wasted. Should it just
// be waste += i * sizeof(void*)?
// http://crbug.com/776537
- size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1);
+ size_t num_remainder_pages = i & (NumSystemPagesPerPartitionPage() - 1);
size_t num_unfaulted_pages =
num_remainder_pages
- ? (kNumSystemPagesPerPartitionPage - num_remainder_pages)
+ ? (NumSystemPagesPerPartitionPage() - num_remainder_pages)
: 0;
waste += sizeof(void*) * num_unfaulted_pages;
double waste_ratio =
@@ -171,7 +171,7 @@
}
}
DCHECK(best_pages > 0);
- CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
+ CHECK(best_pages <= MaxSystemPagesPerSlotSpan());
return static_cast<uint8_t>(best_pages);
}
@@ -193,14 +193,14 @@
int flags,
uint16_t num_partition_pages) {
DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
- kPartitionPageSize));
+ PartitionPageSize()));
DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
- kPartitionPageSize));
- DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
- size_t total_size = kPartitionPageSize * num_partition_pages;
+ PartitionPageSize()));
+ DCHECK(num_partition_pages <= NumPartitionPagesPerSuperPage());
+ size_t total_size = PartitionPageSize() * num_partition_pages;
size_t num_partition_pages_left =
(root->next_partition_page_end - root->next_partition_page) >>
- kPartitionPageShift;
+ PartitionPageShift();
if (LIKELY(num_partition_pages_left >= num_partition_pages)) {
// In this case, we can still hand out pages from the current super page
// allocation.
@@ -229,34 +229,33 @@
root->total_size_of_super_pages += kSuperPageSize;
root->IncreaseCommittedPages(total_size);
- // |total_size| MUST be less than kSuperPageSize - (kPartitionPageSize*2).
+ // |total_size| MUST be less than kSuperPageSize - (PartitionPageSize()*2).
// This is a trustworthy value because num_partition_pages is not user
// controlled.
//
// TODO(ajwong): Introduce a DCHECK.
root->next_super_page = super_page + kSuperPageSize;
- char* ret = super_page + kPartitionPageSize;
+ char* ret = super_page + PartitionPageSize();
root->next_partition_page = ret + total_size;
- root->next_partition_page_end = root->next_super_page - kPartitionPageSize;
+ root->next_partition_page_end = root->next_super_page - PartitionPageSize();
// Make the first partition page in the super page a guard page, but leave a
// hole in the middle.
// This is where we put page metadata and also a tiny amount of extent
// metadata.
- SetSystemPagesAccess(super_page, kSystemPageSize, PageInaccessible);
- SetSystemPagesAccess(super_page + (kSystemPageSize * 2),
- kPartitionPageSize - (kSystemPageSize * 2),
+ SetSystemPagesAccess(super_page, SystemPageSize(), PageInaccessible);
+ SetSystemPagesAccess(super_page + (SystemPageSize() * 2),
+ PartitionPageSize() - (SystemPageSize() * 2),
PageInaccessible);
// SetSystemPagesAccess(super_page + (kSuperPageSize -
- // kPartitionPageSize),
- // kPartitionPageSize, PageInaccessible);
+ // PartitionPageSize()), PartitionPageSize(), PageInaccessible);
// All remaining slotspans for the unallocated PartitionPages inside the
// SuperPage are conceptually decommitted. Correctly set the state here
// so they do not occupy resources.
//
// TODO(ajwong): Refactor Page Allocator API so the SuperPage comes in
// decommitted initially.
- SetSystemPagesAccess(super_page + kPartitionPageSize + total_size,
- (kSuperPageSize - kPartitionPageSize - total_size),
+ SetSystemPagesAccess(super_page + PartitionPageSize() + total_size,
+ (kSuperPageSize - PartitionPageSize() - total_size),
PageInaccessible);
// If we were after a specific address, but didn't get it, assume that
@@ -308,10 +307,10 @@
}
ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() {
- // Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage.
+ // Rounds up to nearest multiple of NumSystemPagesPerPartitionPage().
return (num_system_pages_per_slot_span +
- (kNumSystemPagesPerPartitionPage - 1)) /
- kNumSystemPagesPerPartitionPage;
+ (NumSystemPagesPerPartitionPage() - 1)) /
+ NumSystemPagesPerPartitionPage();
}
ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) {
@@ -475,7 +474,7 @@
DCHECK(size > kGenericMaxBucketed);
DCHECK(this == get_sentinel_bucket());
DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
- if (size > kGenericMaxDirectMapped) {
+ if (size > GenericMaxDirectMapped()) {
if (return_null)
return nullptr;
PartitionExcessiveAllocationSize(size);
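For orientation, the direct-map layout these hunks recompute from run-time sizes (a sketch assuming 4 KiB system pages, hence 16 KiB partition pages):

  // ptr:  [ sys page: guard ][ sys page: metadata ][ rest of first partition
  //         page: guard ]
  // slot: starts at ptr + PartitionPageSize(), spans size bytes, followed by
  //       a trailing guard system page on 32-bit builds only.
  // Committed accounting is size + SystemPageSize(): the slot plus the one
  // faulted metadata page.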
diff --git a/third_party/base/allocator/partition_allocator/partition_bucket.h b/third_party/base/allocator/partition_allocator/partition_bucket.h
index a89099b..f70a7da 100644
--- a/third_party/base/allocator/partition_allocator/partition_bucket.h
+++ b/third_party/base/allocator/partition_allocator/partition_bucket.h
@@ -50,7 +50,7 @@
ALWAYS_INLINE size_t get_bytes_per_span() const {
// TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
// https://crbug.com/680657
- return num_system_pages_per_slot_span * kSystemPageSize;
+ return num_system_pages_per_slot_span * SystemPageSize();
}
ALWAYS_INLINE uint16_t get_slots_per_span() const {
// TODO(ajwong): Change to CheckedMul. https://crbug.com/787153
@@ -59,11 +59,11 @@
}
static ALWAYS_INLINE size_t get_direct_map_size(size_t size) {
- // Caller must check that the size is not above the kGenericMaxDirectMapped
+ // Caller must check that the size is not above the GenericMaxDirectMapped()
// limit before calling. This also guards against integer overflow in the
// calculation here.
- DCHECK(size <= kGenericMaxDirectMapped);
- return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
+ DCHECK(size <= GenericMaxDirectMapped());
+ return (size + SystemPageOffsetMask()) & SystemPageBaseMask();
}
// TODO(ajwong): Can this be made private? https://crbug.com/787153
diff --git a/third_party/base/allocator/partition_allocator/partition_page.cc b/third_party/base/allocator/partition_allocator/partition_page.cc
index 0ddfe12..312c33b 100644
--- a/third_party/base/allocator/partition_allocator/partition_page.cc
+++ b/third_party/base/allocator/partition_allocator/partition_page.cc
@@ -33,19 +33,19 @@
// Add on the size of the trailing guard page and preceding partition
// page.
- unmap_size += kPartitionPageSize + kSystemPageSize;
+ unmap_size += PartitionPageSize() + SystemPageSize();
- size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
+ size_t uncommitted_page_size = page->bucket->slot_size + SystemPageSize();
root->DecreaseCommittedPages(uncommitted_page_size);
DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
- DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
+ DCHECK(!(unmap_size & PageAllocationGranularityOffsetMask()));
char* ptr = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
// Account for the mapping starting a partition page before the actual
// allocation address.
- ptr -= kPartitionPageSize;
+ ptr -= PartitionPageSize();
return {ptr, unmap_size};
}
diff --git a/third_party/base/allocator/partition_allocator/partition_page.h b/third_party/base/allocator/partition_allocator/partition_page.h
index 049ff26..9eb136b 100644
--- a/third_party/base/allocator/partition_allocator/partition_page.h
+++ b/third_party/base/allocator/partition_allocator/partition_page.h
@@ -136,7 +136,7 @@
DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
// The metadata area is exactly one system page (the guard page) into the
// super page.
- return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
+ return reinterpret_cast<char*>(pointer_as_uint + SystemPageSize());
}
ALWAYS_INLINE PartitionPage* PartitionPage::FromPointerNoAlignmentCheck(
@@ -145,11 +145,11 @@
char* super_page_ptr =
reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
uintptr_t partition_page_index =
- (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
+ (pointer_as_uint & kSuperPageOffsetMask) >> PartitionPageShift();
// Index 0 is invalid because it is the metadata and guard area and
// the last index is invalid because it is a guard page.
DCHECK(partition_page_index);
- DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
PartitionPage* page = reinterpret_cast<PartitionPage*>(
PartitionSuperPageToMetadataArea(super_page_ptr) +
(partition_page_index << kPageMetadataShift));
@@ -169,20 +169,21 @@
// A valid |page| must be past the first guard System page and within
// the following metadata region.
- DCHECK(super_page_offset > kSystemPageSize);
+ DCHECK(super_page_offset > SystemPageSize());
// Must be less than total metadata region.
- DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
- kPageMetadataSize));
+ DCHECK(super_page_offset <
+ SystemPageSize() +
+ (NumPartitionPagesPerSuperPage() * kPageMetadataSize));
uintptr_t partition_page_index =
- (super_page_offset - kSystemPageSize) >> kPageMetadataShift;
+ (super_page_offset - SystemPageSize()) >> kPageMetadataShift;
// Index 0 is invalid because it is the superpage extent metadata and the
// last index is invalid because the whole PartitionPage is set as guard
// pages for the metadata region.
DCHECK(partition_page_index);
- DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ DCHECK(partition_page_index < NumPartitionPagesPerSuperPage() - 1);
uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
void* ret = reinterpret_cast<void*>(
- super_page_base + (partition_page_index << kPartitionPageShift));
+ super_page_base + (partition_page_index << PartitionPageShift()));
return ret;
}
@@ -199,10 +200,10 @@
// For single-slot buckets which span more than one partition page, we
// have some spare metadata space to store the raw allocation size. We
// can use this to report better statistics.
- if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
+ if (bucket->slot_size <= MaxSystemPagesPerSlotSpan() * SystemPageSize())
return nullptr;
- DCHECK((bucket->slot_size % kSystemPageSize) == 0);
+ DCHECK((bucket->slot_size % SystemPageSize()) == 0);
DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
const PartitionPage* the_next_page = this + 1;
diff --git a/third_party/base/allocator/partition_allocator/partition_root_base.h b/third_party/base/allocator/partition_allocator/partition_root_base.h
index 5d692b2..e394c3a 100644
--- a/third_party/base/allocator/partition_allocator/partition_root_base.h
+++ b/third_party/base/allocator/partition_allocator/partition_root_base.h
@@ -166,7 +166,7 @@
PartitionPage* page) {
PartitionSuperPageExtentEntry* extent_entry =
reinterpret_cast<PartitionSuperPageExtentEntry*>(
- reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
+ reinterpret_cast<uintptr_t>(page) & SystemPageBaseMask());
return extent_entry->root;
}
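A hypothetical worked example for this last hunk, assuming 4 KiB system pages: the metadata area sits exactly one system page into a super page (see PartitionSuperPageToMetadataArea() above), so masking any PartitionPage metadata pointer with SystemPageBaseMask() lands on the start of that area, whose first entry is the PartitionSuperPageExtentEntry holding the root pointer.

  // super page base       = 0x200000000
  // metadata area         = 0x200001000  (base + SystemPageSize())
  // a PartitionPage entry = 0x2000010a0
  // 0x2000010a0 & SystemPageBaseMask() == 0x200001000 -> extent entry -> root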