Import PartitionAlloc from Chromium.

We'll add callers in a later CL.

BUG=pdfium:678

Change-Id: I98c8b2832c4750df326218e24ee8c1bd33b89b50
Reviewed-on: https://pdfium-review.googlesource.com/3066
Commit-Queue: Tom Sepez <tsepez@chromium.org>
Reviewed-by: Tom Sepez <tsepez@chromium.org>
diff --git a/third_party/base/allocator/partition_allocator/OWNERS b/third_party/base/allocator/partition_allocator/OWNERS
new file mode 100644
index 0000000..95d9982
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/OWNERS
@@ -0,0 +1,2 @@
+palmer@chromium.org
+tsepez@chromium.org
diff --git a/third_party/base/allocator/partition_allocator/address_space_randomization.cc b/third_party/base/allocator/partition_allocator/address_space_randomization.cc
new file mode 100644
index 0000000..fdcc591
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/address_space_randomization.cc
@@ -0,0 +1,132 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/address_space_randomization.h"
+
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "third_party/build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#else
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+namespace pdfium {
+namespace base {
+
+namespace {
+
+// This is the same PRNG as used by tcmalloc for mapping address randomness;
+// see http://burtleburtle.net/bob/rand/smallprng.html
+struct ranctx {
+  subtle::SpinLock lock;
+  bool initialized;
+  uint32_t a;
+  uint32_t b;
+  uint32_t c;
+  uint32_t d;
+};
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
+uint32_t ranvalInternal(ranctx* x) {
+  uint32_t e = x->a - rot(x->b, 27);
+  x->a = x->b ^ rot(x->c, 17);
+  x->b = x->c + x->d;
+  x->c = x->d + e;
+  x->d = e + x->a;
+  return x->d;
+}
+
+#undef rot
+
+uint32_t ranval(ranctx* x) {
+  subtle::SpinLock::Guard guard(x->lock);
+  if (UNLIKELY(!x->initialized)) {
+    x->initialized = true;
+    char c;
+    uint32_t seed = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&c));
+    uint32_t pid;
+    uint32_t usec;
+#if defined(OS_WIN)
+    pid = GetCurrentProcessId();
+    SYSTEMTIME st;
+    GetSystemTime(&st);
+    usec = static_cast<uint32_t>(st.wMilliseconds * 1000);
+#else
+    pid = static_cast<uint32_t>(getpid());
+    struct timeval tv;
+    gettimeofday(&tv, 0);
+    usec = static_cast<uint32_t>(tv.tv_usec);
+#endif
+    seed ^= pid;
+    seed ^= usec;
+    x->a = 0xf1ea5eed;
+    x->b = x->c = x->d = seed;
+    for (int i = 0; i < 20; ++i) {
+      (void)ranvalInternal(x);
+    }
+  }
+  uint32_t ret = ranvalInternal(x);
+  return ret;
+}
+
+static struct ranctx s_ranctx;
+
+}  // namespace
+
+// Calculates a random preferred mapping address. In calculating an address, we
+// balance good ASLR against not fragmenting the address space too badly.
+void* GetRandomPageBase() {
+  uintptr_t random;
+  random = static_cast<uintptr_t>(ranval(&s_ranctx));
+#if defined(ARCH_CPU_X86_64)
+  random <<= 32UL;
+  random |= static_cast<uintptr_t>(ranval(&s_ranctx));
+// This address mask gives a low likelihood of address space collisions. We
+// handle the situation gracefully if there is a collision.
+#if defined(OS_WIN)
+  // 64-bit Windows has a bizarrely small 8TB user address space. Allocates in
+  // the 1-5TB region. TODO(palmer): See if Windows >= 8.1 has the full 47 bits,
+  // and use it if so. crbug.com/672219
+  random &= 0x3ffffffffffUL;
+  random += 0x10000000000UL;
+#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  // This range is copied from the TSan source, but works for all tools.
+  random &= 0x007fffffffffUL;
+  random += 0x7e8000000000UL;
+#else
+  // Linux and OS X support the full 47-bit user space of x64 processors.
+  random &= 0x3fffffffffffUL;
+#endif
+#elif defined(ARCH_CPU_ARM64)
+  // ARM64 on Linux has 39-bit user space.
+  random &= 0x3fffffffffUL;
+  random += 0x1000000000UL;
+#else  // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_ARM64)
+#if defined(OS_WIN)
+  // On win32 host systems the randomization plus huge alignment causes
+  // excessive fragmentation. Plus most of these systems lack ASLR, so the
+  // randomization isn't buying anything. In that case we just skip it.
+  // TODO(jschuh): Just dump the randomization when HE-ASLR is present.
+  static BOOL isWow64 = -1;
+  if (isWow64 == -1 && !IsWow64Process(GetCurrentProcess(), &isWow64))
+    isWow64 = FALSE;
+  if (!isWow64)
+    return nullptr;
+#endif  // defined(OS_WIN)
+  // This is a good range on Windows, Linux and Mac.
+  // Allocates in the 0.5-1.5GB region.
+  random &= 0x3fffffff;
+  random += 0x20000000;
+#endif  // defined(ARCH_CPU_X86_64)
+  random &= kPageAllocationGranularityBaseMask;
+  return reinterpret_cast<void*>(random);
+}
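+
+// For instance, on 64-bit Linux the code above keeps the low 46 bits of
+// randomness and then aligns the result down to kPageAllocationGranularity
+// (4KB on POSIX), producing a suitably aligned |mmap| hint.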
+
+}  // namespace base
+}  // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/address_space_randomization.h b/third_party/base/allocator/partition_allocator/address_space_randomization.h
new file mode 100644
index 0000000..97c5f60
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/address_space_randomization.h
@@ -0,0 +1,18 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+
+namespace pdfium {
+namespace base {
+
+// Calculates a random preferred mapping address. In calculating an address, we
+// balance good ASLR against not fragmenting the address space too badly.
+void* GetRandomPageBase();
+
+}  // namespace base
+}  // namespace pdfium
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
diff --git a/third_party/base/allocator/partition_allocator/oom.h b/third_party/base/allocator/partition_allocator/oom.h
new file mode 100644
index 0000000..41f29b5
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/oom.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_OOM_H
+#define BASE_ALLOCATOR_OOM_H
+
+#include "third_party/base/logging.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+// Do not want trivial entry points just calling OOM_CRASH() to be
+// commoned up by linker icf/comdat folding.
+#define OOM_CRASH_PREVENT_ICF()                  \
+  volatile int oom_crash_inhibit_icf = __LINE__; \
+  ALLOW_UNUSED_LOCAL(oom_crash_inhibit_icf)
+
+// OOM_CRASH() - Specialization of IMMEDIATE_CRASH which will raise a custom
+// exception on Windows to signal this is OOM and not a normal assert.
+#if defined(OS_WIN)
+#define OOM_CRASH()                                                     \
+  do {                                                                  \
+    OOM_CRASH_PREVENT_ICF();                                            \
+    ::RaiseException(0xE0000008, EXCEPTION_NONCONTINUABLE, 0, nullptr); \
+    IMMEDIATE_CRASH();                                                  \
+  } while (0)
+#else
+#define OOM_CRASH()          \
+  do {                       \
+    OOM_CRASH_PREVENT_ICF(); \
+    IMMEDIATE_CRASH();       \
+  } while (0)
+#endif
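+
+// A minimal usage sketch (hypothetical call site; the real callers arrive
+// with PartitionAlloc itself):
+//
+//   void* ptr = malloc(size);
+//   if (!ptr)
+//     OOM_CRASH();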
+
+#endif  // BASE_ALLOCATOR_OOM_H
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.cc b/third_party/base/allocator/partition_allocator/page_allocator.cc
new file mode 100644
index 0000000..abe159b
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/page_allocator.cc
@@ -0,0 +1,281 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+
+#include <limits.h>
+
+#include <atomic>
+
+#include "third_party/base/allocator/partition_allocator/address_space_randomization.h"
+#include "third_party/base/base_export.h"
+#include "third_party/base/logging.h"
+#include "third_party/build/build_config.h"
+
+#if defined(OS_POSIX)
+
+#include <errno.h>
+#include <sys/mman.h>
+
+#ifndef MADV_FREE
+#define MADV_FREE MADV_DONTNEED
+#endif
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+// On POSIX |mmap| uses a nearby address if the hint address is blocked.
+static const bool kHintIsAdvisory = true;
+static std::atomic<int32_t> s_allocPageErrorCode{0};
+
+#elif defined(OS_WIN)
+
+#include <windows.h>
+
+// |VirtualAlloc| will fail if allocation at the hint address is blocked.
+static const bool kHintIsAdvisory = false;
+static std::atomic<int32_t> s_allocPageErrorCode{ERROR_SUCCESS};
+
+#else
+#error Unknown OS
+#endif  // defined(OS_POSIX)
+
+namespace pdfium {
+namespace base {
+
+// This internal function wraps the OS-specific page allocation call:
+// |VirtualAlloc| on Windows, and |mmap| on POSIX.
+static void* SystemAllocPages(
+    void* hint,
+    size_t length,
+    PageAccessibilityConfiguration page_accessibility) {
+  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+  DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+           kPageAllocationGranularityOffsetMask));
+  void* ret;
+#if defined(OS_WIN)
+  DWORD access_flag =
+      page_accessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS;
+  ret = VirtualAlloc(hint, length, MEM_RESERVE | MEM_COMMIT, access_flag);
+  if (!ret)
+    s_allocPageErrorCode = GetLastError();
+#else
+  int access_flag = page_accessibility == PageAccessible
+                        ? (PROT_READ | PROT_WRITE)
+                        : PROT_NONE;
+  ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+  if (ret == MAP_FAILED) {
+    s_allocPageErrorCode = errno;
+    ret = 0;
+  }
+#endif
+  return ret;
+}
+
+// Trims |base| to the given length and alignment. On Windows, returns null on
+// failure and frees |base|.
+static void* TrimMapping(void* base,
+                         size_t base_length,
+                         size_t trim_length,
+                         uintptr_t align,
+                         PageAccessibilityConfiguration page_accessibility) {
+  size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (align - 1);
+  if (pre_slack)
+    pre_slack = align - pre_slack;
+  size_t post_slack = base_length - pre_slack - trim_length;
+  DCHECK(base_length >= trim_length || pre_slack || post_slack);
+  DCHECK(pre_slack < base_length);
+  DCHECK(post_slack < base_length);
+  void* ret = base;
+
+#if defined(OS_POSIX)  // On POSIX we can resize the allocation run.
+  (void)page_accessibility;
+  if (pre_slack) {
+    int res = munmap(base, pre_slack);
+    CHECK(!res);
+    ret = reinterpret_cast<char*>(base) + pre_slack;
+  }
+  if (post_slack) {
+    int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
+    CHECK(!res);
+  }
+#else  // On Windows we can't resize the allocation run.
+  if (pre_slack || post_slack) {
+    ret = reinterpret_cast<char*>(base) + pre_slack;
+    FreePages(base, base_length);
+    ret = SystemAllocPages(ret, trim_length, page_accessibility);
+  }
+#endif
+
+  return ret;
+}
+
+void* AllocPages(void* address,
+                 size_t length,
+                 size_t align,
+                 PageAccessibilityConfiguration page_accessibility) {
+  DCHECK(length >= kPageAllocationGranularity);
+  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+  DCHECK(align >= kPageAllocationGranularity);
+  DCHECK(!(align & kPageAllocationGranularityOffsetMask));
+  DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+           kPageAllocationGranularityOffsetMask));
+  uintptr_t align_offset_mask = align - 1;
+  uintptr_t align_base_mask = ~align_offset_mask;
+  DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+
+  // If the client passed null as the address, choose a good one.
+  if (!address) {
+    address = GetRandomPageBase();
+    address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
+                                      align_base_mask);
+  }
+
+  // First try to force an exact-size, aligned allocation from our random base.
+  for (int count = 0; count < 3; ++count) {
+    void* ret = SystemAllocPages(address, length, page_accessibility);
+    if (kHintIsAdvisory || ret) {
+      // If the alignment is to our liking, we're done.
+      if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
+        return ret;
+      FreePages(ret, length);
+#if defined(ARCH_CPU_32_BITS)
+      address = reinterpret_cast<void*>(
+          (reinterpret_cast<uintptr_t>(ret) + align) & align_base_mask);
+#endif
+    } else if (!address) {  // We know we're OOM when an unhinted allocation
+                            // fails.
+      return nullptr;
+    } else {
+#if defined(ARCH_CPU_32_BITS)
+      address = reinterpret_cast<char*>(address) + align;
+#endif
+    }
+
+#if !defined(ARCH_CPU_32_BITS)
+    // Keep trying random addresses on systems that have a large address space.
+    address = GetRandomPageBase();
+    address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
+                                      align_base_mask);
+#endif
+  }
+
+  // Map a larger allocation so we can force alignment, but continue randomizing
+  // only on 64-bit POSIX.
+  size_t try_length = length + (align - kPageAllocationGranularity);
+  CHECK(try_length >= length);
+  void* ret;
+
+  do {
+    // Don't continue to burn cycles on mandatory hints (Windows).
+    address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
+    ret = SystemAllocPages(address, try_length, page_accessibility);
+    // The retries are for Windows, where a race can steal our mapping on
+    // resize.
+  } while (ret &&
+           (ret = TrimMapping(ret, try_length, length, align,
+                              page_accessibility)) == nullptr);
+
+  return ret;
+}
+
+void FreePages(void* address, size_t length) {
+  DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+           kPageAllocationGranularityOffsetMask));
+  DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+#if defined(OS_POSIX)
+  int ret = munmap(address, length);
+  CHECK(!ret);
+#else
+  BOOL ret = VirtualFree(address, 0, MEM_RELEASE);
+  CHECK(ret);
+#endif
+}
+
+void SetSystemPagesInaccessible(void* address, size_t length) {
+  DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+  int ret = mprotect(address, length, PROT_NONE);
+  CHECK(!ret);
+#else
+  BOOL ret = VirtualFree(address, length, MEM_DECOMMIT);
+  CHECK(ret);
+#endif
+}
+
+bool SetSystemPagesAccessible(void* address, size_t length) {
+  DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+  return !mprotect(address, length, PROT_READ | PROT_WRITE);
+#else
+  return !!VirtualAlloc(address, length, MEM_COMMIT, PAGE_READWRITE);
+#endif
+}
+
+void DecommitSystemPages(void* address, size_t length) {
+  DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+  int ret = madvise(address, length, MADV_FREE);
+  if (ret != 0 && errno == EINVAL) {
+    // MADV_FREE only works on Linux 4.5+. If the request failed, retry with
+    // the older MADV_DONTNEED. Note that MADV_FREE being defined at compile
+    // time doesn't imply runtime support.
+    ret = madvise(address, length, MADV_DONTNEED);
+  }
+  CHECK(!ret);
+#else
+  SetSystemPagesInaccessible(address, length);
+#endif
+}
+
+void RecommitSystemPages(void* address, size_t length) {
+  DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+  (void)address;
+#else
+  CHECK(SetSystemPagesAccessible(address, length));
+#endif
+}
+
+void DiscardSystemPages(void* address, size_t length) {
+  DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+  // On POSIX, the implementation detail is that discard and decommit are the
+  // same, and lead to pages that are returned to the system immediately and
+  // get replaced with zeroed pages when touched. So we just call
+  // DecommitSystemPages() here to avoid code duplication.
+  DecommitSystemPages(address, length);
+#else
+  // On Windows discarded pages are not returned to the system immediately and
+  // not guaranteed to be zeroed when returned to the application.
+  using DiscardVirtualMemoryFunction =
+      DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
+  static DiscardVirtualMemoryFunction discard_virtual_memory =
+      reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
+  if (discard_virtual_memory ==
+      reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
+    discard_virtual_memory =
+        reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
+            GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
+  // Use DiscardVirtualMemory when available because it releases faster than
+  // MEM_RESET.
+  DWORD ret = 1;
+  if (discard_virtual_memory)
+    ret = discard_virtual_memory(address, length);
+  // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+  // failure.
+  if (ret) {
+    void* reset_ret =
+        VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
+    CHECK(reset_ret);
+  }
+#endif
+}
+
+uint32_t GetAllocPageErrorCode() {
+  return s_allocPageErrorCode;
+}
+
+}  // namespace base
+}  // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/page_allocator.h b/third_party/base/allocator/partition_allocator/page_allocator.h
new file mode 100644
index 0000000..be73363
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/page_allocator.h
@@ -0,0 +1,126 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
+
+#include <stdint.h>
+
+#include <cstddef>
+
+#include "third_party/base/base_export.h"
+#include "third_party/base/compiler_specific.h"
+#include "third_party/build/build_config.h"
+
+namespace pdfium {
+namespace base {
+
+#if defined(OS_WIN)
+static const size_t kPageAllocationGranularityShift = 16;  // 64KB
+#else
+static const size_t kPageAllocationGranularityShift = 12;  // 4KB
+#endif
+static const size_t kPageAllocationGranularity =
+    1 << kPageAllocationGranularityShift;
+static const size_t kPageAllocationGranularityOffsetMask =
+    kPageAllocationGranularity - 1;
+static const size_t kPageAllocationGranularityBaseMask =
+    ~kPageAllocationGranularityOffsetMask;
+
+// All Blink-supported systems have 4096-byte system pages and can handle
+// permissions and commit / decommit at this granularity.
+static const size_t kSystemPageSize = 4096;
+static const size_t kSystemPageOffsetMask = kSystemPageSize - 1;
+static const size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;
+
+enum PageAccessibilityConfiguration {
+  PageAccessible,
+  PageInaccessible,
+};
+
+// Allocate one or more pages.
+// The requested address is just a hint; the actual address returned may
+// differ. The returned address will be aligned at least to align bytes.
+// len is in bytes, and must be a multiple of kPageAllocationGranularity.
+// align is in bytes, and must be a power-of-two multiple of
+// kPageAllocationGranularity.
+// If address is null, then a suitable and randomized address will be chosen
+// automatically.
+// PageAccessibilityConfiguration controls the permission of the
+// allocated pages.
+// This call will return null if the allocation cannot be satisfied.
+BASE_EXPORT void* AllocPages(void* address,
+                             size_t len,
+                             size_t align,
+                             PageAccessibilityConfiguration);
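+
+// A hedged usage sketch, using only names declared in this header:
+//
+//   void* ptr = AllocPages(nullptr, 2 * kPageAllocationGranularity,
+//                          kPageAllocationGranularity, PageAccessible);
+//   if (!ptr)
+//     return;  // See GetAllocPageErrorCode() for errno / GetLastError.
+//   FreePages(ptr, 2 * kPageAllocationGranularity);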
+
+// Free one or more pages.
+// address and length must match a previous call to AllocPages().
+BASE_EXPORT void FreePages(void* address, size_t length);
+
+// Mark one or more system pages as being inaccessible.
+// Subsequently accessing any address in the range will fault, and the
+// addresses will not be re-used by future allocations.
+// len must be a multiple of kSystemPageSize bytes.
+BASE_EXPORT void SetSystemPagesInaccessible(void* address, size_t length);
+
+// Mark one or more system pages as being accessible.
+// The pages will be readable and writeable.
+// len must be a multiple of kSystemPageSize bytes.
+// Returns true if the permission change succeeded. Callers must check the
+// result (in most cases with a CHECK that it is true).
+BASE_EXPORT WARN_UNUSED_RESULT bool SetSystemPagesAccessible(void* address,
+                                                             size_t length);
+
+// Decommit one or more system pages. Decommitted means that the physical memory
+// is released to the system, but the virtual address space remains reserved.
+// System pages are re-committed by calling recommitSystemPages(). Touching
+// a decommitted page _may_ fault.
+// Clients should not make any assumptions about the contents of decommitted
+// system pages, before or after they write to the page. The only guarantee
+// provided is that the contents of the system page will be deterministic again
+// after recommitting and writing to it. In particular, note that system pages
+// are not guaranteed to be zero-filled upon re-commit. len must be a multiple
+// of kSystemPageSize bytes.
+BASE_EXPORT void DecommitSystemPages(void* address, size_t length);
+
+// Recommit one or more system pages. Decommitted system pages must be
+// recommitted before they are read or written again.
+// Note that this operation may be a no-op on some platforms.
+// len must be a multiple of kSystemPageSize bytes.
+BASE_EXPORT void RecommitSystemPages(void* address, size_t length);
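+
+// A hedged sketch of the decommit / recommit protocol described above:
+//
+//   DecommitSystemPages(ptr, kSystemPageSize);
+//   // ... later, before the memory is used again:
+//   RecommitSystemPages(ptr, kSystemPageSize);
+//   memset(ptr, 0, kSystemPageSize);  // Contents are not guaranteed zeroed.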
+
+// Discard one or more system pages. Discarding is a hint to the system that
+// the page is no longer required. The hint may:
+// - Do nothing.
+// - Discard the page immediately, freeing up physical pages.
+// - Discard the page at some time in the future in response to memory pressure.
+// Only committed pages should be discarded. Discarding a page does not
+// decommit it, and it is valid to discard an already-discarded page.
+// A read or write to a discarded page will not fault.
+// Reading from a discarded page may return the original page content, or a
+// page full of zeroes.
+// Writing to a discarded page is the only guaranteed way to tell the system
+// that the page is required again. Once written to, the content of the page is
+// guaranteed stable once more. After being written to, the page content may be
+// based on the original page content, or a page of zeroes.
+// len must be a multiple of kSystemPageSize bytes.
+BASE_EXPORT void DiscardSystemPages(void* address, size_t length);
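+
+// For example, a caller with a cold but still-committed buffer might do:
+//
+//   DiscardSystemPages(buffer, kSystemPageSize);
+//   // ... later; writing is what guarantees the contents are stable again:
+//   memset(buffer, 0, kSystemPageSize);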
+
+ALWAYS_INLINE uintptr_t RoundUpToSystemPage(uintptr_t address) {
+  return (address + kSystemPageOffsetMask) & kSystemPageBaseMask;
+}
+
+ALWAYS_INLINE uintptr_t RoundDownToSystemPage(uintptr_t address) {
+  return address & kSystemPageBaseMask;
+}
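+
+// For example, with kSystemPageSize == 4096: RoundUpToSystemPage(1) == 4096,
+// RoundUpToSystemPage(4096) == 4096, and RoundDownToSystemPage(4097) == 4096.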
+
+// Returns errno (or GetLastError code) when mmap (or VirtualAlloc) fails.
+BASE_EXPORT uint32_t GetAllocPageErrorCode();
+
+}  // namespace base
+}  // namespace pdfium
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.cc b/third_party/base/allocator/partition_allocator/partition_alloc.cc
new file mode 100644
index 0000000..9523e78
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.cc
@@ -0,0 +1,1437 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/partition_alloc.h"
+
+#include <string.h>
+
+#include "third_party/base/allocator/partition_allocator/oom.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "third_party/base/compiler_specific.h"
+
+// Two partition pages are used as guard / metadata pages, so make sure the
+// super page size is bigger.
+static_assert(pdfium::base::kPartitionPageSize * 4 <=
+                  pdfium::base::kSuperPageSize,
+              "ok super page size");
+static_assert(!(pdfium::base::kSuperPageSize %
+                pdfium::base::kPartitionPageSize),
+              "ok super page multiple");
+// Four system pages give us room to hack out a still-guard-paged piece
+// of metadata in the middle of a guard partition page.
+static_assert(pdfium::base::kSystemPageSize * 4 <=
+                  pdfium::base::kPartitionPageSize,
+              "ok partition page size");
+static_assert(!(pdfium::base::kPartitionPageSize %
+                pdfium::base::kSystemPageSize),
+              "ok partition page multiple");
+static_assert(sizeof(pdfium::base::PartitionPage) <=
+                  pdfium::base::kPageMetadataSize,
+              "PartitionPage should not be too big");
+static_assert(sizeof(pdfium::base::PartitionBucket) <=
+                  pdfium::base::kPageMetadataSize,
+              "PartitionBucket should not be too big");
+static_assert(sizeof(pdfium::base::PartitionSuperPageExtentEntry) <=
+                  pdfium::base::kPageMetadataSize,
+              "PartitionSuperPageExtentEntry should not be too big");
+static_assert(pdfium::base::kPageMetadataSize *
+                      pdfium::base::kNumPartitionPagesPerSuperPage <=
+                  pdfium::base::kSystemPageSize,
+              "page metadata fits in hole");
+// Check that some of our zanier calculations worked out as expected.
+static_assert(pdfium::base::kGenericSmallestBucket == 8,
+              "generic smallest bucket");
+static_assert(pdfium::base::kGenericMaxBucketed == 983040,
+              "generic max bucketed");
+static_assert(pdfium::base::kMaxSystemPagesPerSlotSpan < (1 << 8),
+              "System pages per slot span must be less than 256.");
+
+namespace pdfium {
+namespace base {
+
+subtle::SpinLock PartitionRootBase::gInitializedLock;
+bool PartitionRootBase::gInitialized = false;
+PartitionPage PartitionRootBase::gSeedPage;
+PartitionBucket PartitionRootBase::gPagedBucket;
+void (*PartitionRootBase::gOomHandlingFunction)() = nullptr;
+PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ =
+    nullptr;
+PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr;
+
+static uint8_t PartitionBucketNumSystemPages(size_t size) {
+  // This works out reasonably for the current bucket sizes of the generic
+  // allocator, and the current values of partition page size and constants.
+  // Specifically, we have enough room to always pack the slots perfectly into
+  // some number of system pages. The only waste is the waste associated with
+  // unfaulted pages (i.e. wasted address space).
+  // TODO: we end up using a lot of system pages for very small sizes. For
+  // example, we'll use 12 system pages for slot size 24. The slot size is
+  // so small that the waste would be tiny with just 4, or 1, system pages.
+  // Later, we can investigate whether there are anti-fragmentation benefits
+  // to using fewer system pages.
+  double best_waste_ratio = 1.0;
+  uint16_t best_pages = 0;
+  if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+    DCHECK(!(size % kSystemPageSize));
+    best_pages = static_cast<uint16_t>(size / kSystemPageSize);
+    CHECK(best_pages < (1 << 8));
+    return static_cast<uint8_t>(best_pages);
+  }
+  DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+  for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
+       i <= kMaxSystemPagesPerSlotSpan; ++i) {
+    size_t page_size = kSystemPageSize * i;
+    size_t num_slots = page_size / size;
+    size_t waste = page_size - (num_slots * size);
+    // Leaving a page unfaulted is not free; the page will occupy an empty page
+    // table entry.  Make a simple attempt to account for that.
+    size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1);
+    size_t num_unfaulted_pages =
+        num_remainder_pages
+            ? (kNumSystemPagesPerPartitionPage - num_remainder_pages)
+            : 0;
+    waste += sizeof(void*) * num_unfaulted_pages;
+    double waste_ratio =
+        static_cast<double>(waste) / static_cast<double>(page_size);
+    if (waste_ratio < best_waste_ratio) {
+      best_waste_ratio = waste_ratio;
+      best_pages = i;
+    }
+  }
+  DCHECK(best_pages > 0);
+  CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
+  return static_cast<uint8_t>(best_pages);
+}
+
+static void PartitionAllocBaseInit(PartitionRootBase* root) {
+  DCHECK(!root->initialized);
+  {
+    subtle::SpinLock::Guard guard(PartitionRootBase::gInitializedLock);
+    if (!PartitionRootBase::gInitialized) {
+      PartitionRootBase::gInitialized = true;
+      // We mark the seed page as free to make sure it is skipped by our
+      // logic to find a new active page.
+      PartitionRootBase::gPagedBucket.active_pages_head =
+          &PartitionRootGeneric::gSeedPage;
+    }
+  }
+
+  root->initialized = true;
+  root->total_size_of_committed_pages = 0;
+  root->total_size_of_super_pages = 0;
+  root->total_size_of_direct_mapped_pages = 0;
+  root->next_super_page = 0;
+  root->next_partition_page = 0;
+  root->next_partition_page_end = 0;
+  root->first_extent = 0;
+  root->current_extent = 0;
+  root->direct_map_list = 0;
+
+  memset(&root->global_empty_page_ring, '\0',
+         sizeof(root->global_empty_page_ring));
+  root->global_empty_page_ring_index = 0;
+
+  // This is a "magic" value so we can test if a root pointer is valid.
+  root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
+}
+
+static void PartitionBucketInitBase(PartitionBucket* bucket,
+                                    PartitionRootBase* root) {
+  bucket->active_pages_head = &PartitionRootGeneric::gSeedPage;
+  bucket->empty_pages_head = 0;
+  bucket->decommitted_pages_head = 0;
+  bucket->num_full_pages = 0;
+  bucket->num_system_pages_per_slot_span =
+      PartitionBucketNumSystemPages(bucket->slot_size);
+}
+
+void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
+  DCHECK(oom_handling_function);
+  PartitionRootBase::gOomHandlingFunction = oom_handling_function;
+}
+
+void PartitionAllocInit(PartitionRoot* root,
+                        size_t num_buckets,
+                        size_t max_allocation) {
+  PartitionAllocBaseInit(root);
+
+  root->num_buckets = num_buckets;
+  root->max_allocation = max_allocation;
+  size_t i;
+  for (i = 0; i < root->num_buckets; ++i) {
+    PartitionBucket* bucket = &root->buckets()[i];
+    if (!i)
+      bucket->slot_size = kAllocationGranularity;
+    else
+      bucket->slot_size = i << kBucketShift;
+    PartitionBucketInitBase(bucket, root);
+  }
+}
+
+void PartitionAllocGenericInit(PartitionRootGeneric* root) {
+  subtle::SpinLock::Guard guard(root->lock);
+
+  PartitionAllocBaseInit(root);
+
+  // Precalculate some shift and mask constants used in the hot path.
+  // Example: malloc(41) == 101001 in binary.
+  // Order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
+  // order_index is the next three MSB == 010 == 2.
+  // sub_order_index_mask is a mask for the remaining bits == 11 (masking to
+  // 01 for the sub_order_index).
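+  // Working the example through (assuming kGenericNumBucketsPerOrderBits ==
+  // 3, as the three-MSB example implies): for order 6, order_index_shift ==
+  // 6 - (3 + 1) == 2, so the order index of 41 is (41 >> 2) & 0b111 == 2, and
+  // sub_order_index_mask == ((1 << 6) - 1) >> 4 == 0b11.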
+  size_t order;
+  for (order = 0; order <= kBitsPerSizeT; ++order) {
+    size_t order_index_shift;
+    if (order < kGenericNumBucketsPerOrderBits + 1)
+      order_index_shift = 0;
+    else
+      order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
+    root->order_index_shifts[order] = order_index_shift;
+    size_t sub_order_index_mask;
+    if (order == kBitsPerSizeT) {
+      // This avoids invoking undefined behavior for an excessive shift.
+      sub_order_index_mask =
+          static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
+    } else {
+      sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
+                             (kGenericNumBucketsPerOrderBits + 1);
+    }
+    root->order_sub_index_masks[order] = sub_order_index_mask;
+  }
+
+  // Set up the actual usable buckets first.
+  // Note that typical values (i.e. min allocation size of 8) will result in
+  // pseudo buckets (size==9 etc. or more generally, size is not a multiple
+  // of the smallest allocation granularity).
+  // We avoid them in the bucket lookup map, but we tolerate them to keep the
+  // code simpler and the structures more generic.
+  size_t i, j;
+  size_t current_size = kGenericSmallestBucket;
+  size_t currentIncrement =
+      kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
+  PartitionBucket* bucket = &root->buckets[0];
+  for (i = 0; i < kGenericNumBucketedOrders; ++i) {
+    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+      bucket->slot_size = current_size;
+      PartitionBucketInitBase(bucket, root);
+      // Disable pseudo buckets so that touching them faults.
+      if (current_size % kGenericSmallestBucket)
+        bucket->active_pages_head = 0;
+      current_size += currentIncrement;
+      ++bucket;
+    }
+    currentIncrement <<= 1;
+  }
+  DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
+  DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
+
+  // Then set up the fast size -> bucket lookup table.
+  bucket = &root->buckets[0];
+  PartitionBucket** bucketPtr = &root->bucket_lookups[0];
+  for (order = 0; order <= kBitsPerSizeT; ++order) {
+    for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+      if (order < kGenericMinBucketedOrder) {
+        // Use the bucket of the finest granularity for malloc(0) etc.
+        *bucketPtr++ = &root->buckets[0];
+      } else if (order > kGenericMaxBucketedOrder) {
+        *bucketPtr++ = &PartitionRootGeneric::gPagedBucket;
+      } else {
+        PartitionBucket* validBucket = bucket;
+        // Skip over invalid buckets.
+        while (validBucket->slot_size % kGenericSmallestBucket)
+          validBucket++;
+        *bucketPtr++ = validBucket;
+        bucket++;
+      }
+    }
+  }
+  DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
+  DCHECK(bucketPtr ==
+         &root->bucket_lookups[0] +
+             ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+  // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
+  // which tries to overflow to a non-existent order.
+  *bucketPtr = &PartitionRootGeneric::gPagedBucket;
+}
+
+#if !defined(ARCH_CPU_64_BITS)
+static NOINLINE void PartitionOutOfMemoryWithLotsOfUncommittedPages() {
+  OOM_CRASH();
+}
+#endif
+
+static NOINLINE void PartitionOutOfMemory(const PartitionRootBase* root) {
+#if !defined(ARCH_CPU_64_BITS)
+  // Check whether this OOM is due to a lot of super pages that are allocated
+  // but not committed, probably due to http://crbug.com/421387.
+  if (root->total_size_of_super_pages +
+          root->total_size_of_direct_mapped_pages -
+          root->total_size_of_committed_pages >
+      kReasonableSizeOfUnusedPages) {
+    PartitionOutOfMemoryWithLotsOfUncommittedPages();
+  }
+#endif
+  if (PartitionRootBase::gOomHandlingFunction)
+    (*PartitionRootBase::gOomHandlingFunction)();
+  OOM_CRASH();
+}
+
+static NOINLINE void PartitionExcessiveAllocationSize() {
+  OOM_CRASH();
+}
+
+static NOINLINE void PartitionBucketFull() {
+  OOM_CRASH();
+}
+
+// PartitionPageStateIs*
+// Note that it's only valid to call these functions on pages found on one of
+// the page lists. Specifically, you can't call these functions on full pages
+// that were detached from the active list.
+static bool ALWAYS_INLINE
+PartitionPageStateIsActive(const PartitionPage* page) {
+  DCHECK(page != &PartitionRootGeneric::gSeedPage);
+  DCHECK(!page->page_offset);
+  return (page->num_allocated_slots > 0 &&
+          (page->freelist_head || page->num_unprovisioned_slots));
+}
+
+static bool ALWAYS_INLINE PartitionPageStateIsFull(const PartitionPage* page) {
+  DCHECK(page != &PartitionRootGeneric::gSeedPage);
+  DCHECK(!page->page_offset);
+  bool ret = (page->num_allocated_slots == PartitionBucketSlots(page->bucket));
+  if (ret) {
+    DCHECK(!page->freelist_head);
+    DCHECK(!page->num_unprovisioned_slots);
+  }
+  return ret;
+}
+
+static bool ALWAYS_INLINE PartitionPageStateIsEmpty(const PartitionPage* page) {
+  DCHECK(page != &PartitionRootGeneric::gSeedPage);
+  DCHECK(!page->page_offset);
+  return (!page->num_allocated_slots && page->freelist_head);
+}
+
+static bool ALWAYS_INLINE
+PartitionPageStateIsDecommitted(const PartitionPage* page) {
+  DCHECK(page != &PartitionRootGeneric::gSeedPage);
+  DCHECK(!page->page_offset);
+  bool ret = (!page->num_allocated_slots && !page->freelist_head);
+  if (ret) {
+    DCHECK(!page->num_unprovisioned_slots);
+    DCHECK(page->empty_cache_index == -1);
+  }
+  return ret;
+}
+
+static void PartitionIncreaseCommittedPages(PartitionRootBase* root,
+                                            size_t len) {
+  root->total_size_of_committed_pages += len;
+  DCHECK(root->total_size_of_committed_pages <=
+         root->total_size_of_super_pages +
+             root->total_size_of_direct_mapped_pages);
+}
+
+static void PartitionDecreaseCommittedPages(PartitionRootBase* root,
+                                            size_t len) {
+  root->total_size_of_committed_pages -= len;
+  DCHECK(root->total_size_of_committed_pages <=
+         root->total_size_of_super_pages +
+             root->total_size_of_direct_mapped_pages);
+}
+
+static ALWAYS_INLINE void PartitionDecommitSystemPages(PartitionRootBase* root,
+                                                       void* address,
+                                                       size_t length) {
+  DecommitSystemPages(address, length);
+  PartitionDecreaseCommittedPages(root, length);
+}
+
+static ALWAYS_INLINE void PartitionRecommitSystemPages(PartitionRootBase* root,
+                                                       void* address,
+                                                       size_t length) {
+  RecommitSystemPages(address, length);
+  PartitionIncreaseCommittedPages(root, length);
+}
+
+static ALWAYS_INLINE void* PartitionAllocPartitionPages(
+    PartitionRootBase* root,
+    int flags,
+    uint16_t num_partition_pages) {
+  DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
+           kPartitionPageSize));
+  DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
+           kPartitionPageSize));
+  DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
+  size_t total_size = kPartitionPageSize * num_partition_pages;
+  size_t num_partition_pages_left =
+      (root->next_partition_page_end - root->next_partition_page) >>
+      kPartitionPageShift;
+  if (LIKELY(num_partition_pages_left >= num_partition_pages)) {
+    // In this case, we can still hand out pages from the current super page
+    // allocation.
+    char* ret = root->next_partition_page;
+    root->next_partition_page += total_size;
+    PartitionIncreaseCommittedPages(root, total_size);
+    return ret;
+  }
+
+  // Need a new super page. We want to allocate super pages in a contiguous
+  // address region as much as possible. This is important for not causing
+  // page table bloat and not fragmenting address space on 32-bit
+  // architectures.
+  char* requestedAddress = root->next_super_page;
+  char* super_page = reinterpret_cast<char*>(AllocPages(
+      requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible));
+  if (UNLIKELY(!super_page))
+    return 0;
+
+  root->total_size_of_super_pages += kSuperPageSize;
+  PartitionIncreaseCommittedPages(root, total_size);
+
+  root->next_super_page = super_page + kSuperPageSize;
+  char* ret = super_page + kPartitionPageSize;
+  root->next_partition_page = ret + total_size;
+  root->next_partition_page_end = root->next_super_page - kPartitionPageSize;
+  // Make the first partition page in the super page a guard page, but leave a
+  // hole in the middle.
+  // This is where we put page metadata and also a tiny amount of extent
+  // metadata.
+  SetSystemPagesInaccessible(super_page, kSystemPageSize);
+  SetSystemPagesInaccessible(super_page + (kSystemPageSize * 2),
+                             kPartitionPageSize - (kSystemPageSize * 2));
+  // Also make the last partition page a guard page.
+  SetSystemPagesInaccessible(super_page + (kSuperPageSize - kPartitionPageSize),
+                             kPartitionPageSize);
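+  // Net effect, assuming 4KB system pages and 16KB partition pages: the
+  // super page opens with one inaccessible system page, one accessible
+  // metadata system page, and two more inaccessible system pages; the last
+  // partition page is entirely inaccessible.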
+
+  // If we were after a specific address, but didn't get it, assume that
+  // the system chose a lousy address. Here most OSes have a default
+  // algorithm that isn't randomized. For example, most Linux
+  // distributions will allocate the mapping directly before the last
+  // successful mapping, which is far from random. So we just get fresh
+  // randomness for the next mapping attempt.
+  if (requestedAddress && requestedAddress != super_page)
+    root->next_super_page = 0;
+
+  // We allocated a new super page so update super page metadata.
+  // First check if this is a new extent or not.
+  PartitionSuperPageExtentEntry* latest_extent =
+      reinterpret_cast<PartitionSuperPageExtentEntry*>(
+          PartitionSuperPageToMetadataArea(super_page));
+  // By storing the root in every extent metadata object, we have a fast way
+  // to go from a pointer within the partition to the root object.
+  latest_extent->root = root;
+  // Most new extents will be part of a larger extent, and these three fields
+  // are unused, but we initialize them to 0 so that we get a clear signal
+  // in case they are accidentally used.
+  latest_extent->super_page_base = 0;
+  latest_extent->super_pages_end = 0;
+  latest_extent->next = 0;
+
+  PartitionSuperPageExtentEntry* current_extent = root->current_extent;
+  bool isNewExtent = (super_page != requestedAddress);
+  if (UNLIKELY(isNewExtent)) {
+    if (UNLIKELY(!current_extent)) {
+      DCHECK(!root->first_extent);
+      root->first_extent = latest_extent;
+    } else {
+      DCHECK(current_extent->super_page_base);
+      current_extent->next = latest_extent;
+    }
+    root->current_extent = latest_extent;
+    latest_extent->super_page_base = super_page;
+    latest_extent->super_pages_end = super_page + kSuperPageSize;
+  } else {
+    // We allocated next to an existing extent so just nudge the size up a
+    // little.
+    DCHECK(current_extent->super_pages_end);
+    current_extent->super_pages_end += kSuperPageSize;
+    DCHECK(ret >= current_extent->super_page_base &&
+           ret < current_extent->super_pages_end);
+  }
+  return ret;
+}
+
+static ALWAYS_INLINE uint16_t
+PartitionBucketPartitionPages(const PartitionBucket* bucket) {
+  return (bucket->num_system_pages_per_slot_span +
+          (kNumSystemPagesPerPartitionPage - 1)) /
+         kNumSystemPagesPerPartitionPage;
+}
+
+static ALWAYS_INLINE void PartitionPageReset(PartitionPage* page) {
+  DCHECK(PartitionPageStateIsDecommitted(page));
+
+  page->num_unprovisioned_slots = PartitionBucketSlots(page->bucket);
+  DCHECK(page->num_unprovisioned_slots);
+
+  page->next_page = nullptr;
+}
+
+static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
+                                             PartitionBucket* bucket) {
+  // The bucket never changes. We set it up once.
+  page->bucket = bucket;
+  page->empty_cache_index = -1;
+
+  PartitionPageReset(page);
+
+  // If this page has just a single slot, do not set up page offsets for any
+  // page metadata other than the first one. This ensures that attempts to
+  // touch invalid page metadata fail.
+  if (page->num_unprovisioned_slots == 1)
+    return;
+
+  uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
+  char* page_char_ptr = reinterpret_cast<char*>(page);
+  for (uint16_t i = 1; i < num_partition_pages; ++i) {
+    page_char_ptr += kPageMetadataSize;
+    PartitionPage* secondary_page =
+        reinterpret_cast<PartitionPage*>(page_char_ptr);
+    secondary_page->page_offset = i;
+  }
+}
+
+static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
+    PartitionPage* page) {
+  DCHECK(page != &PartitionRootGeneric::gSeedPage);
+  uint16_t num_slots = page->num_unprovisioned_slots;
+  DCHECK(num_slots);
+  PartitionBucket* bucket = page->bucket;
+  // We should only get here when _every_ slot is either used or unprovisioned.
+  // (The third state is "on the freelist". If we have a non-empty freelist, we
+  // should not get here.)
+  DCHECK(num_slots + page->num_allocated_slots == PartitionBucketSlots(bucket));
+  // Similarly, make explicitly sure that the freelist is empty.
+  DCHECK(!page->freelist_head);
+  DCHECK(page->num_allocated_slots >= 0);
+
+  size_t size = bucket->slot_size;
+  char* base = reinterpret_cast<char*>(PartitionPageToPointer(page));
+  char* return_object = base + (size * page->num_allocated_slots);
+  char* firstFreelistPointer = return_object + size;
+  char* firstFreelistPointerExtent =
+      firstFreelistPointer + sizeof(PartitionFreelistEntry*);
+  // Our goal is to fault as few system pages as possible. We calculate the
+  // page containing the "end" of the returned slot, and then allow freelist
+  // pointers to be written up to the end of that page.
+  char* sub_page_limit = reinterpret_cast<char*>(
+      RoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer)));
+  char* slots_limit = return_object + (size * num_slots);
+  char* freelist_limit = sub_page_limit;
+  if (UNLIKELY(slots_limit < freelist_limit))
+    freelist_limit = slots_limit;
+
+  uint16_t num_new_freelist_entries = 0;
+  if (LIKELY(firstFreelistPointerExtent <= freelist_limit)) {
+    // Only consider used space in the slot span. If we consider wasted
+    // space, we may get an off-by-one when a freelist pointer fits in the
+    // wasted space, but a slot does not.
+    // We know we can fit at least one freelist pointer.
+    num_new_freelist_entries = 1;
+    // Any further entries require space for the whole slot span.
+    num_new_freelist_entries += static_cast<uint16_t>(
+        (freelist_limit - firstFreelistPointerExtent) / size);
+  }
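+  // Example with assumed numbers: if size == 2048 and return_object starts a
+  // fresh system page, the freelist pointer written at return_object + 2048
+  // still lies inside that page, so exactly one new freelist entry is
+  // created and no additional page is faulted.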
+
+  // We always return an object slot -- that's the +1 below.
+  // We do not necessarily create any new freelist entries, because we cross
+  // sub page boundaries frequently for large bucket sizes.
+  DCHECK(num_new_freelist_entries + 1 <= num_slots);
+  num_slots -= (num_new_freelist_entries + 1);
+  page->num_unprovisioned_slots = num_slots;
+  page->num_allocated_slots++;
+
+  if (LIKELY(num_new_freelist_entries)) {
+    char* freelist_pointer = firstFreelistPointer;
+    PartitionFreelistEntry* entry =
+        reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
+    page->freelist_head = entry;
+    while (--num_new_freelist_entries) {
+      freelist_pointer += size;
+      PartitionFreelistEntry* next_entry =
+          reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
+      entry->next = PartitionFreelistMask(next_entry);
+      entry = next_entry;
+    }
+    entry->next = PartitionFreelistMask(0);
+  } else {
+    page->freelist_head = 0;
+  }
+  return return_object;
+}
+
+// This helper function scans a bucket's active page list for a suitable new
+// active page.
+// When it finds a suitable new active page (one that has free slots and is not
+// empty), it is set as the new active page. If there is no suitable new
+// active page, the current active page is set to the seed page.
+// As potential pages are scanned, they are tidied up according to their state.
+// Empty pages are swept on to the empty page list, decommitted pages on to the
+// decommitted page list and full pages are unlinked from any list.
+static bool PartitionSetNewActivePage(PartitionBucket* bucket) {
+  PartitionPage* page = bucket->active_pages_head;
+  if (page == &PartitionRootBase::gSeedPage)
+    return false;
+
+  PartitionPage* next_page;
+
+  for (; page; page = next_page) {
+    next_page = page->next_page;
+    DCHECK(page->bucket == bucket);
+    DCHECK(page != bucket->empty_pages_head);
+    DCHECK(page != bucket->decommitted_pages_head);
+
+    // Deal with empty and decommitted pages.
+    if (LIKELY(PartitionPageStateIsActive(page))) {
+      // This page is usable because it has freelist entries, or has
+      // unprovisioned slots we can create freelist entries from.
+      bucket->active_pages_head = page;
+      return true;
+    }
+    if (LIKELY(PartitionPageStateIsEmpty(page))) {
+      page->next_page = bucket->empty_pages_head;
+      bucket->empty_pages_head = page;
+    } else if (LIKELY(PartitionPageStateIsDecommitted(page))) {
+      page->next_page = bucket->decommitted_pages_head;
+      bucket->decommitted_pages_head = page;
+    } else {
+      DCHECK(PartitionPageStateIsFull(page));
+      // If we get here, we found a full page. Skip over it too, and also
+      // tag it as full (via a negative value). We need it tagged so that
+      // free'ing can tell, and move it back into the active page list.
+      page->num_allocated_slots = -page->num_allocated_slots;
+      ++bucket->num_full_pages;
+      // num_full_pages is a uint16_t for efficient packing so guard against
+      // overflow to be safe.
+      if (UNLIKELY(!bucket->num_full_pages))
+        PartitionBucketFull();
+      // Not necessary but might help stop accidents.
+      page->next_page = 0;
+    }
+  }
+
+  bucket->active_pages_head = &PartitionRootGeneric::gSeedPage;
+  return false;
+}
+
+static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(
+    PartitionPage* page) {
+  DCHECK(PartitionBucketIsDirectMapped(page->bucket));
+  return reinterpret_cast<PartitionDirectMapExtent*>(
+      reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
+}
+
+static ALWAYS_INLINE void PartitionPageSetRawSize(PartitionPage* page,
+                                                  size_t size) {
+  size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page);
+  if (UNLIKELY(raw_size_ptr != nullptr))
+    *raw_size_ptr = size;
+}
+
+static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
+                                                       int flags,
+                                                       size_t raw_size) {
+  size_t size = PartitionDirectMapSize(raw_size);
+
+  // Because we need to fake looking like a super page, we need to allocate
+  // a bunch of system pages more than "size":
+  // - The first few system pages are the partition page in which the super
+  // page metadata is stored. We fault just one system page out of a partition
+  // page sized clump.
+  // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
+  // massive address space plus randomization instead).
+  size_t map_size = size + kPartitionPageSize;
+#if !defined(ARCH_CPU_64_BITS)
+  map_size += kSystemPageSize;
+#endif
+  // Round up to the allocation granularity.
+  map_size += kPageAllocationGranularityOffsetMask;
+  map_size &= kPageAllocationGranularityBaseMask;
+
+  // TODO: these pages will be zero-filled. Consider internalizing an
+  // allocZeroed() API so we can avoid a memset() entirely in this case.
+  char* ptr = reinterpret_cast<char*>(
+      AllocPages(0, map_size, kSuperPageSize, PageAccessible));
+  if (UNLIKELY(!ptr))
+    return nullptr;
+
+  size_t committed_page_size = size + kSystemPageSize;
+  root->total_size_of_direct_mapped_pages += committed_page_size;
+  PartitionIncreaseCommittedPages(root, committed_page_size);
+
+  char* slot = ptr + kPartitionPageSize;
+  SetSystemPagesInaccessible(ptr + (kSystemPageSize * 2),
+                             kPartitionPageSize - (kSystemPageSize * 2));
+#if !defined(ARCH_CPU_64_BITS)
+  SetSystemPagesInaccessible(ptr, kSystemPageSize);
+  SetSystemPagesInaccessible(slot + size, kSystemPageSize);
+#endif
+
+  PartitionSuperPageExtentEntry* extent =
+      reinterpret_cast<PartitionSuperPageExtentEntry*>(
+          PartitionSuperPageToMetadataArea(ptr));
+  extent->root = root;
+  // The new structures are all located inside a fresh system page so they
+  // will all be zeroed out. These DCHECKs are for documentation.
+  DCHECK(!extent->super_page_base);
+  DCHECK(!extent->super_pages_end);
+  DCHECK(!extent->next);
+  PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(slot);
+  PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
+      reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
+  DCHECK(!page->next_page);
+  DCHECK(!page->num_allocated_slots);
+  DCHECK(!page->num_unprovisioned_slots);
+  DCHECK(!page->page_offset);
+  DCHECK(!page->empty_cache_index);
+  page->bucket = bucket;
+  page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
+  PartitionFreelistEntry* next_entry =
+      reinterpret_cast<PartitionFreelistEntry*>(slot);
+  next_entry->next = PartitionFreelistMask(0);
+
+  DCHECK(!bucket->active_pages_head);
+  DCHECK(!bucket->empty_pages_head);
+  DCHECK(!bucket->decommitted_pages_head);
+  DCHECK(!bucket->num_system_pages_per_slot_span);
+  DCHECK(!bucket->num_full_pages);
+  bucket->slot_size = size;
+
+  PartitionDirectMapExtent* map_extent = partitionPageToDirectMapExtent(page);
+  map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
+  map_extent->bucket = bucket;
+
+  // Maintain the doubly-linked list of all direct mappings.
+  map_extent->next_extent = root->direct_map_list;
+  if (map_extent->next_extent)
+    map_extent->next_extent->prev_extent = map_extent;
+  map_extent->prev_extent = nullptr;
+  root->direct_map_list = map_extent;
+
+  return page;
+}
+
+static ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
+  PartitionRootBase* root = PartitionPageToRoot(page);
+  const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page);
+  size_t unmap_size = extent->map_size;
+
+  // Maintain the doubly-linked list of all direct mappings.
+  if (extent->prev_extent) {
+    DCHECK(extent->prev_extent->next_extent == extent);
+    extent->prev_extent->next_extent = extent->next_extent;
+  } else {
+    root->direct_map_list = extent->next_extent;
+  }
+  if (extent->next_extent) {
+    DCHECK(extent->next_extent->prev_extent == extent);
+    extent->next_extent->prev_extent = extent->prev_extent;
+  }
+
+  // Add on the size of the trailing guard page and preceding partition
+  // page.
+  unmap_size += kPartitionPageSize + kSystemPageSize;
+
+  size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
+  PartitionDecreaseCommittedPages(root, uncommitted_page_size);
+  DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
+  root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
+
+  DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
+
+  char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+  // Account for the mapping starting a partition page before the actual
+  // allocation address.
+  ptr -= kPartitionPageSize;
+
+  FreePages(ptr, unmap_size);
+}
+
+void* PartitionAllocSlowPath(PartitionRootBase* root,
+                             int flags,
+                             size_t size,
+                             PartitionBucket* bucket) {
+  // The slow path is called when the freelist is empty.
+  DCHECK(!bucket->active_pages_head->freelist_head);
+
+  PartitionPage* new_page = nullptr;
+
+  // For the PartitionAllocGeneric API, we have a bunch of buckets marked
+  // as special cases. We bounce them through to the slow path so that we
+  // can still have a blazing fast hot path due to lack of corner-case
+  // branches.
+  bool returnNull = flags & PartitionAllocReturnNull;
+  if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
+    DCHECK(size > kGenericMaxBucketed);
+    DCHECK(bucket == &PartitionRootBase::gPagedBucket);
+    DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage);
+    if (size > kGenericMaxDirectMapped) {
+      if (returnNull)
+        return nullptr;
+      PartitionExcessiveAllocationSize();
+    }
+    new_page = PartitionDirectMap(root, flags, size);
+  } else if (LIKELY(PartitionSetNewActivePage(bucket))) {
+    // First, did we find an active page in the active pages list?
+    new_page = bucket->active_pages_head;
+    DCHECK(PartitionPageStateIsActive(new_page));
+  } else if (LIKELY(bucket->empty_pages_head != nullptr) ||
+             LIKELY(bucket->decommitted_pages_head != nullptr)) {
+    // Second, look in our lists of empty and decommitted pages.
+    // Check empty pages first, which are preferred, but beware that an
+    // empty page might have been decommitted.
+    while (LIKELY((new_page = bucket->empty_pages_head) != nullptr)) {
+      DCHECK(new_page->bucket == bucket);
+      DCHECK(PartitionPageStateIsEmpty(new_page) ||
+             PartitionPageStateIsDecommitted(new_page));
+      bucket->empty_pages_head = new_page->next_page;
+      // Accept the empty page unless it got decommitted.
+      if (new_page->freelist_head) {
+        new_page->next_page = nullptr;
+        break;
+      }
+      DCHECK(PartitionPageStateIsDecommitted(new_page));
+      new_page->next_page = bucket->decommitted_pages_head;
+      bucket->decommitted_pages_head = new_page;
+    }
+    if (UNLIKELY(!new_page) &&
+        LIKELY(bucket->decommitted_pages_head != nullptr)) {
+      new_page = bucket->decommitted_pages_head;
+      DCHECK(new_page->bucket == bucket);
+      DCHECK(PartitionPageStateIsDecommitted(new_page));
+      bucket->decommitted_pages_head = new_page->next_page;
+      void* addr = PartitionPageToPointer(new_page);
+      PartitionRecommitSystemPages(root, addr,
+                                   PartitionBucketBytes(new_page->bucket));
+      PartitionPageReset(new_page);
+    }
+    DCHECK(new_page);
+  } else {
+    // Third: if we get here, we need a brand new page.
+    uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
+    void* raw_pages =
+        PartitionAllocPartitionPages(root, flags, num_partition_pages);
+    if (LIKELY(raw_pages != nullptr)) {
+      new_page = PartitionPointerToPageNoAlignmentCheck(raw_pages);
+      PartitionPageSetup(new_page, bucket);
+    }
+  }
+
+  // Bail if we had a memory allocation failure.
+  if (UNLIKELY(!new_page)) {
+    DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage);
+    if (return_null)
+      return nullptr;
+    PartitionOutOfMemory(root);
+  }
+
+  bucket = new_page->bucket;
+  DCHECK(bucket != &PartitionRootBase::gPagedBucket);
+  bucket->active_pages_head = new_page;
+  PartitionPageSetRawSize(new_page, size);
+
+  // If we found an active page with free slots, or an empty page, we have a
+  // usable freelist head.
+  if (LIKELY(new_page->freelist_head != nullptr)) {
+    PartitionFreelistEntry* entry = new_page->freelist_head;
+    PartitionFreelistEntry* new_head = PartitionFreelistMask(entry->next);
+    new_page->freelist_head = new_head;
+    new_page->num_allocated_slots++;
+    return entry;
+  }
+  // Otherwise, we need to build the freelist.
+  DCHECK(new_page->num_unprovisioned_slots);
+  return PartitionPageAllocAndFillFreelist(new_page);
+}
+
+static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
+                                                PartitionPage* page) {
+  DCHECK(PartitionPageStateIsEmpty(page));
+  DCHECK(!PartitionBucketIsDirectMapped(page->bucket));
+  void* addr = PartitionPageToPointer(page);
+  PartitionDecommitSystemPages(root, addr, PartitionBucketBytes(page->bucket));
+
+  // We actually leave the decommitted page in the active list. We'll sweep
+  // it on to the decommitted page list when we next walk the active page
+  // list.
+  // Pulling this trick enables us to use a singly-linked page list for all
+  // cases, which is critical in keeping the page metadata structure down to
+  // 32 bytes in size.
+  page->freelist_head = nullptr;
+  page->num_unprovisioned_slots = 0;
+  DCHECK(PartitionPageStateIsDecommitted(page));
+}
+
+static void PartitionDecommitPageIfPossible(PartitionRootBase* root,
+                                            PartitionPage* page) {
+  DCHECK(page->empty_cache_index >= 0);
+  DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
+  DCHECK(page == root->global_empty_page_ring[page->empty_cache_index]);
+  page->empty_cache_index = -1;
+  if (PartitionPageStateIsEmpty(page))
+    PartitionDecommitPage(root, page);
+}
+
+static ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
+  DCHECK(PartitionPageStateIsEmpty(page));
+  PartitionRootBase* root = PartitionPageToRoot(page);
+
+  // If the page is already registered as empty, give it another life.
+  if (page->empty_cache_index != -1) {
+    DCHECK(page->empty_cache_index >= 0);
+    DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
+    DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
+    root->global_empty_page_ring[page->empty_cache_index] = nullptr;
+  }
+
+  int16_t current_index = root->global_empty_page_ring_index;
+  PartitionPage* page_to_decommit =
+      root->global_empty_page_ring[current_index];
+  // The page might well have been re-activated, filled up, etc. before we get
+  // around to looking at it here.
+  if (page_to_decommit)
+    PartitionDecommitPageIfPossible(root, page_to_decommit);
+
+  // We put the empty slot span on our global list of "pages that were once
+  // empty", thus giving it a bit of breathing room to get re-used before we
+  // really free it. This improves performance, particularly on Mac OS X,
+  // which has subpar memory management performance.
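+  // With kMaxFreeableSpans == 16, a page registered here survives the next 15
+  // registrations; the 16th reuses its ring slot and decommits the page if it
+  // is still empty by then.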
+  root->global_empty_page_ring[current_index] = page;
+  page->empty_cache_index = current_index;
+  ++current_index;
+  if (current_index == kMaxFreeableSpans)
+    current_index = 0;
+  root->global_empty_page_ring_index = current_index;
+}
+
+static void PartitionDecommitEmptyPages(PartitionRootBase* root) {
+  for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+    PartitionPage* page = root->global_empty_page_ring[i];
+    if (page)
+      PartitionDecommitPageIfPossible(root, page);
+    root->global_empty_page_ring[i] = nullptr;
+  }
+}
+
+void PartitionFreeSlowPath(PartitionPage* page) {
+  PartitionBucket* bucket = page->bucket;
+  DCHECK(page != &PartitionRootGeneric::gSeedPage);
+  if (LIKELY(page->num_allocated_slots == 0)) {
+    // Page became fully unused.
+    if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
+      PartitionDirectUnmap(page);
+      return;
+    }
+    // If it's the current active page, change it. We bounce the page to
+    // the empty list as a force towards defragmentation.
+    if (LIKELY(page == bucket->active_pages_head))
+      (void)PartitionSetNewActivePage(bucket);
+    DCHECK(bucket->active_pages_head != page);
+
+    PartitionPageSetRawSize(page, 0);
+    DCHECK(!PartitionPageGetRawSize(page));
+
+    PartitionRegisterEmptyPage(page);
+  } else {
+    DCHECK(!PartitionBucketIsDirectMapped(bucket));
+    // Ensure that the page is full. That's the only valid case if we
+    // arrive here.
+    DCHECK(page->num_allocated_slots < 0);
+    // A transition of num_allocated_slots from 0 to -1 is not legal, and
+    // likely indicates a double-free.
+    CHECK(page->num_allocated_slots != -1);
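+    // The fast free path has already decremented the counter, so a full
+    // span of S slots (stored as the negated slot count -S; see PartitionPage)
+    // arrives here as -(S + 1). Negating and subtracting 2 yields S - 1, the
+    // correct count now that one slot is free.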
+    page->num_allocated_slots = -page->num_allocated_slots - 2;
+    DCHECK(page->num_allocated_slots == PartitionBucketSlots(bucket) - 1);
+    // Fully used page became partially used. It must be put back on the
+    // non-full page list. Also make it the current page to increase the
+    // chances of it being filled up again. The old current page will be
+    // the next page.
+    DCHECK(!page->next_page);
+    if (LIKELY(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage))
+      page->next_page = bucket->active_pages_head;
+    bucket->active_pages_head = page;
+    --bucket->num_full_pages;
+    // Special case: for a partition page with just a single slot, it may
+    // now be empty and we want to run it through the empty logic.
+    if (UNLIKELY(page->num_allocated_slots == 0))
+      PartitionFreeSlowPath(page);
+  }
+}
+
+bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
+                                         PartitionPage* page,
+                                         size_t raw_size) {
+  DCHECK(PartitionBucketIsDirectMapped(page->bucket));
+
+  raw_size = PartitionCookieSizeAdjustAdd(raw_size);
+
+  // Note that the new size might be a bucketed size; this function is called
+  // whenever we're reallocating a direct mapped allocation.
+  size_t new_size = PartitionDirectMapSize(raw_size);
+  if (new_size < kGenericMinDirectMappedDownsize)
+    return false;
+
+  // bucket->slot_size is the current size of the allocation.
+  size_t current_size = page->bucket->slot_size;
+  if (new_size == current_size)
+    return true;
+
+  char* char_ptr = static_cast<char*>(PartitionPageToPointer(page));
+
+  if (new_size < current_size) {
+    size_t map_size = partitionPageToDirectMapExtent(page)->map_size;
+
+    // Don't reallocate in-place if the new size is less than 80% of the full
+    // map size, to avoid holding on to too much unused address space.
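+    // For example, with 4KB system pages a 100-page mapping may shrink in
+    // place to no fewer than 80 pages, since 80 * 5 == 100 * 4.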
+    if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4)
+      return false;
+
+    // Shrink by decommitting unneeded pages and making them inaccessible.
+    size_t decommit_size = current_size - new_size;
+    PartitionDecommitSystemPages(root, char_ptr + new_size, decommit_size);
+    SetSystemPagesInaccessible(char_ptr + new_size, decommit_size);
+  } else if (new_size <= partitionPageToDirectMapExtent(page)->map_size) {
+    // Grow within the actually allocated memory. Just need to make the
+    // pages accessible again.
+    size_t recommit_size = new_size - current_size;
+    bool ret = SetSystemPagesAccessible(char_ptr + current_size, recommit_size);
+    CHECK(ret);
+    PartitionRecommitSystemPages(root, char_ptr + current_size, recommit_size);
+
+#if DCHECK_IS_ON()
+    memset(char_ptr + current_size, kUninitializedByte, recommit_size);
+#endif
+  } else {
+    // We can't perform the realloc in-place.
+    // TODO: support this too when possible.
+    return false;
+  }
+
+#if DCHECK_IS_ON()
+  // Write a new trailing cookie.
+  PartitionCookieWriteValue(char_ptr + raw_size - kCookieSize);
+#endif
+
+  PartitionPageSetRawSize(page, raw_size);
+  DCHECK(PartitionPageGetRawSize(page) == raw_size);
+
+  page->bucket->slot_size = new_size;
+  return true;
+}
+
+void* PartitionReallocGeneric(PartitionRootGeneric* root,
+                              void* ptr,
+                              size_t new_size,
+                              const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  return realloc(ptr, new_size);
+#else
+  if (UNLIKELY(!ptr))
+    return PartitionAllocGeneric(root, new_size, type_name);
+  if (UNLIKELY(!new_size)) {
+    PartitionFreeGeneric(root, ptr);
+    return nullptr;
+  }
+
+  if (new_size > kGenericMaxDirectMapped)
+    PartitionExcessiveAllocationSize();
+
+  DCHECK(PartitionPointerIsValid(PartitionCookieFreePointerAdjust(ptr)));
+
+  PartitionPage* page =
+      PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+
+  if (UNLIKELY(PartitionBucketIsDirectMapped(page->bucket))) {
+    // We may be able to perform the realloc in place by changing the
+    // accessibility of memory pages and, if reducing the size, decommitting
+    // them.
+    if (partitionReallocDirectMappedInPlace(root, page, new_size)) {
+      PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name);
+      return ptr;
+    }
+  }
+
+  size_t actual_new_size = PartitionAllocActualSize(root, new_size);
+  size_t actual_old_size = PartitionAllocGetSize(ptr);
+
+  // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
+  // new size is a significant percentage smaller. We could do the same if we
+  // determine it is a win.
+  if (actual_new_size == actual_old_size) {
+    // Trying to allocate a block of size new_size would give us a block of
+    // the same size as the one we've already got, so no point in doing
+    // anything here.
+    return ptr;
+  }
+
+  // This realloc cannot be resized in-place. Sadness.
+  void* ret = PartitionAllocGeneric(root, new_size, type_name);
+  size_t copy_size = actual_old_size;
+  if (new_size < copy_size)
+    copy_size = new_size;
+
+  memcpy(ret, ptr, copy_size);
+  PartitionFreeGeneric(root, ptr);
+  return ret;
+#endif
+}
+
+static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
+  const PartitionBucket* bucket = page->bucket;
+  size_t slot_size = bucket->slot_size;
+  if (slot_size < kSystemPageSize || !page->num_allocated_slots)
+    return 0;
+
+  size_t bucket_num_slots = PartitionBucketSlots(bucket);
+  size_t discardable_bytes = 0;
+
+  size_t raw_size = PartitionPageGetRawSize(page);
+  if (raw_size) {
+    uint32_t used_bytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
+    discardable_bytes = bucket->slot_size - used_bytes;
+    if (discardable_bytes && discard) {
+      char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+      ptr += used_bytes;
+      DiscardSystemPages(ptr, discardable_bytes);
+    }
+    return discardable_bytes;
+  }
+
+  const size_t max_slot_count =
+      (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
+  DCHECK(bucket_num_slots <= max_slot_count);
+  DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
+  size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
+  char slot_usage[max_slot_count];
+  size_t last_slot = static_cast<size_t>(-1);
+  memset(slot_usage, 1, num_slots);
+  char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+  PartitionFreelistEntry* entry = page->freelist_head;
+  // First, walk the freelist for this page and make a bitmap of which slots
+  // are not in use.
+  while (entry) {
+    size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
+    DCHECK(slot_index < num_slots);
+    slot_usage[slot_index] = 0;
+    entry = PartitionFreelistMask(entry->next);
+    // If we have a slot where the masked freelist entry is 0, we can
+    // actually discard that freelist entry because touching a discarded
+    // page is guaranteed to return the original content or 0.
+    // (Note that this optimization won't fire on big-endian machines
+    // because the masking function is negation.)
+    if (!PartitionFreelistMask(entry))
+      last_slot = slot_index;
+  }
+
+  // If the slot(s) at the end of the slot span are not in use, we can
+  // truncate them entirely and rewrite the freelist.
+  size_t truncated_slots = 0;
+  while (!slot_usage[num_slots - 1]) {
+    truncated_slots++;
+    num_slots--;
+    DCHECK(num_slots);
+  }
+  // First, do the work of calculating the discardable bytes. Don't actually
+  // discard anything unless the discard flag was passed in.
+  char* begin_ptr = nullptr;
+  char* end_ptr = nullptr;
+  size_t unprovisioned_bytes = 0;
+  if (truncated_slots) {
+    begin_ptr = ptr + (num_slots * slot_size);
+    end_ptr = begin_ptr + (slot_size * truncated_slots);
+    begin_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
+    // We round the end pointer up here, not down, because we're at the end
+    // of a slot span, so we "own" all the way up to the page boundary.
+    end_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
+    DCHECK(end_ptr <= ptr + PartitionBucketBytes(bucket));
+    if (begin_ptr < end_ptr) {
+      unprovisioned_bytes = end_ptr - begin_ptr;
+      discardable_bytes += unprovisioned_bytes;
+    }
+  }
+  if (unprovisioned_bytes && discard) {
+    DCHECK(truncated_slots > 0);
+    size_t num_new_entries = 0;
+    page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
+    // Rewrite the freelist.
+    PartitionFreelistEntry** entry_ptr = &page->freelist_head;
+    for (size_t slot_index = 0; slot_index < num_slots; ++slot_index) {
+      if (slot_usage[slot_index])
+        continue;
+      PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(
+          ptr + (slot_size * slot_index));
+      *entry_ptr = PartitionFreelistMask(entry);
+      entry_ptr = reinterpret_cast<PartitionFreelistEntry**>(entry);
+      num_new_entries++;
+    }
+    // Terminate the freelist chain.
+    *entry_ptr = nullptr;
+    // The freelist head must be stored unmasked; the loop above masked it,
+    // and masking is self-inverse, so applying the mask again restores the
+    // real pointer.
+    DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+    // Discard the memory.
+    DiscardSystemPages(begin_ptr, unprovisioned_bytes);
+  }
+
+  // Next, walk the slots and for any not in use, consider where the system
+  // page boundaries occur. We can release any system pages back to the
+  // system as long as we don't interfere with a freelist pointer or an
+  // adjacent slot.
+  for (size_t i = 0; i < num_slots; ++i) {
+    if (slot_usage[i])
+      continue;
+    // The first address we can safely discard is just after the freelist
+    // pointer. There's one quirk: if the freelist pointer is actually a
+    // null, we can discard that pointer value too.
+    char* begin_ptr = ptr + (i * slot_size);
+    char* end_ptr = begin_ptr + slot_size;
+    if (i != last_slot)
+      begin_ptr += sizeof(PartitionFreelistEntry);
+    begin_ptr = reinterpret_cast<char*>(
+        RoundUpToSystemPage(reinterpret_cast<size_t>(begin_ptr)));
+    end_ptr = reinterpret_cast<char*>(
+        RoundDownToSystemPage(reinterpret_cast<size_t>(end_ptr)));
+    if (begin_ptr < end_ptr) {
+      size_t partial_slot_bytes = end_ptr - begin_ptr;
+      discardable_bytes += partial_slot_bytes;
+      if (discard)
+        DiscardSystemPages(begin_ptr, partial_slot_bytes);
+    }
+  }
+  return discardable_bytes;
+}
+
+static void PartitionPurgeBucket(PartitionBucket* bucket) {
+  if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) {
+    for (PartitionPage* page = bucket->active_pages_head; page;
+         page = page->next_page) {
+      DCHECK(page != &PartitionRootGeneric::gSeedPage);
+      (void)PartitionPurgePage(page, true);
+    }
+  }
+}
+
+void PartitionPurgeMemory(PartitionRoot* root, int flags) {
+  if (flags & PartitionPurgeDecommitEmptyPages)
+    PartitionDecommitEmptyPages(root);
+  // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
+  // here because that flag is only useful for allocations >= system page
+  // size. We only have allocations that large inside generic partitions
+  // at the moment.
+}
+
+void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) {
+  subtle::SpinLock::Guard guard(root->lock);
+  if (flags & PartitionPurgeDecommitEmptyPages)
+    PartitionDecommitEmptyPages(root);
+  if (flags & PartitionPurgeDiscardUnusedSystemPages) {
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      PartitionBucket* bucket = &root->buckets[i];
+      if (bucket->slot_size >= kSystemPageSize)
+        PartitionPurgeBucket(bucket);
+    }
+  }
+}
+
+static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
+                                   const PartitionPage* page) {
+  uint16_t bucket_num_slots = PartitionBucketSlots(page->bucket);
+
+  if (PartitionPageStateIsDecommitted(page)) {
+    ++stats_out->num_decommitted_pages;
+    return;
+  }
+
+  stats_out->discardable_bytes +=
+      PartitionPurgePage(const_cast<PartitionPage*>(page), false);
+
+  size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page));
+  if (raw_size)
+    stats_out->active_bytes += static_cast<uint32_t>(raw_size);
+  else
+    stats_out->active_bytes +=
+        (page->num_allocated_slots * stats_out->bucket_slot_size);
+
+  size_t page_bytes_resident =
+      RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
+                          stats_out->bucket_slot_size);
+  stats_out->resident_bytes += page_bytes_resident;
+  if (PartitionPageStateIsEmpty(page)) {
+    stats_out->decommittable_bytes += page_bytes_resident;
+    ++stats_out->num_empty_pages;
+  } else if (PartitionPageStateIsFull(page)) {
+    ++stats_out->num_full_pages;
+  } else {
+    DCHECK(PartitionPageStateIsActive(page));
+    ++stats_out->num_active_pages;
+  }
+}
+
+static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
+                                     const PartitionBucket* bucket) {
+  DCHECK(!PartitionBucketIsDirectMapped(bucket));
+  stats_out->is_valid = false;
+  // If the active page list is empty (== &PartitionRootGeneric::gSeedPage),
+  // the bucket might still need to be reported if it has a list of empty,
+  // decommitted or full pages.
+  if (bucket->active_pages_head == &PartitionRootGeneric::gSeedPage &&
+      !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
+      !bucket->num_full_pages)
+    return;
+
+  memset(stats_out, '\0', sizeof(*stats_out));
+  stats_out->is_valid = true;
+  stats_out->is_direct_map = false;
+  stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
+  stats_out->bucket_slot_size = bucket->slot_size;
+  uint16_t bucket_num_slots = PartitionBucketSlots(bucket);
+  size_t bucket_useful_storage = stats_out->bucket_slot_size * bucket_num_slots;
+  stats_out->allocated_page_size = PartitionBucketBytes(bucket);
+  stats_out->active_bytes = bucket->num_full_pages * bucket_useful_storage;
+  stats_out->resident_bytes =
+      bucket->num_full_pages * stats_out->allocated_page_size;
+
+  for (const PartitionPage* page = bucket->empty_pages_head; page;
+       page = page->next_page) {
+    DCHECK(PartitionPageStateIsEmpty(page) ||
+           PartitionPageStateIsDecommitted(page));
+    PartitionDumpPageStats(stats_out, page);
+  }
+  for (const PartitionPage* page = bucket->decommitted_pages_head; page;
+       page = page->next_page) {
+    DCHECK(PartitionPageStateIsDecommitted(page));
+    PartitionDumpPageStats(stats_out, page);
+  }
+
+  if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) {
+    for (const PartitionPage* page = bucket->active_pages_head; page;
+         page = page->next_page) {
+      DCHECK(page != &PartitionRootGeneric::gSeedPage);
+      PartitionDumpPageStats(stats_out, page);
+    }
+  }
+}
+
+void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
+                               const char* partition_name,
+                               bool is_light_dump,
+                               PartitionStatsDumper* dumper) {
+  PartitionMemoryStats stats = {0};
+  stats.total_mmapped_bytes = partition->total_size_of_super_pages +
+                              partition->total_size_of_direct_mapped_pages;
+  stats.total_committed_bytes = partition->total_size_of_committed_pages;
+
+  size_t direct_mapped_allocations_total_size = 0;
+
+  static const size_t kMaxReportableDirectMaps = 4096;
+
+  // Allocate on the heap rather than on the stack to avoid stack overflow
+  // skirmishes (on Windows, in particular).
+  std::unique_ptr<uint32_t[]> direct_map_lengths = nullptr;
+  if (!is_light_dump) {
+    direct_map_lengths =
+        std::unique_ptr<uint32_t[]>(new uint32_t[kMaxReportableDirectMaps]);
+  }
+
+  PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
+  size_t num_direct_mapped_allocations = 0;
+  {
+    subtle::SpinLock::Guard guard(partition->lock);
+
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      const PartitionBucket* bucket = &partition->buckets[i];
+      // Don't report the pseudo buckets that the generic allocator sets up in
+      // order to preserve a fast size->bucket map (see
+      // PartitionAllocGenericInit for details).
+      if (!bucket->active_pages_head)
+        bucket_stats[i].is_valid = false;
+      else
+        PartitionDumpBucketStats(&bucket_stats[i], bucket);
+      if (bucket_stats[i].is_valid) {
+        stats.total_resident_bytes += bucket_stats[i].resident_bytes;
+        stats.total_active_bytes += bucket_stats[i].active_bytes;
+        stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
+        stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
+      }
+    }
+
+    for (PartitionDirectMapExtent* extent = partition->direct_map_list;
+         extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
+         extent = extent->next_extent, ++num_direct_mapped_allocations) {
+      DCHECK(!extent->next_extent ||
+             extent->next_extent->prev_extent == extent);
+      size_t slot_size = extent->bucket->slot_size;
+      direct_mapped_allocations_total_size += slot_size;
+      if (is_light_dump)
+        continue;
+      direct_map_lengths[num_direct_mapped_allocations] = slot_size;
+    }
+  }
+
+  if (!is_light_dump) {
+    // Call |PartitionsDumpBucketStats| after collecting stats, because it may
+    // allocate via |PartitionAllocGeneric| and thus cannot run while we hold
+    // the partition lock.
+    for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+      if (bucket_stats[i].is_valid)
+        dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
+    }
+
+    for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
+      uint32_t size = direct_map_lengths[i];
+
+      PartitionBucketMemoryStats mapped_stats;
+      memset(&mapped_stats, '\0', sizeof(mapped_stats));
+      mapped_stats.is_valid = true;
+      mapped_stats.is_direct_map = true;
+      mapped_stats.num_full_pages = 1;
+      mapped_stats.allocated_page_size = size;
+      mapped_stats.bucket_slot_size = size;
+      mapped_stats.active_bytes = size;
+      mapped_stats.resident_bytes = size;
+      dumper->PartitionsDumpBucketStats(partition_name, &mapped_stats);
+    }
+  }
+
+  stats.total_resident_bytes += direct_mapped_allocations_total_size;
+  stats.total_active_bytes += direct_mapped_allocations_total_size;
+  dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+void PartitionDumpStats(PartitionRoot* partition,
+                        const char* partition_name,
+                        bool is_light_dump,
+                        PartitionStatsDumper* dumper) {
+  static const size_t kMaxReportableBuckets = 4096 / sizeof(void*);
+  PartitionBucketMemoryStats memory_stats[kMaxReportableBuckets];
+  const size_t partition_num_buckets = partition->num_buckets;
+  DCHECK(partition_num_buckets <= kMaxReportableBuckets);
+
+  for (size_t i = 0; i < partition_num_buckets; ++i)
+    PartitionDumpBucketStats(&memory_stats[i], &partition->buckets()[i]);
+
+  // PartitionsDumpBucketStats is called after collecting stats because it
+  // can use PartitionAlloc to allocate and this can affect the statistics.
+  PartitionMemoryStats stats = {0};
+  stats.total_mmapped_bytes = partition->total_size_of_super_pages;
+  stats.total_committed_bytes = partition->total_size_of_committed_pages;
+  DCHECK(!partition->total_size_of_direct_mapped_pages);
+  for (size_t i = 0; i < partition_num_buckets; ++i) {
+    if (memory_stats[i].is_valid) {
+      stats.total_resident_bytes += memory_stats[i].resident_bytes;
+      stats.total_active_bytes += memory_stats[i].active_bytes;
+      stats.total_decommittable_bytes += memory_stats[i].decommittable_bytes;
+      stats.total_discardable_bytes += memory_stats[i].discardable_bytes;
+      if (!is_light_dump)
+        dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
+    }
+  }
+  dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+}  // namespace base
+}  // namespace pdfium
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.h b/third_party/base/allocator/partition_allocator/partition_alloc.h
new file mode 100644
index 0000000..285f2af
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.h
@@ -0,0 +1,908 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
+
+// DESCRIPTION
+// PartitionAlloc() / PartitionAllocGeneric() and PartitionFree() /
+// PartitionFreeGeneric() are approximately analogous to malloc() and free().
+//
+// The main difference is that a PartitionRoot / PartitionRootGeneric object
+// must be supplied to these functions, representing a specific "heap partition"
+// that will be used to satisfy the allocation. Different partitions are
+// guaranteed to exist in separate address spaces, including being separate from
+// the main system heap. If the contained objects are all freed, physical memory
+// is returned to the system but the address space remains reserved.
+// See PartitionAlloc.md for other security properties PartitionAlloc provides.
+//
+// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
+// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
+// minimize the instruction count to the fullest extent possible, the
+// PartitionRoot is really just a header adjacent to other data areas provided
+// by the allocator class.
+//
+// The PartitionAlloc() variant of the API has the following caveats:
+// - Allocations and frees against a single partition must be single threaded.
+// - Allocations must not exceed a max size, chosen at compile-time via a
+// templated parameter to PartitionAllocator.
+// - Allocation sizes must be aligned to the system pointer size.
+// - Allocations are bucketed exactly according to size.
+//
+// And for PartitionAllocGeneric():
+// - Multi-threaded use against a single partition is ok; locking is handled.
+// - Allocations of any arbitrary size can be handled (subject to a limit of
+// INT_MAX bytes for security reasons).
+// - Bucketing is by approximate size, for example an allocation of 4000 bytes
+// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try to
+// keep worst-case waste to ~10%.
+//
+// The allocators are designed to be extremely fast, thanks to the following
+// properties and design:
+// - Just two (reasonably predictable) branches in the hot / fast path for
+//   both allocating and, significantly, freeing.
+// - A minimal number of operations in the hot / fast path, with the slow
+//   paths kept in separate functions so the fast paths remain small enough
+//   to inline.
+// - Each partition page (which is usually multiple physical pages) has a
+//   metadata structure which allows fast mapping of free() address to an
+//   underlying bucket.
+// - Supports a lock-free API for fast performance in single-threaded cases.
+// - The freelist for a given bucket is split across a number of partition
+//   pages, enabling various simple tricks to try and minimize fragmentation.
+// - Fine-grained bucket sizes leading to less waste and better packing.
+//
+// The following security properties could be investigated in the future:
+// - Per-object bucketing (instead of per-size) is mostly available in the
+// API, but not used yet.
+// - No randomness of freelist entries or bucket position.
+// - Better checking for wild pointers in free().
+// - Better freelist masking function to guarantee fault on 32-bit.
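+//
+// A minimal usage sketch (illustrative only; PartitionAllocatorGeneric is the
+// allocator class referenced above, with assumed init() and root() members):
+//
+//   PartitionAllocatorGeneric allocator;
+//   allocator.init();
+//   void* ptr = PartitionAllocGeneric(allocator.root(), 128, "MyTypeName");
+//   ...
+//   PartitionFreeGeneric(allocator.root(), ptr);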
+
+#include <limits.h>
+#include <string.h>
+
+#include "third_party/base/allocator/partition_allocator/page_allocator.h"
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+#include "third_party/base/bits.h"
+#include "third_party/base/compiler_specific.h"
+#include "third_party/base/logging.h"
+#include "third_party/base/sys_byteorder.h"
+#include "third_party/build/build_config.h"
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#include <stdlib.h>
+#endif
+
+namespace pdfium {
+namespace base {
+
+// Allocation granularity of sizeof(void*) bytes.
+static const size_t kAllocationGranularity = sizeof(void*);
+static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
+static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
+
+// Underlying partition storage pages are a power-of-two size. It is typical
+// for a partition page to be based on multiple system pages. Most references to
+// "page" refer to partition pages.
+// We also have the concept of "super pages" -- these are the underlying system
+// allocations we make. Super pages contain multiple partition pages inside them
+// and include space for a small amount of metadata per partition page.
+// Inside super pages, we store "slot spans". A slot span is a contiguous range
+// of one or more partition pages that stores allocations of the same size.
+// Slot span sizes are adjusted depending on the allocation size, to make sure
+// the packing does not lead to unused (wasted) space at the end of the last
+// system page of the span. For our current max slot span size of 64k and other
+// constant values, we pack _all_ PartitionAllocGeneric() sizes perfectly up
+// against the end of a system page.
+static const size_t kPartitionPageShift = 14;  // 16KB
+static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
+static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
+static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
+static const size_t kMaxPartitionPagesPerSlotSpan = 4;
+
+// To avoid fragmentation via never-used freelist entries, we hand out partition
+// freelist sections gradually, in units of the dominant system page size.
+// What we're actually doing is avoiding filling the full partition page (16 KB)
+// with freelist pointers right away. Writing freelist pointers will fault and
+// dirty a private page, which is very wasteful if we never actually store
+// objects there.
+static const size_t kNumSystemPagesPerPartitionPage =
+    kPartitionPageSize / kSystemPageSize;
+static const size_t kMaxSystemPagesPerSlotSpan =
+    kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
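+
+// For example, with 4KB system pages a partition page contains
+// 16KB / 4KB == 4 system pages, and a slot span is at most
+// 4 * 4 == 16 system pages (64KB).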
+
+// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
+// These chunks are called "super pages". We do this so that we can store
+// metadata in the first few pages of each 2MB aligned section. This leads to
+// a very fast free(). We specifically choose 2MB because this virtual address
+// block represents a full but single PTE allocation on ARM, ia32 and x64.
+//
+// The layout of the super page is as follows. The sizes below are the same
+// for 32 bit and 64 bit.
+//
+//   | Guard page (4KB)    |
+//   | Metadata page (4KB) |
+//   | Guard pages (8KB)   |
+//   | Slot span           |
+//   | Slot span           |
+//   | ...                 |
+//   | Slot span           |
+//   | Guard page (4KB)    |
+//
+//   - Each slot span is a contiguous range of one or more PartitionPages.
+//   - The metadata page has the following format. Note that the PartitionPage
+//     that is not at the head of a slot span is "unused". In other words,
+//     the metadata for the slot span is stored only in the first PartitionPage
+//     of the slot span. Metadata accesses to other PartitionPages are
+//     redirected to the first PartitionPage.
+//
+//     | SuperPageExtentEntry (32B)                 |
+//     | PartitionPage of slot span 1 (32B, used)   |
+//     | PartitionPage of slot span 1 (32B, unused) |
+//     | PartitionPage of slot span 1 (32B, unused) |
+//     | PartitionPage of slot span 2 (32B, used)   |
+//     | PartitionPage of slot span 3 (32B, used)   |
+//     | ...                                        |
+//     | PartitionPage of slot span N (32B, unused) |
+//
+// A direct-mapped allocation uses a similar layout so that it can masquerade
+// as a super page:
+//
+//     | Guard page (4KB)     |
+//     | Metadata page (4KB)  |
+//     | Guard pages (8KB)    |
+//     | Direct mapped object |
+//     | Guard page (4KB)     |
+//
+//    - The metadata page has the following layout:
+//
+//     | SuperPageExtentEntry (32B)    |
+//     | PartitionPage (32B)           |
+//     | PartitionBucket (32B)         |
+//     | PartitionDirectMapExtent (8B) |
+static const size_t kSuperPageShift = 21;  // 2MB
+static const size_t kSuperPageSize = 1 << kSuperPageShift;
+static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
+static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
+static const size_t kNumPartitionPagesPerSuperPage =
+    kSuperPageSize / kPartitionPageSize;
+
+static const size_t kPageMetadataShift = 5;  // 32 bytes per partition page.
+static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
+
+// The following kGeneric* constants apply to the generic variants of the API.
+// The "order" of an allocation is closely related to the power-of-two size of
+// the allocation. More precisely, the order is the bit index of the
+// most-significant bit in the allocation size, where the bit numbering starts
+// at index 1 for the least-significant bit.
+// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
+// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
+static const size_t kGenericMinBucketedOrder = 4;  // 8 bytes.
+static const size_t kGenericMaxBucketedOrder =
+    20;  // Largest bucketed order (base bucket 1 << 19: 512KB -> almost 1MB).
+static const size_t kGenericNumBucketedOrders =
+    (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
+// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
+// 160, ..., 240:
+static const size_t kGenericNumBucketsPerOrderBits = 3;
+static const size_t kGenericNumBucketsPerOrder =
+    1 << kGenericNumBucketsPerOrderBits;
+static const size_t kGenericNumBuckets =
+    kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
+static const size_t kGenericSmallestBucket = 1
+                                             << (kGenericMinBucketedOrder - 1);
+static const size_t kGenericMaxBucketSpacing =
+    1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
+static const size_t kGenericMaxBucketed =
+    (1 << (kGenericMaxBucketedOrder - 1)) +
+    ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
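+// Worked example for the largest order: the order-20 base bucket is
+// 1 << 19 == 512KB, the spacing between its buckets is 1 << (19 - 3) == 64KB,
+// and kGenericMaxBucketed is 512KB + 7 * 64KB == 960KB ("almost 1MB" above).
+// Anything larger is direct mapped.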
+static const size_t kGenericMinDirectMappedDownsize =
+    kGenericMaxBucketed +
+    1;  // Limit when downsizing a direct mapping using realloc().
+static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
+static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
+
+// Constants for the memory reclaim logic.
+static const size_t kMaxFreeableSpans = 16;
+
+// If the total size in bytes of allocated but not committed pages exceeds this
+// value (probably an "out of virtual address space" condition), a special
+// crash stack trace is generated at |PartitionOutOfMemory|. This is to
+// distinguish "out of virtual address space" from "out of physical memory" in
+// crash reports.
+static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024;  // 1GiB
+
+#if DCHECK_IS_ON()
+// These two byte values match tcmalloc.
+static const unsigned char kUninitializedByte = 0xAB;
+static const unsigned char kFreedByte = 0xCD;
+static const size_t kCookieSize =
+    16;  // Handles alignment up to XMM instructions on Intel.
+static const unsigned char kCookieValue[kCookieSize] = {
+    0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
+    0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
+#endif
+
+struct PartitionBucket;
+struct PartitionRootBase;
+
+struct PartitionFreelistEntry {
+  PartitionFreelistEntry* next;
+};
+
+// Some notes on page states. A page can be in one of four major states:
+// 1) Active.
+// 2) Full.
+// 3) Empty.
+// 4) Decommitted.
+// An active page has available free slots. A full page has no free slots. An
+// empty page has no allocated slots, and a decommitted page is an empty page
+// that had its backing memory released back to the system.
+// There are two linked lists tracking the pages. The "active page" list is an
+// approximation of a list of active pages. It is an approximation because
+// full, empty and decommitted pages may briefly be present in the list until
+// we next do a scan over it.
+// The "empty page" list is an accurate list of pages which are either empty
+// or decommitted.
+//
+// The significant page transitions are:
+// - free() will detect when a full page has a slot free()'d and immediately
+// return the page to the head of the active list.
+// - free() will detect when a page is fully emptied. It _may_ add it to the
+// empty list or it _may_ leave it on the active list until a future list scan.
+// - malloc() _may_ scan the active page list in order to fulfill the request.
+// If it does this, full, empty and decommitted pages encountered will be
+// booted out of the active list. If there are no suitable active pages found,
+// an empty or decommitted page (if one exists) will be pulled from the empty
+// list on to the active list.
+struct PartitionPage {
+  PartitionFreelistEntry* freelist_head;
+  PartitionPage* next_page;
+  PartitionBucket* bucket;
+  // Deliberately signed, 0 for empty or decommitted page, -n for full pages:
+  int16_t num_allocated_slots;
+  uint16_t num_unprovisioned_slots;
+  uint16_t page_offset;
+  int16_t empty_cache_index;  // -1 if not in the empty cache.
+};
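+
+// num_allocated_slots examples, for a slot span holding 16 slots: positive
+// (1..16) while the span is active, negated (-16) once the span is swept into
+// the full state (see PartitionFreeSlowPath()), and 0 again when every slot
+// has been freed (empty or decommitted).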
+
+struct PartitionBucket {
+  PartitionPage* active_pages_head;  // Accessed most in hot path => goes first.
+  PartitionPage* empty_pages_head;
+  PartitionPage* decommitted_pages_head;
+  uint32_t slot_size;
+  unsigned num_system_pages_per_slot_span : 8;
+  unsigned num_full_pages : 24;
+};
+
+// An "extent" is a span of consecutive superpages. We link to the partition's
+// next extent (if there is one) at the very start of a superpage's metadata
+// area.
+struct PartitionSuperPageExtentEntry {
+  PartitionRootBase* root;
+  char* super_page_base;
+  char* super_pages_end;
+  PartitionSuperPageExtentEntry* next;
+};
+
+struct PartitionDirectMapExtent {
+  PartitionDirectMapExtent* next_extent;
+  PartitionDirectMapExtent* prev_extent;
+  PartitionBucket* bucket;
+  size_t map_size;  // Mapped size, not including guard pages and meta-data.
+};
+
+struct BASE_EXPORT PartitionRootBase {
+  size_t total_size_of_committed_pages;
+  size_t total_size_of_super_pages;
+  size_t total_size_of_direct_mapped_pages;
+  // Invariant: total_size_of_committed_pages <=
+  //                total_size_of_super_pages +
+  //                total_size_of_direct_mapped_pages.
+  unsigned num_buckets;
+  unsigned max_allocation;
+  bool initialized;
+  char* next_super_page;
+  char* next_partition_page;
+  char* next_partition_page_end;
+  PartitionSuperPageExtentEntry* current_extent;
+  PartitionSuperPageExtentEntry* first_extent;
+  PartitionDirectMapExtent* direct_map_list;
+  PartitionPage* global_empty_page_ring[kMaxFreeableSpans];
+  int16_t global_empty_page_ring_index;
+  uintptr_t inverted_self;
+
+  static subtle::SpinLock gInitializedLock;
+  static bool gInitialized;
+  // gSeedPage is used as a sentinel to indicate that there is no page
+  // in the active page list. We can use nullptr, but in that case we need
+  // to add a null-check branch to the hot allocation path. We want to avoid
+  // that.
+  static PartitionPage gSeedPage;
+  static PartitionBucket gPagedBucket;
+  // gOomHandlingFunction is invoked when PartitionAlloc runs out of memory.
+  static void (*gOomHandlingFunction)();
+};
+
+// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
+struct PartitionRoot : public PartitionRootBase {
+  // The PartitionAlloc templated class ensures the following is correct.
+  ALWAYS_INLINE PartitionBucket* buckets() {
+    return reinterpret_cast<PartitionBucket*>(this + 1);
+  }
+  ALWAYS_INLINE const PartitionBucket* buckets() const {
+    return reinterpret_cast<const PartitionBucket*>(this + 1);
+  }
+};
+
+// Never instantiate a PartitionRootGeneric directly, instead use
+// PartitionAllocatorGeneric.
+struct PartitionRootGeneric : public PartitionRootBase {
+  subtle::SpinLock lock;
+  // Some pre-computed constants.
+  size_t order_index_shifts[kBitsPerSizeT + 1];
+  size_t order_sub_index_masks[kBitsPerSizeT + 1];
+  // The bucket lookup table lets us map a size_t to a bucket quickly.
+  // The trailing +1 caters for the overflow case for very large allocation
+  // sizes. It is one flat array instead of a 2D array because in the 2D
+  // world, we'd need to index array[order][max + 1], which risks undefined
+  // behavior.
+  PartitionBucket*
+      bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1];
+  PartitionBucket buckets[kGenericNumBuckets];
+};
+
+// Flags for PartitionAllocGenericFlags.
+enum PartitionAllocFlags {
+  PartitionAllocReturnNull = 1 << 0,
+};
+
+// Struct used to retrieve total memory usage of a partition. Used by
+// PartitionStatsDumper implementation.
+struct PartitionMemoryStats {
+  size_t total_mmapped_bytes;    // Total bytes mmapped from the system.
+  size_t total_committed_bytes;  // Total size of committed pages.
+  size_t total_resident_bytes;   // Total bytes provisioned by the partition.
+  size_t total_active_bytes;     // Total active bytes in the partition.
+  size_t total_decommittable_bytes;  // Total bytes that could be decommitted.
+  size_t total_discardable_bytes;    // Total bytes that could be discarded.
+};
+
+// Struct used to retrieve memory statistics about a partition bucket. Used by
+// PartitionStatsDumper implementation.
+struct PartitionBucketMemoryStats {
+  bool is_valid;       // Used to check whether the stats are valid.
+  bool is_direct_map;  // True if this is a direct mapping; size will not be
+                       // unique.
+  uint32_t bucket_slot_size;     // The size of the slot in bytes.
+  uint32_t allocated_page_size;  // Total size the partition page allocated from
+                                 // the system.
+  uint32_t active_bytes;         // Total active bytes used in the bucket.
+  uint32_t resident_bytes;       // Total bytes provisioned in the bucket.
+  uint32_t decommittable_bytes;  // Total bytes that could be decommitted.
+  uint32_t discardable_bytes;    // Total bytes that could be discarded.
+  uint32_t num_full_pages;       // Number of pages with all slots allocated.
+  uint32_t num_active_pages;     // Number of pages that have at least one
+                                 // provisioned slot.
+  uint32_t num_empty_pages;      // Number of pages that are empty
+                                 // but not decommitted.
+  uint32_t num_decommitted_pages;  // Number of pages that are empty
+                                   // and decommitted.
+};
+
+// Interface that is passed to PartitionDumpStats and
+// PartitionDumpStatsGeneric for using the memory statistics.
+class BASE_EXPORT PartitionStatsDumper {
+ public:
+  // Called to dump total memory used by partition, once per partition.
+  virtual void PartitionDumpTotals(const char* partition_name,
+                                   const PartitionMemoryStats*) = 0;
+
+  // Called to dump stats about buckets, for each bucket.
+  virtual void PartitionsDumpBucketStats(const char* partition_name,
+                                         const PartitionBucketMemoryStats*) = 0;
+};
+
+BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
+BASE_EXPORT void PartitionAllocInit(PartitionRoot*,
+                                    size_t num_buckets,
+                                    size_t max_allocation);
+BASE_EXPORT void PartitionAllocGenericInit(PartitionRootGeneric*);
+
+enum PartitionPurgeFlags {
+  // Decommitting the ring list of empty pages is reasonably fast.
+  PartitionPurgeDecommitEmptyPages = 1 << 0,
+  // Discarding unused system pages is slower, because it involves walking all
+  // freelists in all active partition pages of all buckets >= system page
+  // size. It often frees a similar amount of memory to decommitting the empty
+  // pages, though.
+  PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
+};
+
+BASE_EXPORT void PartitionPurgeMemory(PartitionRoot*, int);
+BASE_EXPORT void PartitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
+
+BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*,
+                                                  int,
+                                                  size_t,
+                                                  PartitionBucket*);
+BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*);
+BASE_EXPORT NOINLINE void* PartitionReallocGeneric(PartitionRootGeneric*,
+                                                   void*,
+                                                   size_t,
+                                                   const char* type_name);
+
+BASE_EXPORT void PartitionDumpStats(PartitionRoot*,
+                                    const char* partition_name,
+                                    bool is_light_dump,
+                                    PartitionStatsDumper*);
+BASE_EXPORT void PartitionDumpStatsGeneric(PartitionRootGeneric*,
+                                           const char* partition_name,
+                                           bool is_light_dump,
+                                           PartitionStatsDumper*);
+
+class BASE_EXPORT PartitionAllocHooks {
+ public:
+  typedef void AllocationHook(void* address, size_t, const char* type_name);
+  typedef void FreeHook(void* address);
+
+  static void SetAllocationHook(AllocationHook* hook) {
+    allocation_hook_ = hook;
+  }
+  static void SetFreeHook(FreeHook* hook) { free_hook_ = hook; }
+
+  static void AllocationHookIfEnabled(void* address,
+                                      size_t size,
+                                      const char* type_name) {
+    AllocationHook* hook = allocation_hook_;
+    if (UNLIKELY(hook != nullptr))
+      hook(address, size, type_name);
+  }
+
+  static void FreeHookIfEnabled(void* address) {
+    FreeHook* hook = free_hook_;
+    if (UNLIKELY(hook != nullptr))
+      hook(address);
+  }
+
+  static void ReallocHookIfEnabled(void* old_address,
+                                   void* new_address,
+                                   size_t size,
+                                   const char* type_name) {
+    // Report a reallocation as a free followed by an allocation.
+    AllocationHook* allocation_hook = allocation_hook_;
+    FreeHook* free_hook = free_hook_;
+    if (UNLIKELY(allocation_hook && free_hook)) {
+      free_hook(old_address);
+      allocation_hook(new_address, size, type_name);
+    }
+  }
+
+ private:
+  // Pointers to hook functions that PartitionAlloc will call on allocation and
+  // free if the pointers are non-null.
+  static AllocationHook* allocation_hook_;
+  static FreeHook* free_hook_;
+};
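+
+// A sketch of installing a hook (illustrative; LogAlloc is a hypothetical
+// function matching the AllocationHook typedef above):
+//
+//   void LogAlloc(void* address, size_t size, const char* type_name) {
+//     // Forward to a tracing or accounting subsystem of your choice.
+//   }
+//   ...
+//   PartitionAllocHooks::SetAllocationHook(&LogAlloc);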
+
+ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistMask(
+    PartitionFreelistEntry* ptr) {
+// We use bswap on little endian as a fast mask for two reasons:
+// 1) If an object is freed and its vtable used where the attacker doesn't
+// get the chance to run allocations between the free and use, the vtable
+// dereference is likely to fault.
+// 2) If the attacker has a linear buffer overflow and elects to try and
+// corrupt a freelist pointer, partial pointer overwrite attacks are
+// thwarted.
+// For big endian, similar guarantees are arrived at with a negation.
+#if defined(ARCH_CPU_BIG_ENDIAN)
+  uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
+#else
+  uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
+#endif
+  return reinterpret_cast<PartitionFreelistEntry*>(masked);
+}
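+
+// For example, on little-endian x86-64 the freelist pointer 0x00007fff12345678
+// is stored as ByteSwapUintPtrT(...) == 0x78563412ff7f0000, a non-canonical
+// address that faults if dereferenced before being unmasked. The mask is also
+// self-inverse: applying PartitionFreelistMask() twice returns the original
+// pointer.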
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
+#if DCHECK_IS_ON()
+  // Add space for cookies, checking for integer overflow. TODO(palmer):
+  // Investigate the performance and code size implications of using
+  // CheckedNumeric throughout PA.
+  DCHECK(size + (2 * kCookieSize) > size);
+  size += 2 * kCookieSize;
+#endif
+  return size;
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
+#if DCHECK_IS_ON()
+  // Remove space for cookies.
+  DCHECK(size >= 2 * kCookieSize);
+  size -= 2 * kCookieSize;
+#endif
+  return size;
+}
+
+ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
+#if DCHECK_IS_ON()
+  // The value given to the application is actually just after the cookie.
+  ptr = static_cast<char*>(ptr) - kCookieSize;
+#endif
+  return ptr;
+}
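+
+// In DCHECK builds the layout of a slot is therefore
+// [cookie (16B)][user data][cookie (16B)]: a 16-byte request occupies
+// 16 + 2 * kCookieSize == 48 bytes, and the pointer handed to the application
+// points just past the leading cookie.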
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
+#if DCHECK_IS_ON()
+  unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
+  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+    *cookie_ptr = kCookieValue[i];
+#endif
+}
+
+ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
+#if DCHECK_IS_ON()
+  unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
+  for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+    DCHECK(*cookie_ptr == kCookieValue[i]);
+#endif
+}
+
+ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
+  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
+  DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
+  // The metadata area is exactly one system page (the guard page) into the
+  // super page.
+  return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
+}
+
+ALWAYS_INLINE PartitionPage* PartitionPointerToPageNoAlignmentCheck(void* ptr) {
+  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
+  char* super_page_ptr =
+      reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
+  uintptr_t partition_page_index =
+      (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
+  // Index 0 is invalid because it is the metadata and guard area and
+  // the last index is invalid because it is a guard page.
+  DCHECK(partition_page_index);
+  DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+  PartitionPage* page = reinterpret_cast<PartitionPage*>(
+      PartitionSuperPageToMetadataArea(super_page_ptr) +
+      (partition_page_index << kPageMetadataShift));
+  // Partition pages in the same slot span can share the same page object.
+  // Adjust for that.
+  size_t delta = page->page_offset << kPageMetadataShift;
+  page =
+      reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
+  return page;
+}
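+
+// Worked example (assuming 4KB system pages): for a pointer at offset 0x9000
+// into a super page, partition_page_index == 0x9000 >> 14 == 2, so its
+// PartitionPage metadata lives at super_page_base + kSystemPageSize + 2 * 32,
+// before any page_offset adjustment.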
+
+ALWAYS_INLINE void* PartitionPageToPointer(const PartitionPage* page) {
+  uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
+  uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
+  DCHECK(super_page_offset > kSystemPageSize);
+  DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
+                                                kPageMetadataSize));
+  uintptr_t partition_page_index =
+      (super_page_offset - kSystemPageSize) >> kPageMetadataShift;
+  // Index 0 is invalid because it is the metadata area and the last index is
+  // invalid because it is a guard page.
+  DCHECK(partition_page_index);
+  DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+  uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
+  void* ret = reinterpret_cast<void*>(
+      super_page_base + (partition_page_index << kPartitionPageShift));
+  return ret;
+}
+
+ALWAYS_INLINE PartitionPage* PartitionPointerToPage(void* ptr) {
+  PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(ptr);
+  // Checks that the pointer offset from the start of the slot span is a
+  // multiple of the slot size.
+  DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
+            reinterpret_cast<uintptr_t>(PartitionPageToPointer(page))) %
+           page->bucket->slot_size));
+  return page;
+}
+
+ALWAYS_INLINE bool PartitionBucketIsDirectMapped(
+    const PartitionBucket* bucket) {
+  return !bucket->num_system_pages_per_slot_span;
+}
+
+ALWAYS_INLINE size_t PartitionBucketBytes(const PartitionBucket* bucket) {
+  return bucket->num_system_pages_per_slot_span * kSystemPageSize;
+}
+
+ALWAYS_INLINE uint16_t PartitionBucketSlots(const PartitionBucket* bucket) {
+  return static_cast<uint16_t>(PartitionBucketBytes(bucket) /
+                               bucket->slot_size);
+}
+
+ALWAYS_INLINE size_t* PartitionPageGetRawSizePtr(PartitionPage* page) {
+  // For single-slot buckets which span more than one partition page, we
+  // have some spare metadata space to store the raw allocation size. We
+  // can use this to report better statistics.
+  PartitionBucket* bucket = page->bucket;
+  if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
+    return nullptr;
+
+  DCHECK((bucket->slot_size % kSystemPageSize) == 0);
+  DCHECK(PartitionBucketIsDirectMapped(bucket) ||
+         PartitionBucketSlots(bucket) == 1);
+  page++;
+  return reinterpret_cast<size_t*>(&page->freelist_head);
+}
+
+ALWAYS_INLINE size_t PartitionPageGetRawSize(PartitionPage* page) {
+  size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page);
+  if (UNLIKELY(raw_size_ptr != nullptr))
+    return *raw_size_ptr;
+  return 0;
+}
+
+ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) {
+  PartitionSuperPageExtentEntry* extent_entry =
+      reinterpret_cast<PartitionSuperPageExtentEntry*>(
+          reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
+  return extent_entry->root;
+}
+
+ALWAYS_INLINE bool PartitionPointerIsValid(void* ptr) {
+  PartitionPage* page = PartitionPointerToPage(ptr);
+  PartitionRootBase* root = PartitionPageToRoot(page);
+  return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
+}
+
+ALWAYS_INLINE void* PartitionBucketAlloc(PartitionRootBase* root,
+                                         int flags,
+                                         size_t size,
+                                         PartitionBucket* bucket) {
+  PartitionPage* page = bucket->active_pages_head;
+  // Check that this page is neither full nor freed.
+  DCHECK(page->num_allocated_slots >= 0);
+  void* ret = page->freelist_head;
+  if (LIKELY(ret != 0)) {
+    // If these asserts fire, you probably corrupted memory.
+    DCHECK(PartitionPointerIsValid(ret));
+    // All large allocations must go through the slow path to correctly
+    // update the size metadata.
+    DCHECK(PartitionPageGetRawSize(page) == 0);
+    PartitionFreelistEntry* new_head =
+        PartitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
+    page->freelist_head = new_head;
+    page->num_allocated_slots++;
+  } else {
+    ret = PartitionAllocSlowPath(root, flags, size, bucket);
+    DCHECK(!ret || PartitionPointerIsValid(ret));
+  }
+#if DCHECK_IS_ON()
+  if (!ret)
+    return 0;
+  // Fill the uninitialized pattern, and write the cookies.
+  page = PartitionPointerToPage(ret);
+  size_t slot_size = page->bucket->slot_size;
+  size_t raw_size = PartitionPageGetRawSize(page);
+  if (raw_size) {
+    DCHECK(raw_size == size);
+    slot_size = raw_size;
+  }
+  size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(slot_size);
+  char* char_ret = static_cast<char*>(ret);
+  // The value given to the application is actually just after the cookie.
+  ret = char_ret + kCookieSize;
+  memset(ret, kUninitializedByte, no_cookie_size);
+  PartitionCookieWriteValue(char_ret);
+  PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
+#endif
+  return ret;
+}
+
+ALWAYS_INLINE void* PartitionAlloc(PartitionRoot* root,
+                                   size_t size,
+                                   const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  void* result = malloc(size);
+  CHECK(result);
+  return result;
+#else
+  size_t requested_size = size;
+  size = PartitionCookieSizeAdjustAdd(size);
+  DCHECK(root->initialized);
+  size_t index = size >> kBucketShift;
+  DCHECK(index < root->num_buckets);
+  DCHECK(size == index << kBucketShift);
+  PartitionBucket* bucket = &root->buckets()[index];
+  void* result = PartitionBucketAlloc(root, 0, size, bucket);
+  PartitionAllocHooks::AllocationHookIfEnabled(result, requested_size,
+                                               type_name);
+  return result;
+#endif  // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+}
+
+ALWAYS_INLINE void PartitionFreeWithPage(void* ptr, PartitionPage* page) {
+// If these asserts fire, you probably corrupted memory.
+#if DCHECK_IS_ON()
+  size_t slot_size = page->bucket->slot_size;
+  size_t raw_size = PartitionPageGetRawSize(page);
+  if (raw_size)
+    slot_size = raw_size;
+  PartitionCookieCheckValue(ptr);
+  PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
+                            kCookieSize);
+  memset(ptr, kFreedByte, slot_size);
+#endif
+  DCHECK(page->num_allocated_slots);
+  PartitionFreelistEntry* freelist_head = page->freelist_head;
+  DCHECK(!freelist_head || PartitionPointerIsValid(freelist_head));
+  CHECK(ptr != freelist_head);  // Catches an immediate double free.
+  // Look for double free one level deeper in debug.
+  DCHECK(!freelist_head || ptr != PartitionFreelistMask(freelist_head->next));
+  PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
+  entry->next = PartitionFreelistMask(freelist_head);
+  page->freelist_head = entry;
+  --page->num_allocated_slots;
+  if (UNLIKELY(page->num_allocated_slots <= 0)) {
+    PartitionFreeSlowPath(page);
+  } else {
+    // All single-slot allocations must go through the slow path to
+    // correctly update the size metadata.
+    DCHECK(PartitionPageGetRawSize(page) == 0);
+  }
+}
+
+ALWAYS_INLINE void PartitionFree(void* ptr) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  free(ptr);
+#else
+  PartitionAllocHooks::FreeHookIfEnabled(ptr);
+  ptr = PartitionCookieFreePointerAdjust(ptr);
+  DCHECK(PartitionPointerIsValid(ptr));
+  PartitionPage* page = PartitionPointerToPage(ptr);
+  PartitionFreeWithPage(ptr, page);
+#endif
+}
+
+ALWAYS_INLINE PartitionBucket* PartitionGenericSizeToBucket(
+    PartitionRootGeneric* root,
+    size_t size) {
+  size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
+  // The order index is simply the next few bits after the most significant bit.
+  size_t order_index = (size >> root->order_index_shifts[order]) &
+                       (kGenericNumBucketsPerOrder - 1);
+  // And if the remaining bits are non-zero we must bump the bucket up.
+  size_t sub_order_index = size & root->order_sub_index_masks[order];
+  PartitionBucket* bucket =
+      root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
+                           order_index + !!sub_order_index];
+  DCHECK(!bucket->slot_size || bucket->slot_size >= size);
+  DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
+  return bucket;
+}
+
+ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
+                                               int flags,
+                                               size_t size,
+                                               const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  void* result = malloc(size);
+  CHECK(result || flags & PartitionAllocReturnNull);
+  return result;
+#else
+  DCHECK(root->initialized);
+  size_t requested_size = size;
+  size = PartitionCookieSizeAdjustAdd(size);
+  PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
+  void* ret = nullptr;
+  {
+    subtle::SpinLock::Guard guard(root->lock);
+    ret = PartitionBucketAlloc(root, flags, size, bucket);
+  }
+  PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name);
+  return ret;
+#endif
+}
+
+ALWAYS_INLINE void* PartitionAllocGeneric(PartitionRootGeneric* root,
+                                          size_t size,
+                                          const char* type_name) {
+  return PartitionAllocGenericFlags(root, 0, size, type_name);
+}
+
+ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  free(ptr);
+#else
+  DCHECK(root->initialized);
+
+  if (UNLIKELY(!ptr))
+    return;
+
+  PartitionAllocHooks::FreeHookIfEnabled(ptr);
+  ptr = PartitionCookieFreePointerAdjust(ptr);
+  DCHECK(PartitionPointerIsValid(ptr));
+  PartitionPage* page = PartitionPointerToPage(ptr);
+  {
+    subtle::SpinLock::Guard guard(root->lock);
+    PartitionFreeWithPage(ptr, page);
+  }
+#endif
+}
+
+ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) {
+  // Caller must check that the size is not above the kGenericMaxDirectMapped
+  // limit before calling. This also guards against integer overflow in the
+  // calculation here.
+  DCHECK(size <= kGenericMaxDirectMapped);
+  return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
+}
+
+ALWAYS_INLINE size_t PartitionAllocActualSize(PartitionRootGeneric* root,
+                                              size_t size) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  return size;
+#else
+  DCHECK(root->initialized);
+  size = PartitionCookieSizeAdjustAdd(size);
+  PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
+  if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) {
+    size = bucket->slot_size;
+  } else if (size > kGenericMaxDirectMapped) {
+    // Too large to allocate => return the size unchanged.
+  } else {
+    DCHECK(bucket == &PartitionRootBase::gPagedBucket);
+    size = PartitionDirectMapSize(size);
+  }
+  return PartitionCookieSizeAdjustSubtract(size);
+#endif
+}
+
+ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+  return false;
+#else
+  return true;
+#endif
+}
+
+ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+  // No need to lock here. Only |ptr| being freed by another thread could
+  // cause trouble, and the caller is responsible for that not happening.
+  DCHECK(PartitionAllocSupportsGetSize());
+  ptr = PartitionCookieFreePointerAdjust(ptr);
+  DCHECK(PartitionPointerIsValid(ptr));
+  PartitionPage* page = PartitionPointerToPage(ptr);
+  size_t size = page->bucket->slot_size;
+  return PartitionCookieSizeAdjustSubtract(size);
+}
+
+// N (or more accurately, N - sizeof(void*)) represents the largest size in
+// bytes that will be handled by a SizeSpecificPartitionAllocator.
+// Attempts to PartitionAlloc() more than this amount will fail.
+template <size_t N>
+class SizeSpecificPartitionAllocator {
+ public:
+  static const size_t kMaxAllocation = N - kAllocationGranularity;
+  static const size_t kNumBuckets = N / kAllocationGranularity;
+  void init() {
+    PartitionAllocInit(&partition_root_, kNumBuckets, kMaxAllocation);
+  }
+  ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
+
+ private:
+  PartitionRoot partition_root_;
+  PartitionBucket actual_buckets_[kNumBuckets];
+};
+
+class PartitionAllocatorGeneric {
+ public:
+  void init() { PartitionAllocGenericInit(&partition_root_); }
+  ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
+
+ private:
+  PartitionRootGeneric partition_root_;
+};
+
+}  // namespace base
+}  // namespace pdfium
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
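As a reviewer aid (not part of this CL; callers land in a later CL, and the
names below are hypothetical), here is a minimal usage sketch of the two
allocator front-ends defined above:

  // Generic partition: serves arbitrary sizes and takes the partition's
  // SpinLock internally, so it may be called from multiple threads. The
  // allocator must be a global; its SpinLock requires that (see spin_lock.h).
  static pdfium::base::PartitionAllocatorGeneric g_partition;

  void GenericExample() {
    g_partition.init();
    void* p = pdfium::base::PartitionAllocGeneric(g_partition.root(), 123,
                                                  "ExampleType");
    pdfium::base::PartitionFreeGeneric(g_partition.root(), p);
  }

  // Size-specific partition: N fixes the bucket table at compile time and
  // caps the largest allocation at N - kAllocationGranularity. Note this
  // variant does no locking of its own.
  static pdfium::base::SizeSpecificPartitionAllocator<1024> g_fixed_partition;

  void SizeSpecificExample() {
    g_fixed_partition.init();
    void* q = pdfium::base::PartitionAlloc(g_fixed_partition.root(), 64,
                                           "ExampleType");
    pdfium::base::PartitionFree(q);
  }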
diff --git a/third_party/base/allocator/partition_allocator/spin_lock.cc b/third_party/base/allocator/partition_allocator/spin_lock.cc
new file mode 100644
index 0000000..803e4d6
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/spin_lock.cc
@@ -0,0 +1,84 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/base/allocator/partition_allocator/spin_lock.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <sched.h>
+#endif
+
+// The YIELD_PROCESSOR macro wraps an architecture-specific instruction that
+// informs the processor we're in a busy wait, so it can handle the branch more
+// intelligently and e.g. reduce power to our core or give more resources to the
+// other hyper-thread on this core. See the following for context:
+// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
+//
+// The YIELD_THREAD macro tells the OS to relinquish our quantum. This is
+// basically a worst-case fallback, and if you're hitting it with any frequency
+// you really should be using a proper lock (such as |base::Lock|) rather
+// than these spinlocks.
+#if defined(OS_WIN)
+#define YIELD_PROCESSOR YieldProcessor()
+#define YIELD_THREAD SwitchToThread()
+#elif defined(COMPILER_GCC) || defined(__clang__)
+#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
+#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif defined(ARCH_CPU_ARMEL) || defined(ARCH_CPU_ARM64)
+#define YIELD_PROCESSOR __asm__ __volatile__("yield")
+#elif defined(ARCH_CPU_MIPSEL)
+// The MIPS32 docs state that the PAUSE instruction is a no-op on older
+// architectures (first added in MIPS32r2). To avoid assembler errors when
+// targeting pre-r2, we must encode the instruction manually.
+#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
+#elif defined(ARCH_CPU_MIPS64EL) && __mips_isa_rev >= 2
+// Don't bother using .word here, since r2 is the lowest MIPS64 revision that
+// Chromium supports.
+#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#endif
+#endif
+
+#ifndef YIELD_PROCESSOR
+#warning "Processor yield not supported on this architecture."
+#define YIELD_PROCESSOR ((void)0)
+#endif
+
+#ifndef YIELD_THREAD
+#if defined(OS_POSIX)
+#define YIELD_THREAD sched_yield()
+#else
+#warning "Thread yield not supported on this OS."
+#define YIELD_THREAD ((void)0)
+#endif
+#endif
+
+namespace pdfium {
+namespace base {
+namespace subtle {
+
+void SpinLock::LockSlow() {
+  // The value of |kYieldProcessorTries| is cargo culted from TCMalloc, Windows
+  // critical section defaults, and various other recommendations.
+  // TODO(jschuh): Further tuning may be warranted.
+  static const int kYieldProcessorTries = 1000;
+  do {
+    do {
+      for (int count = 0; count < kYieldProcessorTries; ++count) {
+        // Let the processor know we're spinning.
+        YIELD_PROCESSOR;
+        if (!lock_.load(std::memory_order_relaxed) &&
+            LIKELY(!lock_.exchange(true, std::memory_order_acquire)))
+          return;
+      }
+
+      // Give the OS a chance to schedule something on this core.
+      YIELD_THREAD;
+    } while (lock_.load(std::memory_order_relaxed));
+  } while (UNLIKELY(lock_.exchange(true, std::memory_order_acquire)));
+}
+
+}  // namespace subtle
+}  // namespace base
+}  // namespace pdfium
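The LockSlow() loop above is a classic test-and-test-and-set: the inner loop
spins on cheap relaxed loads (which keep the cache line in a shared state
rather than bouncing it between cores) and only attempts the acquiring
exchange once the lock has been observed free, with processor and thread
yields layered on top. Stripped of the yield tuning, the shape is the
following (illustrative only; not the shipped code):

  #include <atomic>

  void AcquireTTAS(std::atomic_int& lock) {
    while (lock.exchange(1, std::memory_order_acquire)) {  // test-and-set
      while (lock.load(std::memory_order_relaxed))         // test (read-only)
        YIELD_PROCESSOR;
    }
  }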
diff --git a/third_party/base/allocator/partition_allocator/spin_lock.h b/third_party/base/allocator/partition_allocator/spin_lock.h
new file mode 100644
index 0000000..7a42a29
--- /dev/null
+++ b/third_party/base/allocator/partition_allocator/spin_lock.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+
+#include "third_party/base/base_export.h"
+#include "third_party/base/compiler_specific.h"
+
+// SpinLock is a simple spinlock class based on the standard CPU primitive of
+// an atomic exchange on an int at a given memory address. These locks are
+// intended only for very short durations and assume a system with multiple
+// cores. For any potentially longer wait you should use a real lock, such as
+// |base::Lock|.
+//
+// |SpinLock|s MUST be globals. Using them as (e.g.) struct/class members will
+// result in an uninitialized lock, which is dangerously incorrect.
+
+namespace pdfium {
+namespace base {
+namespace subtle {
+
+class SpinLock {
+ public:
+  using Guard = std::lock_guard<SpinLock>;
+
+  ALWAYS_INLINE void lock() {
+    static_assert(sizeof(lock_) == sizeof(int),
+                  "int and lock_ are different sizes");
+    if (LIKELY(!lock_.exchange(true, std::memory_order_acquire)))
+      return;
+    LockSlow();
+  }
+
+  ALWAYS_INLINE void unlock() { lock_.store(false, std::memory_order_release); }
+
+ private:
+  // This is called if the initial attempt to acquire the lock fails. It's
+  // slower, but has a much better scheduling and power consumption behavior.
+  BASE_EXPORT void LockSlow();
+
+  std::atomic_int lock_;
+};
+
+}  // namespace subtle
+}  // namespace base
+}  // namespace pdfium
+
+#endif  // BASE_ALLOCATOR_PARTITION_ALLOCATOR_SPIN_LOCK_H
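A minimal usage sketch for SpinLock (hypothetical names; PartitionAlloc above
is the intended client):

  static pdfium::base::subtle::SpinLock g_lock;  // MUST be a global.
  static int g_shared_counter;

  void IncrementShared() {
    // Guard is std::lock_guard<SpinLock>: locks on construction and unlocks
    // when the scope exits.
    pdfium::base::subtle::SpinLock::Guard guard(g_lock);
    ++g_shared_counter;
  }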
diff --git a/third_party/base/base_export.h b/third_party/base/base_export.h
new file mode 100644
index 0000000..a0d6d9b
--- /dev/null
+++ b/third_party/base/base_export.h
@@ -0,0 +1,3 @@
+#ifndef BASE_EXPORT
+#define BASE_EXPORT
+#endif
diff --git a/third_party/base/bits.h b/third_party/base/bits.h
new file mode 100644
index 0000000..220be4b
--- /dev/null
+++ b/third_party/base/bits.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines some bit utilities.
+
+#ifndef BASE_BITS_H_
+#define BASE_BITS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "third_party/base/compiler_specific.h"
+#include "third_party/base/logging.h"
+
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
+namespace pdfium {
+namespace base {
+namespace bits {
+
+// Returns the integer i such that 2^i <= n < 2^(i+1)
+inline int Log2Floor(uint32_t n) {
+  if (n == 0)
+    return -1;
+  int log = 0;
+  uint32_t value = n;
+  for (int i = 4; i >= 0; --i) {
+    int shift = (1 << i);
+    uint32_t x = value >> shift;
+    if (x != 0) {
+      value = x;
+      log += shift;
+    }
+  }
+  DCHECK_EQ(value, 1u);
+  return log;
+}
+
+// Returns the integer i such that 2^(i-1) < n <= 2^i
+inline int Log2Ceiling(uint32_t n) {
+  if (n == 0) {
+    return -1;
+  } else {
+    // Log2Floor returns -1 for 0, so the following works correctly for n=1.
+    return 1 + Log2Floor(n - 1);
+  }
+}
+
+// Round up |size| to a multiple of alignment, which must be a power of two.
+inline size_t Align(size_t size, size_t alignment) {
+  DCHECK_EQ(alignment & (alignment - 1), 0u);
+  return (size + alignment - 1) & ~(alignment - 1);
+}
+
+// These functions count the number of leading zeros in a binary value, starting
+// with the most significant bit. C does not have an operator to do this, but
+// fortunately the various compilers have built-ins that map to fast underlying
+// processor instructions.
+#if defined(COMPILER_MSVC)
+
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+  unsigned long index;
+  return LIKELY(_BitScanReverse(&index, x)) ? (31 - index) : 32;
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+// MSVC only supplies _BitScanReverse64 when building for a 64-bit target.
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+  unsigned long index;
+  return LIKELY(_BitScanReverse64(&index, x)) ? (63 - index) : 64;
+}
+
+#endif
+
+#elif defined(COMPILER_GCC)
+
+// This is very annoying. __builtin_clz has undefined behaviour for an input of
+// 0, even though there's clearly a return value that makes sense, and even
+// though some processor clz instructions have defined behaviour for 0. We could
+// drop to raw __asm__ to do better, but we'll avoid doing that unless we see
+// proof that we need to.
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+  return LIKELY(x) ? __builtin_clz(x) : 32;
+}
+
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+  return LIKELY(x) ? __builtin_clzll(x) : 64;
+}
+
+#endif
+
+#if defined(ARCH_CPU_64_BITS)
+
+ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
+  return CountLeadingZeroBits64(x);
+}
+
+#else
+
+ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
+  return CountLeadingZeroBits32(x);
+}
+
+#endif
+
+}  // namespace bits
+}  // namespace base
+}  // namespace pdfium
+
+#endif  // BASE_BITS_H_
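A few hand-checkable examples of the helpers above (a sketch, not shipped
code):

  #include <assert.h>

  #include "third_party/base/bits.h"

  void BitsExamples() {
    // Log2Floor(n) yields the i with 2^i <= n < 2^(i+1).
    assert(pdfium::base::bits::Log2Floor(5) == 2);    // 4 <= 5 < 8
    assert(pdfium::base::bits::Log2Ceiling(5) == 3);  // 4 < 5 <= 8
    // Align rounds up to a power-of-two multiple.
    assert(pdfium::base::bits::Align(13, 8) == 16);
    // 32 minus the leading-zero count is a value's bit width;
    // PartitionGenericSizeToBucket uses the size_t flavor this way to derive
    // a bucket "order".
    assert(pdfium::base::bits::CountLeadingZeroBits32(1) == 31);
  }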
diff --git a/third_party/base/compiler_specific.h b/third_party/base/compiler_specific.h
new file mode 100644
index 0000000..832b95b
--- /dev/null
+++ b/third_party/base/compiler_specific.h
@@ -0,0 +1,233 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_COMPILER_SPECIFIC_H_
+#define BASE_COMPILER_SPECIFIC_H_
+
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+
+// For _Printf_format_string_.
+#include <sal.h>
+
+// Macros for suppressing and disabling warnings on MSVC.
+//
+// Warning numbers are enumerated at:
+// http://msdn.microsoft.com/en-us/library/8x5x43k7(VS.80).aspx
+//
+// The warning pragma:
+// http://msdn.microsoft.com/en-us/library/2c8f766e(VS.80).aspx
+//
+// Using __pragma instead of #pragma inside macros:
+// http://msdn.microsoft.com/en-us/library/d9x1s805.aspx
+
+// MSVC_SUPPRESS_WARNING disables warning |n| for the remainder of the line and
+// for the next line of the source file.
+#define MSVC_SUPPRESS_WARNING(n) __pragma(warning(suppress : n))
+
+// MSVC_PUSH_DISABLE_WARNING pushes |n| onto a stack of warnings to be disabled.
+// The warning remains disabled until popped by MSVC_POP_WARNING.
+#define MSVC_PUSH_DISABLE_WARNING(n) \
+  __pragma(warning(push)) __pragma(warning(disable : n))
+
+// MSVC_PUSH_WARNING_LEVEL pushes |n| as the global warning level.  The level
+// remains in effect until popped by MSVC_POP_WARNING().  Use 0 to disable all
+// warnings.
+#define MSVC_PUSH_WARNING_LEVEL(n) __pragma(warning(push, n))
+
+// Pop effects of innermost MSVC_PUSH_* macro.
+#define MSVC_POP_WARNING() __pragma(warning(pop))
+
+#define MSVC_DISABLE_OPTIMIZE() __pragma(optimize("", off))
+#define MSVC_ENABLE_OPTIMIZE() __pragma(optimize("", on))
+
+// Allows exporting a class that inherits from a non-exported base class.
+// This uses suppress instead of push/pop because the delimiter after the
+// declaration (either "," or "{") has to be placed before the pop macro.
+//
+// Example usage:
+// class EXPORT_API Foo : NON_EXPORTED_BASE(public Bar) {
+//
+// MSVC Compiler warning C4275:
+// non dll-interface class 'Bar' used as base for dll-interface class 'Foo'.
+// Note that this is intended to be used only when no access to the base class'
+// static data is done through derived classes or inline methods. For more info,
+// see http://msdn.microsoft.com/en-us/library/3tdb471s(VS.80).aspx
+#define NON_EXPORTED_BASE(code) \
+  MSVC_SUPPRESS_WARNING(4275)   \
+  code
+
+#else  // Not MSVC
+
+#define _Printf_format_string_
+#define MSVC_SUPPRESS_WARNING(n)
+#define MSVC_PUSH_DISABLE_WARNING(n)
+#define MSVC_PUSH_WARNING_LEVEL(n)
+#define MSVC_POP_WARNING()
+#define MSVC_DISABLE_OPTIMIZE()
+#define MSVC_ENABLE_OPTIMIZE()
+#define NON_EXPORTED_BASE(code) code
+
+#endif  // COMPILER_MSVC
+
+// Annotate a variable indicating it's ok if the variable is not used.
+// (Typically used to silence a compiler warning when the assignment
+// is important for some other reason.)
+// Use like:
+//   int x = ...;
+//   ALLOW_UNUSED_LOCAL(x);
+#define ALLOW_UNUSED_LOCAL(x) false ? (void)x : (void)0
+
+// Annotate a typedef or function indicating it's ok if it's not used.
+// Use like:
+//   typedef Foo Bar ALLOW_UNUSED_TYPE;
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define ALLOW_UNUSED_TYPE __attribute__((unused))
+#else
+#define ALLOW_UNUSED_TYPE
+#endif
+
+// Annotate a function indicating it should not be inlined.
+// Use like:
+//   NOINLINE void DoStuff() { ... }
+#if defined(COMPILER_GCC)
+#define NOINLINE __attribute__((noinline))
+#elif defined(COMPILER_MSVC)
+#define NOINLINE __declspec(noinline)
+#else
+#define NOINLINE
+#endif
+
+#if COMPILER_GCC && defined(NDEBUG)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif COMPILER_MSVC && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+
+// Specify memory alignment for structs, classes, etc.
+// Use like:
+//   class ALIGNAS(16) MyClass { ... }
+//   ALIGNAS(16) int array[4];
+#if defined(COMPILER_MSVC)
+#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment))
+#elif defined(COMPILER_GCC)
+#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment)))
+#endif
+
+// Return the byte alignment of the given type (available at compile time).
+// Use like:
+//   ALIGNOF(int32_t)  // this would be 4
+#if defined(COMPILER_MSVC)
+#define ALIGNOF(type) __alignof(type)
+#elif defined(COMPILER_GCC)
+#define ALIGNOF(type) __alignof__(type)
+#endif
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+//   int foo() WARN_UNUSED_RESULT;
+// To explicitly ignore a result, see |ignore_result()| in base/macros.h.
+#undef WARN_UNUSED_RESULT
+#if defined(COMPILER_GCC) || defined(__clang__)
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT
+#endif
+
+// Tell the compiler a function is using a printf-style format string.
+// |format_param| is the one-based index of the format string parameter;
+// |dots_param| is the one-based index of the "..." parameter.
+// For v*printf functions (which take a va_list), pass 0 for dots_param.
+// (This is undocumented but matches what the system C headers do.)
+#if defined(COMPILER_GCC)
+#define PRINTF_FORMAT(format_param, dots_param) \
+  __attribute__((format(printf, format_param, dots_param)))
+#else
+#define PRINTF_FORMAT(format_param, dots_param)
+#endif
+
+// WPRINTF_FORMAT is the same, but for wide format strings.
+// This doesn't appear to yet be implemented in any compiler.
+// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=38308 .
+#define WPRINTF_FORMAT(format_param, dots_param)
+// If available, it would look like:
+//   __attribute__((format(wprintf, format_param, dots_param)))
+
+// Sanitizers annotations.
+#if defined(__has_attribute)
+#if __has_attribute(no_sanitize)
+#define NO_SANITIZE(what) __attribute__((no_sanitize(what)))
+#endif
+#endif
+#if !defined(NO_SANITIZE)
+#define NO_SANITIZE(what)
+#endif
+
+// MemorySanitizer annotations.
+#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
+#include <sanitizer/msan_interface.h>
+
+// Mark a memory region fully initialized.
+// Use this to annotate code that deliberately reads uninitialized data, for
+// example a GC scavenging root set pointers from the stack.
+#define MSAN_UNPOISON(p, size) __msan_unpoison(p, size)
+
+// Check a memory region for initializedness, as if it was being used here.
+// If any bits are uninitialized, crash with an MSan report.
+// Use this to sanitize data which MSan won't be able to track, e.g. before
+// passing data to another process via shared memory.
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size) \
+  __msan_check_mem_is_initialized(p, size)
+#else  // MEMORY_SANITIZER
+#define MSAN_UNPOISON(p, size)
+#define MSAN_CHECK_MEM_IS_INITIALIZED(p, size)
+#endif  // MEMORY_SANITIZER
+
+// DISABLE_CFI_PERF -- Disable Control Flow Integrity for perf reasons.
+#if !defined(DISABLE_CFI_PERF)
+#if defined(__clang__) && defined(OFFICIAL_BUILD)
+#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
+#else
+#define DISABLE_CFI_PERF
+#endif
+#endif
+
+// Macro useful for writing cross-platform function pointers.
+#if !defined(CDECL)
+#if defined(OS_WIN)
+#define CDECL __cdecl
+#else  // defined(OS_WIN)
+#define CDECL
+#endif  // defined(OS_WIN)
+#endif  // !defined(CDECL)
+
+// Macro for hinting that an expression is likely to be false.
+#if !defined(UNLIKELY)
+#if defined(COMPILER_GCC)
+#define UNLIKELY(x) __builtin_expect(!!(x), 0)
+#else
+#define UNLIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(UNLIKELY)
+
+#if !defined(LIKELY)
+#if defined(COMPILER_GCC)
+#define LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+#define LIKELY(x) (x)
+#endif  // defined(COMPILER_GCC)
+#endif  // !defined(LIKELY)
+
+// Compiler feature-detection.
+// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
+#if defined(__has_feature)
+#define HAS_FEATURE(FEATURE) __has_feature(FEATURE)
+#else
+#define HAS_FEATURE(FEATURE) 0
+#endif
+
+#endif  // BASE_COMPILER_SPECIFIC_H_
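A short sketch of how a few of these annotations compose (hypothetical
function, not added by this CL):

  #include "third_party/base/compiler_specific.h"

  // The caller must examine the result; the digit case is hinted as hot.
  int ParseDigit(char c) WARN_UNUSED_RESULT;

  ALWAYS_INLINE int ParseDigit(char c) {
    if (LIKELY(c >= '0' && c <= '9'))
      return c - '0';
    return -1;
  }

  // 16-byte-aligned storage, as in the ALIGNAS documentation above.
  ALIGNAS(16) static char g_buffer[64];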
diff --git a/third_party/base/logging.h b/third_party/base/logging.h
index bf4faea..38b0dd9 100644
--- a/third_party/base/logging.h
+++ b/third_party/base/logging.h
@@ -21,6 +21,19 @@
     NULL_DEREF_IF_POSSIBLE \
   }
 
+// TODO(palmer): These are quick hacks to import PartitionAlloc with minimum
+// hassle. Look into pulling in the real DCHECK definition. It might be more
+// than we need, or have more dependencies than we want. In the meantime, this
+// is safe, at the cost of some performance.
+#define DCHECK CHECK
+#define DCHECK_EQ(x, y) CHECK((x) == (y))
+#define DCHECK_IS_ON() true
+
+// TODO(palmer): Also a quick hack. IMMEDIATE_CRASH used to be simple in
+// Chromium base/, but it got way more complicated and has lots of base/
+// dependencies now. Sad!
+#define IMMEDIATE_CRASH() abort()
+
 #define NOTREACHED() assert(false)
 
 #endif  // PDFIUM_THIRD_PARTY_BASE_LOGGING_H_
diff --git a/third_party/base/macros.h b/third_party/base/macros.h
index 05c3132..b6ec161 100644
--- a/third_party/base/macros.h
+++ b/third_party/base/macros.h
@@ -28,4 +28,10 @@
 #undef COMPILE_ASSERT
 #define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
 
+// A macro to disallow the copy constructor and operator= functions.
+// This should be used in the private: declarations for a class.
+#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+  TypeName(const TypeName&) = delete;      \
+  void operator=(const TypeName&) = delete
+
 #endif  // PDFIUM_THIRD_PARTY_BASE_MACROS_H_
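Usage sketch for the new macro (hypothetical class):

  class Widget {
   public:
    Widget() {}

   private:
    DISALLOW_COPY_AND_ASSIGN(Widget);
  };

  // Widget w2 = w1;  // Fails to compile: the copy constructor is deleted.

Because the macro uses '= delete' rather than the old declared-but-undefined
idiom, misuse is diagnosed at compile time rather than link time, even from
inside the class or its friends.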
diff --git a/third_party/base/sys_byteorder.h b/third_party/base/sys_byteorder.h
new file mode 100644
index 0000000..593abe1
--- /dev/null
+++ b/third_party/base/sys_byteorder.h
@@ -0,0 +1,141 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header defines cross-platform ByteSwap() implementations for 16, 32 and
+// 64-bit values, and NetToHostXX() / HostToNetXX() functions equivalent to
+// the traditional ntohX() and htonX() functions.
+// Use the functions defined here rather than using the platform-specific
+// functions directly.
+
+#ifndef BASE_SYS_BYTEORDER_H_
+#define BASE_SYS_BYTEORDER_H_
+
+#include <stdint.h>
+
+#include "third_party/base/logging.h"
+#include "third_party/build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <stdlib.h>
+#endif
+
+namespace pdfium {
+namespace base {
+
+// Returns a value with all bytes in |x| swapped, i.e. reverses the endianness.
+inline uint16_t ByteSwap(uint16_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_ushort(x);
+#else
+  return __builtin_bswap16(x);
+#endif
+}
+
+inline uint32_t ByteSwap(uint32_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_ulong(x);
+#else
+  return __builtin_bswap32(x);
+#endif
+}
+
+inline uint64_t ByteSwap(uint64_t x) {
+#if defined(COMPILER_MSVC)
+  return _byteswap_uint64(x);
+#else
+  return __builtin_bswap64(x);
+#endif
+}
+
+inline uintptr_t ByteSwapUintPtrT(uintptr_t x) {
+  // We do it this way because some build configurations are ILP32 even when
+  // defined(ARCH_CPU_64_BITS). Unfortunately, we can't use sizeof in #ifs. But,
+  // because these conditionals are constexprs, the irrelevant branches will
+  // likely be optimized away, so this construction should not result in code
+  // bloat.
+  if (sizeof(uintptr_t) == 4) {
+    return ByteSwap(static_cast<uint32_t>(x));
+  } else if (sizeof(uintptr_t) == 8) {
+    return ByteSwap(static_cast<uint64_t>(x));
+  }
+  NOTREACHED();
+  return x;  // Unreachable; keeps all return paths of this function defined.
+}
+
+// Converts the bytes in |x| from host order (endianness) to little endian, and
+// returns the result.
+inline uint16_t ByteSwapToLE16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+inline uint32_t ByteSwapToLE32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+inline uint64_t ByteSwapToLE64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return x;
+#else
+  return ByteSwap(x);
+#endif
+}
+
+// Converts the bytes in |x| from network to host order (endianness), and
+// returns the result.
+inline uint16_t NetToHost16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+inline uint32_t NetToHost32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+inline uint64_t NetToHost64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+
+// Converts the bytes in |x| from host to network order (endianness), and
+// returns the result.
+inline uint16_t HostToNet16(uint16_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+inline uint32_t HostToNet32(uint32_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+inline uint64_t HostToNet64(uint64_t x) {
+#if defined(ARCH_CPU_LITTLE_ENDIAN)
+  return ByteSwap(x);
+#else
+  return x;
+#endif
+}
+
+}  // namespace base
+}  // namespace pdfium
+
+#endif  // BASE_SYS_BYTEORDER_H_
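Hand-checkable examples of the conversions above (a sketch, not shipped code):

  #include "third_party/base/sys_byteorder.h"

  void ByteOrderExamples() {
    // ByteSwap reverses bytes unconditionally.
    uint16_t swapped = pdfium::base::ByteSwap(uint16_t{0x1234});  // 0x3412
    // NetToHost16 matches ntohs(): a swap on little-endian hosts, a no-op on
    // big-endian ones.
    uint16_t host = pdfium::base::NetToHost16(0x1234);
    (void)swapped;
    (void)host;
  }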
diff --git a/third_party/base/template_util.h b/third_party/base/template_util.h
new file mode 100644
index 0000000..b246322
--- /dev/null
+++ b/third_party/base/template_util.h
@@ -0,0 +1,205 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEMPLATE_UTIL_H_
+#define BASE_TEMPLATE_UTIL_H_
+
+#include <stddef.h>
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+#include "third_party/build/build_config.h"
+
+// This hacks around libstdc++ 4.6 missing parts of type_traits, for as long
+// as we still need to support that version.
+#define CR_GLIBCXX_4_7_0 20120322
+#define CR_GLIBCXX_4_5_4 20120702
+#define CR_GLIBCXX_4_6_4 20121127
+#if defined(__GLIBCXX__) &&                                               \
+    (__GLIBCXX__ < CR_GLIBCXX_4_7_0 || __GLIBCXX__ == CR_GLIBCXX_4_5_4 || \
+     __GLIBCXX__ == CR_GLIBCXX_4_6_4)
+#define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#endif
+
+// Some versions of libstdc++ have partial support for type_traits, but miss
+// a smaller subset while removing some of the older non-standard stuff. Assume
+// that all versions below 5.0 fall in this category, along with one 5.0
+// experimental release. Test for this by consulting compiler major version,
+// the only reliable option available, so theoretically this could fail should
+// you attempt to mix an earlier version of libstdc++ with >= GCC5. But
+// that's unlikely to work out, especially as GCC5 changed ABI.
+#define CR_GLIBCXX_5_0_0 20150123
+#if (defined(__GNUC__) && __GNUC__ < 5) || \
+    (defined(__GLIBCXX__) && __GLIBCXX__ == CR_GLIBCXX_5_0_0)
+#define CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
+#endif
+
+// This hacks around using gcc with libc++, which has some incompatibilities.
+// - is_trivially_* doesn't work: https://llvm.org/bugs/show_bug.cgi?id=27538
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that works with older
+// gcc versions.
+#if !defined(__clang__) && defined(_LIBCPP_VERSION)
+#define CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#endif
+
+namespace pdfium {
+namespace base {
+
+template <class T>
+struct is_non_const_reference : std::false_type {};
+template <class T>
+struct is_non_const_reference<T&> : std::true_type {};
+template <class T>
+struct is_non_const_reference<const T&> : std::false_type {};
+
+// is_assignable
+
+namespace internal {
+
+template <typename First, typename Second>
+struct SelectSecond {
+  using type = Second;
+};
+
+struct Any {
+  Any(...);
+};
+
+// True case: If |Lvalue| can be assigned to from |Rvalue|, then the return
+// value is a true_type.
+template <class Lvalue, class Rvalue>
+typename internal::SelectSecond<
+    decltype((std::declval<Lvalue>() = std::declval<Rvalue>())),
+    std::true_type>::type
+IsAssignableTest(Lvalue&&, Rvalue&&);
+
+// False case: Otherwise the return value is a false_type.
+template <class Rvalue>
+std::false_type IsAssignableTest(internal::Any, Rvalue&&);
+
+// Default case: Neither Lvalue nor Rvalue is void. Uses IsAssignableTest to
+// determine the type of IsAssignableImpl.
+template <class Lvalue,
+          class Rvalue,
+          bool = std::is_void<Lvalue>::value || std::is_void<Rvalue>::value>
+struct IsAssignableImpl
+    : public std::common_type<decltype(
+          internal::IsAssignableTest(std::declval<Lvalue>(),
+                                     std::declval<Rvalue>()))>::type {};
+
+// Void case: Either Lvalue or Rvalue is void. Then the type of IsAssignableTest
+// is false_type.
+template <class Lvalue, class Rvalue>
+struct IsAssignableImpl<Lvalue, Rvalue, true> : public std::false_type {};
+
+// Uses expression SFINAE to detect whether using operator<< would work.
+template <typename T, typename = void>
+struct SupportsOstreamOperator : std::false_type {};
+template <typename T>
+struct SupportsOstreamOperator<T,
+                               decltype(void(std::declval<std::ostream&>()
+                                             << std::declval<T>()))>
+    : std::true_type {};
+
+}  // namespace internal
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class Lvalue, class Rvalue>
+struct is_assignable : public internal::IsAssignableImpl<Lvalue, Rvalue> {};
+
+// is_copy_assignable is true if a T const& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_copy_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           typename std::add_lvalue_reference<
+                               typename std::add_const<T>::type>::type> {};
+
+// is_move_assignable is true if a T&& is assignable to a T&.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+template <class T>
+struct is_move_assignable
+    : public is_assignable<typename std::add_lvalue_reference<T>::type,
+                           const typename std::add_rvalue_reference<T>::type> {
+};
+
+// underlying_type produces the integer type backing an enum type.
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+template <typename T>
+struct underlying_type {
+  using type = __underlying_type(T);
+};
+#else
+template <typename T>
+using underlying_type = std::underlying_type<T>;
+#endif
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+template <class T>
+using is_trivially_destructible = std::has_trivial_destructor<T>;
+#else
+template <class T>
+using is_trivially_destructible = std::is_trivially_destructible<T>;
+#endif
+
+// is_trivially_copyable is especially hard to get right.
+// - Older versions of libstdc++ lack it, just as they lack other type traits.
+//   In this case we should provide it based on compiler
+//   intrinsics. This is covered by the CR_USE_FALLBACKS_FOR_OLD_GLIBCXX define.
+// - An experimental release of gcc includes most of type_traits but misses
+//   is_trivially_copyable, so we still have to avoid using libstdc++ in this
+//   case, which is covered by CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX.
+// - When compiling libc++ from before r239653, with a gcc compiler, the
+//   std::is_trivially_copyable can fail. So we need to work around that by not
+//   using the one in libc++ in this case. This is covered by the
+//   CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX define, and is discussed in
+//   https://llvm.org/bugs/show_bug.cgi?id=27538#c1 where they point out that
+//   in libc++'s commit r239653 this is fixed by libc++ checking for gcc 5.1.
+// - In both of the above cases we are using the gcc compiler. When defining
+//   this ourselves on compiler intrinsics, the __is_trivially_copyable()
+//   intrinsic is not available on gcc before version 5.1 (see the discussion in
+//   https://llvm.org/bugs/show_bug.cgi?id=27538#c1 again), so we must check for
+//   that version.
+// - When __is_trivially_copyable() is not available because we are on gcc older
+//   than 5.1, we need to fall back to something, so we use __has_trivial_copy()
+//   instead based on what was done one-off in bit_cast() previously.
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace and it works with gcc as needed.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX) ||              \
+    defined(CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX) || \
+    defined(CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX)
+template <typename T>
+struct is_trivially_copyable {
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that does this for
+// us.
+#if _GNUC_VER >= 501
+  static constexpr bool value = __is_trivially_copyable(T);
+#else
+  static constexpr bool value = __has_trivial_copy(T);
+#endif
+};
+#else
+template <class T>
+using is_trivially_copyable = std::is_trivially_copyable<T>;
+#endif
+
+}  // namespace base
+}  // namespace pdfium
+
+#undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#undef CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#undef CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
+
+#endif  // BASE_TEMPLATE_UTIL_H_
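A compile-time sketch of the traits above (hypothetical types, not added by
this CL):

  #include "third_party/base/template_util.h"

  struct Pod {
    int x;
  };

  static_assert(pdfium::base::is_trivially_copyable<Pod>::value,
                "plain structs are trivially copyable");
  static_assert(!pdfium::base::is_non_const_reference<const int&>::value,
                "const references are excluded");
  static_assert(pdfium::base::is_assignable<int&, int>::value,
                "int is assignable to int&");
  static_assert(!pdfium::base::is_assignable<void, int>::value,
                "the void case is always false");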