Update PartitionAlloc from Chromium at r721710.

This merges in https://crrev.com/721710, which fixes some nits: the
PartitionRoot::Init() parameters are renamed so they no longer shadow the
member variables they initialize, redundant this-> qualifiers are dropped
throughout, and a few small style cleanups (pointer-star placement, a
pointer-to-reference conversion in a loop) come along for the ride.

Change-Id: I25be3f040b2bb03b321f140630f20ff48fcec31a
Reviewed-on: https://pdfium-review.googlesource.com/c/pdfium/+/63311
Commit-Queue: Lei Zhang <thestig@chromium.org>
Reviewed-by: Chris Palmer <palmer@chromium.org>
Reviewed-by: Tom Sepez <tsepez@chromium.org>
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.cc b/third_party/base/allocator/partition_allocator/partition_alloc.cc
index a6acfc1..19ce17b 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.cc
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.cc
@@ -193,19 +193,19 @@
   internal::PartitionRootBase::gOomHandlingFunction = oom_handling_function;
 }
 
-void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
+void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
   PartitionAllocBaseInit(this);
 
-  this->num_buckets = num_buckets;
-  this->max_allocation = max_allocation;
-  for (size_t i = 0; i < this->num_buckets; ++i) {
-    internal::PartitionBucket* bucket = &this->buckets()[i];
-    bucket->Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
+  num_buckets = bucket_count;
+  max_allocation = maximum_allocation;
+  for (size_t i = 0; i < num_buckets; ++i) {
+    internal::PartitionBucket& bucket = buckets()[i];
+    bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
   }
 }
 
 void PartitionRootGeneric::Init() {
-  subtle::SpinLock::Guard guard(this->lock);
+  subtle::SpinLock::Guard guard(lock);
 
   PartitionAllocBaseInit(this);
 
@@ -223,7 +223,7 @@
       order_index_shift = 0;
     else
       order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
-    this->order_index_shifts[order] = order_index_shift;
+    order_index_shifts[order] = order_index_shift;
     size_t sub_order_index_mask;
     if (order == kBitsPerSizeT) {
       // This avoids invoking undefined behavior for an excessive shift.
@@ -233,7 +233,7 @@
       sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
                              (kGenericNumBucketsPerOrderBits + 1);
     }
-    this->order_sub_index_masks[order] = sub_order_index_mask;
+    order_sub_index_masks[order] = sub_order_index_mask;
   }
 
   // Set up the actual usable buckets first.
@@ -246,7 +246,7 @@
   size_t current_size = kGenericSmallestBucket;
   size_t current_increment =
       kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
-  internal::PartitionBucket* bucket = &this->buckets[0];
+  internal::PartitionBucket* bucket = &buckets[0];
   for (i = 0; i < kGenericNumBucketedOrders; ++i) {
     for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
       bucket->Init(current_size);
@@ -259,16 +259,16 @@
     current_increment <<= 1;
   }
   DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
-  DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
+  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
 
   // Then set up the fast size -> bucket lookup table.
-  bucket = &this->buckets[0];
-  internal::PartitionBucket** bucket_ptr = &this->bucket_lookups[0];
+  bucket = &buckets[0];
+  internal::PartitionBucket** bucket_ptr = &bucket_lookups[0];
   for (order = 0; order <= kBitsPerSizeT; ++order) {
     for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
       if (order < kGenericMinBucketedOrder) {
         // Use the bucket of the finest granularity for malloc(0) etc.
-        *bucket_ptr++ = &this->buckets[0];
+        *bucket_ptr++ = &buckets[0];
       } else if (order > kGenericMaxBucketedOrder) {
         *bucket_ptr++ = internal::PartitionBucket::get_sentinel_bucket();
       } else {
@@ -281,8 +281,8 @@
       }
     }
   }
-  DCHECK(bucket == &this->buckets[0] + kGenericNumBuckets);
-  DCHECK(bucket_ptr == &this->bucket_lookups[0] +
+  DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+  DCHECK(bucket_ptr == &bucket_lookups[0] +
                            ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
   // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
   // which tries to overflow to a non-existent order.
@@ -620,12 +620,12 @@
 }
 
 void PartitionRootGeneric::PurgeMemory(int flags) {
-  subtle::SpinLock::Guard guard(this->lock);
+  subtle::SpinLock::Guard guard(lock);
   if (flags & PartitionPurgeDecommitEmptyPages)
     DecommitEmptyPages();
   if (flags & PartitionPurgeDiscardUnusedSystemPages) {
     for (size_t i = 0; i < kGenericNumBuckets; ++i) {
-      internal::PartitionBucket* bucket = &this->buckets[i];
+      internal::PartitionBucket* bucket = &buckets[i];
       if (bucket->slot_size >= kSystemPageSize)
         PartitionPurgeBucket(bucket);
     }
@@ -717,8 +717,8 @@
                                      PartitionStatsDumper* dumper) {
   PartitionMemoryStats stats = {0};
   stats.total_mmapped_bytes =
-      this->total_size_of_super_pages + this->total_size_of_direct_mapped_pages;
-  stats.total_committed_bytes = this->total_size_of_committed_pages;
+      total_size_of_super_pages + total_size_of_direct_mapped_pages;
+  stats.total_committed_bytes = total_size_of_committed_pages;
 
   size_t direct_mapped_allocations_total_size = 0;
 
@@ -735,10 +735,10 @@
   PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
   size_t num_direct_mapped_allocations = 0;
   {
-    subtle::SpinLock::Guard guard(this->lock);
+    subtle::SpinLock::Guard guard(lock);
 
     for (size_t i = 0; i < kGenericNumBuckets; ++i) {
-      const internal::PartitionBucket* bucket = &this->buckets[i];
+      const internal::PartitionBucket* bucket = &buckets[i];
       // Don't report the pseudo buckets that the generic allocator sets up in
       // order to preserve a fast size->bucket map (see
       // PartitionRootGeneric::Init() for details).
@@ -754,7 +754,7 @@
       }
     }
 
-    for (internal::PartitionDirectMapExtent *extent = this->direct_map_list;
+    for (internal::PartitionDirectMapExtent* extent = direct_map_list;
          extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
          extent = extent->next_extent, ++num_direct_mapped_allocations) {
       DCHECK(!extent->next_extent ||
@@ -800,9 +800,9 @@
                               bool is_light_dump,
                               PartitionStatsDumper* dumper) {
   PartitionMemoryStats stats = {0};
-  stats.total_mmapped_bytes = this->total_size_of_super_pages;
-  stats.total_committed_bytes = this->total_size_of_committed_pages;
-  DCHECK(!this->total_size_of_direct_mapped_pages);
+  stats.total_mmapped_bytes = total_size_of_super_pages;
+  stats.total_committed_bytes = total_size_of_committed_pages;
+  DCHECK(!total_size_of_direct_mapped_pages);
 
   static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
   std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
@@ -811,12 +811,12 @@
         new PartitionBucketMemoryStats[kMaxReportableBuckets]);
   }
 
-  const size_t partition_num_buckets = this->num_buckets;
+  const size_t partition_num_buckets = num_buckets;
   DCHECK(partition_num_buckets <= kMaxReportableBuckets);
 
   for (size_t i = 0; i < partition_num_buckets; ++i) {
     PartitionBucketMemoryStats bucket_stats = {0};
-    PartitionDumpBucketStats(&bucket_stats, &this->buckets()[i]);
+    PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
     if (bucket_stats.is_valid) {
       stats.total_resident_bytes += bucket_stats.resident_bytes;
       stats.total_active_bytes += bucket_stats.active_bytes;
diff --git a/third_party/base/allocator/partition_allocator/partition_alloc.h b/third_party/base/allocator/partition_allocator/partition_alloc.h
index bd6505c..e3ce36c 100644
--- a/third_party/base/allocator/partition_allocator/partition_alloc.h
+++ b/third_party/base/allocator/partition_allocator/partition_alloc.h
@@ -122,7 +122,7 @@
     return reinterpret_cast<const internal::PartitionBucket*>(this + 1);
   }
 
-  void Init(size_t num_buckets, size_t max_allocation);
+  void Init(size_t bucket_count, size_t maximum_allocation);
 
   ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
   ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
@@ -318,11 +318,11 @@
   }
   size_t requested_size = size;
   size = internal::PartitionCookieSizeAdjustAdd(size);
-  DCHECK(this->initialized);
+  DCHECK(initialized);
   size_t index = size >> kBucketShift;
-  DCHECK(index < this->num_buckets);
+  DCHECK(index < num_buckets);
   DCHECK(size == index << kBucketShift);
-  internal::PartitionBucket* bucket = &this->buckets()[index];
+  internal::PartitionBucket* bucket = &buckets()[index];
   result = AllocFromBucket(bucket, flags, size);
   if (UNLIKELY(hooks_enabled)) {
     PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
@@ -447,7 +447,7 @@
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   free(ptr);
 #else
-  DCHECK(this->initialized);
+  DCHECK(initialized);
 
   if (UNLIKELY(!ptr))
     return;
@@ -463,7 +463,7 @@
   // TODO(palmer): See if we can afford to make this a CHECK.
   DCHECK(IsValidPage(page));
   {
-    subtle::SpinLock::Guard guard(this->lock);
+    subtle::SpinLock::Guard guard(lock);
     page->Free(ptr);
   }
 #endif
@@ -479,7 +479,7 @@
 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
   return size;
 #else
-  DCHECK(this->initialized);
+  DCHECK(initialized);
   size = internal::PartitionCookieSizeAdjustAdd(size);
   internal::PartitionBucket* bucket = PartitionGenericSizeToBucket(this, size);
   if (LIKELY(!bucket->is_direct_mapped())) {
diff --git a/third_party/base/allocator/partition_allocator/partition_bucket.cc b/third_party/base/allocator/partition_allocator/partition_bucket.cc
index 54acfde..7b24c90 100644
--- a/third_party/base/allocator/partition_allocator/partition_bucket.cc
+++ b/third_party/base/allocator/partition_allocator/partition_bucket.cc
@@ -133,23 +133,23 @@
   // to using fewer system pages.
   double best_waste_ratio = 1.0f;
   uint16_t best_pages = 0;
-  if (this->slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+  if (slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
     // TODO(ajwong): Why is there a DCHECK here for this?
     // http://crbug.com/776537
-    DCHECK(!(this->slot_size % kSystemPageSize));
-    best_pages = static_cast<uint16_t>(this->slot_size / kSystemPageSize);
+    DCHECK(!(slot_size % kSystemPageSize));
+    best_pages = static_cast<uint16_t>(slot_size / kSystemPageSize);
     // TODO(ajwong): Should this be checking against
     // kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
     // http://crbug.com/776537
     CHECK(best_pages < (1 << 8));
     return static_cast<uint8_t>(best_pages);
   }
-  DCHECK(this->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+  DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
   for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
        i <= kMaxSystemPagesPerSlotSpan; ++i) {
     size_t page_size = kSystemPageSize * i;
-    size_t num_slots = page_size / this->slot_size;
-    size_t waste = page_size - (num_slots * this->slot_size);
+    size_t num_slots = page_size / slot_size;
+    size_t waste = page_size - (num_slots * slot_size);
     // Leaving a page unfaulted is not free; the page will occupy an empty page
     // table entry.  Make a simple attempt to account for that.
     //
@@ -344,12 +344,12 @@
   // We should only get here when _every_ slot is either used or unprovisioned.
   // (The third state is "on the freelist". If we have a non-empty freelist, we
   // should not get here.)
-  DCHECK(num_slots + page->num_allocated_slots == this->get_slots_per_span());
+  DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
   // Similarly, explicitly make sure that the freelist is empty.
   DCHECK(!page->freelist_head);
   DCHECK(page->num_allocated_slots >= 0);
 
-  size_t size = this->slot_size;
+  size_t size = slot_size;
   char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
   char* return_object = base + (size * page->num_allocated_slots);
   char* first_freelist_pointer = return_object + size;
@@ -405,7 +405,7 @@
 }
 
 bool PartitionBucket::SetNewActivePage() {
-  PartitionPage* page = this->active_pages_head;
+  PartitionPage* page = active_pages_head;
   if (page == PartitionPage::get_sentinel_page())
     return false;
 
@@ -414,40 +414,40 @@
   for (; page; page = next_page) {
     next_page = page->next_page;
     DCHECK(page->bucket == this);
-    DCHECK(page != this->empty_pages_head);
-    DCHECK(page != this->decommitted_pages_head);
+    DCHECK(page != empty_pages_head);
+    DCHECK(page != decommitted_pages_head);
 
     if (LIKELY(page->is_active())) {
       // This page is usable because it has freelist entries, or has
       // unprovisioned slots we can create freelist entries from.
-      this->active_pages_head = page;
+      active_pages_head = page;
       return true;
     }
 
     // Deal with empty and decommitted pages.
     if (LIKELY(page->is_empty())) {
-      page->next_page = this->empty_pages_head;
-      this->empty_pages_head = page;
+      page->next_page = empty_pages_head;
+      empty_pages_head = page;
     } else if (LIKELY(page->is_decommitted())) {
-      page->next_page = this->decommitted_pages_head;
-      this->decommitted_pages_head = page;
+      page->next_page = decommitted_pages_head;
+      decommitted_pages_head = page;
     } else {
       DCHECK(page->is_full());
       // If we get here, we found a full page. Skip over it too, and also
       // tag it as full (via a negative value). We need it tagged so that
       // freeing can tell, and move it back into the active page list.
       page->num_allocated_slots = -page->num_allocated_slots;
-      ++this->num_full_pages;
+      ++num_full_pages;
       // num_full_pages is a uint16_t for efficient packing so guard against
       // overflow to be safe.
-      if (UNLIKELY(!this->num_full_pages))
+      if (UNLIKELY(!num_full_pages))
         OnFull();
       // Not necessary but might help stop accidents.
       page->next_page = nullptr;
     }
   }
 
-  this->active_pages_head = PartitionPage::get_sentinel_page();
+  active_pages_head = PartitionPage::get_sentinel_page();
   return false;
 }
 
@@ -456,7 +456,7 @@
                                      size_t size,
                                      bool* is_already_zeroed) {
   // The slow path is called when the freelist is empty.
-  DCHECK(!this->active_pages_head->freelist_head);
+  DCHECK(!active_pages_head->freelist_head);
 
   PartitionPage* new_page = nullptr;
   *is_already_zeroed = false;
@@ -471,10 +471,10 @@
   // false where it sweeps the active page list and may move things into
   // the empty or decommitted lists which affects the subsequent conditional.
   bool return_null = flags & PartitionAllocReturnNull;
-  if (UNLIKELY(this->is_direct_mapped())) {
+  if (UNLIKELY(is_direct_mapped())) {
     DCHECK(size > kGenericMaxBucketed);
     DCHECK(this == get_sentinel_bucket());
-    DCHECK(this->active_pages_head == PartitionPage::get_sentinel_page());
+    DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
     if (size > kGenericMaxDirectMapped) {
       if (return_null)
         return nullptr;
@@ -485,34 +485,33 @@
     // Turn off the optimization to see if it helps https://crbug.com/892550.
     *is_already_zeroed = true;
 #endif
-  } else if (LIKELY(this->SetNewActivePage())) {
+  } else if (LIKELY(SetNewActivePage())) {
     // First, did we find an active page in the active pages list?
-    new_page = this->active_pages_head;
+    new_page = active_pages_head;
     DCHECK(new_page->is_active());
-  } else if (LIKELY(this->empty_pages_head != nullptr) ||
-             LIKELY(this->decommitted_pages_head != nullptr)) {
+  } else if (LIKELY(empty_pages_head != nullptr) ||
+             LIKELY(decommitted_pages_head != nullptr)) {
     // Second, look in our lists of empty and decommitted pages.
     // Check empty pages first, which are preferred, but beware that an
     // empty page might have been decommitted.
-    while (LIKELY((new_page = this->empty_pages_head) != nullptr)) {
+    while (LIKELY((new_page = empty_pages_head) != nullptr)) {
       DCHECK(new_page->bucket == this);
       DCHECK(new_page->is_empty() || new_page->is_decommitted());
-      this->empty_pages_head = new_page->next_page;
+      empty_pages_head = new_page->next_page;
       // Accept the empty page unless it got decommitted.
       if (new_page->freelist_head) {
         new_page->next_page = nullptr;
         break;
       }
       DCHECK(new_page->is_decommitted());
-      new_page->next_page = this->decommitted_pages_head;
-      this->decommitted_pages_head = new_page;
+      new_page->next_page = decommitted_pages_head;
+      decommitted_pages_head = new_page;
     }
-    if (UNLIKELY(!new_page) &&
-        LIKELY(this->decommitted_pages_head != nullptr)) {
-      new_page = this->decommitted_pages_head;
+    if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) {
+      new_page = decommitted_pages_head;
       DCHECK(new_page->bucket == this);
       DCHECK(new_page->is_decommitted());
-      this->decommitted_pages_head = new_page->next_page;
+      decommitted_pages_head = new_page->next_page;
       void* addr = PartitionPage::ToPointer(new_page);
       root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
       new_page->Reset();
@@ -523,7 +522,7 @@
     DCHECK(new_page);
   } else {
     // Third. If we get here, we need a brand new page.
-    uint16_t num_partition_pages = this->get_pages_per_slot_span();
+    uint16_t num_partition_pages = get_pages_per_slot_span();
     void* raw_pages = AllocNewSlotSpan(root, flags, num_partition_pages);
     if (LIKELY(raw_pages != nullptr)) {
       new_page = PartitionPage::FromPointerNoAlignmentCheck(raw_pages);
@@ -536,7 +535,7 @@
 
   // Bail if we had a memory allocation failure.
   if (UNLIKELY(!new_page)) {
-    DCHECK(this->active_pages_head == PartitionPage::get_sentinel_page());
+    DCHECK(active_pages_head == PartitionPage::get_sentinel_page());
     if (return_null)
       return nullptr;
     root->OutOfMemory();
diff --git a/third_party/base/allocator/partition_allocator/partition_page.cc b/third_party/base/allocator/partition_allocator/partition_page.cc
index 3f70048..8b507c3 100644
--- a/third_party/base/allocator/partition_allocator/partition_page.cc
+++ b/third_party/base/allocator/partition_allocator/partition_page.cc
@@ -92,7 +92,7 @@
 
 void PartitionPage::FreeSlowPath() {
   DCHECK(this != get_sentinel_page());
-  if (LIKELY(this->num_allocated_slots == 0)) {
+  if (LIKELY(num_allocated_slots == 0)) {
     // Page became fully unused.
     if (UNLIKELY(bucket->is_direct_mapped())) {
       PartitionDirectUnmap(this);
@@ -112,24 +112,24 @@
     DCHECK(!bucket->is_direct_mapped());
     // Ensure that the page is full. That's the only valid case if we
     // arrive here.
-    DCHECK(this->num_allocated_slots < 0);
+    DCHECK(num_allocated_slots < 0);
     // A transition of num_allocated_slots from 0 to -1 is not legal, and
     // likely indicates a double-free.
-    CHECK(this->num_allocated_slots != -1);
-    this->num_allocated_slots = -this->num_allocated_slots - 2;
-    DCHECK(this->num_allocated_slots == bucket->get_slots_per_span() - 1);
+    CHECK(num_allocated_slots != -1);
+    num_allocated_slots = -num_allocated_slots - 2;
+    DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
     // Fully used page became partially used. It must be put back on the
     // non-full page list. Also make it the current page to increase the
     // chances of it being filled up again. The old current page will be
     // the next page.
-    DCHECK(!this->next_page);
+    DCHECK(!next_page);
     if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
-      this->next_page = bucket->active_pages_head;
+      next_page = bucket->active_pages_head;
     bucket->active_pages_head = this;
     --bucket->num_full_pages;
     // Special case: for a partition page with just a single slot, it may
     // now be empty and we want to run it through the empty logic.
-    if (UNLIKELY(this->num_allocated_slots == 0))
+    if (UNLIKELY(num_allocated_slots == 0))
       FreeSlowPath();
   }
 }
diff --git a/third_party/base/allocator/partition_allocator/partition_page.h b/third_party/base/allocator/partition_allocator/partition_page.h
index a4aa3ac..4bbb76b 100644
--- a/third_party/base/allocator/partition_allocator/partition_page.h
+++ b/third_party/base/allocator/partition_allocator/partition_page.h
@@ -203,7 +203,7 @@
 
 ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
 #if DCHECK_IS_ON()
-  size_t slot_size = this->bucket->slot_size;
+  size_t slot_size = bucket->slot_size;
   const size_t raw_size = get_raw_size();
   if (raw_size) {
     slot_size = raw_size;
@@ -217,7 +217,7 @@
   memset(ptr, kFreedByte, slot_size);
 #endif
 
-  DCHECK(this->num_allocated_slots);
+  DCHECK(num_allocated_slots);
   // Catches an immediate double free.
   CHECK(ptr != freelist_head);
   // Look for double free one level deeper in debug.
@@ -227,8 +227,8 @@
       static_cast<internal::PartitionFreelistEntry*>(ptr);
   entry->next = internal::PartitionFreelistEntry::Encode(freelist_head);
   freelist_head = entry;
-  --this->num_allocated_slots;
-  if (UNLIKELY(this->num_allocated_slots <= 0)) {
+  --num_allocated_slots;
+  if (UNLIKELY(num_allocated_slots <= 0)) {
     FreeSlowPath();
   } else {
     // All single-slot allocations must go through the slow path to
@@ -279,7 +279,7 @@
 }
 
 ALWAYS_INLINE void PartitionPage::Reset() {
-  DCHECK(this->is_decommitted());
+  DCHECK(is_decommitted());
 
   num_unprovisioned_slots = bucket->get_slots_per_span();
   DCHECK(num_unprovisioned_slots);