[heap] Add mechanism for tracking invalidated slots per memory chunk.
For correct slot recording in the concurrent marker, we need to resolve
the race that happens when
1) the mutator invalidates slots during double unboxing or string
conversion, and
2) the concurrent marker records these slots.
This patch adds a data structure for tracking the invalidated objects.
This allows the concurrent marker to record slots without worrying
about clearing them. During the old-to-old pointer updating phase we
re-check all slots that belong to the invalidated objects.
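
A rough standalone sketch of the idea (simplified, with hypothetical
types; the real classes are InvalidatedSlots and InvalidatedSlotsFilter
added below): each chunk maps the start address of an invalidated
object to its size before the layout change, and the updating phase
queries recorded slots in non-decreasing address order, dropping any
slot that falls inside an invalidated object instead of blindly
updating it.

  // Standalone illustration only; not the actual V8 classes.
  #include <cstdint>
  #include <iostream>
  #include <map>

  using Address = std::uintptr_t;
  // Start address of an invalidated object -> its size before the layout
  // change (mirrors InvalidatedSlots = std::map<HeapObject*, int>).
  using InvalidatedRanges = std::map<Address, int>;

  class SimpleInvalidatedSlotsFilter {
   public:
    explicit SimpleInvalidatedSlotsFilter(const InvalidatedRanges& ranges)
        : it_(ranges.begin()), end_(ranges.end()) {}

    // Slots must be queried in non-decreasing address order, so one pass
    // over n slots and m invalidated objects costs O(m + n).
    bool IsValid(Address slot) {
      // Skip invalidated ranges that end at or before the slot.
      while (it_ != end_ && slot >= it_->first + it_->second) ++it_;
      if (it_ == end_ || slot < it_->first) return true;  // outside any range
      return false;  // inside an invalidated object: drop the slot
    }

   private:
    InvalidatedRanges::const_iterator it_;
    InvalidatedRanges::const_iterator end_;
  };

  int main() {
    InvalidatedRanges ranges = {{0x1000, 0x40}};  // object at 0x1000, old size 0x40
    SimpleInvalidatedSlotsFilter filter(ranges);
    std::cout << filter.IsValid(0x0ff8) << "\n";  // 1: slot before the object
    std::cout << filter.IsValid(0x1010) << "\n";  // 0: slot inside the object
    std::cout << filter.IsValid(0x1040) << "\n";  // 1: slot past the object
  }

Unlike this sketch, the real InvalidatedSlotsFilter does not drop every
slot inside an invalidated object: it additionally asks the object
whether the offset is still a valid tagged field (IsValidSlot).
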
BUG=chromium:694255
Change-Id: Ifc3d82918cd3b96e5a5fb7125691626a56f4ab83
Reviewed-on: https://chromium-review.googlesource.com/591810
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47049}
diff --git a/BUILD.gn b/BUILD.gn
index e3152ac..a57c152 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1581,6 +1581,9 @@
"src/heap/incremental-marking-job.h",
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
+ "src/heap/invalidated-slots-inl.h",
+ "src/heap/invalidated-slots.cc",
+ "src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.h",
"src/heap/local-allocator.h",
"src/heap/mark-compact-inl.h",
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index bcaa75b..e3ac830 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -4605,10 +4605,17 @@
}
}
-void Heap::NotifyObjectLayoutChange(HeapObject* object,
+void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
const DisallowHeapAllocation&) {
+ DCHECK(InOldSpace(object) || InNewSpace(object));
if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndPush(object);
+ if (InOldSpace(object)) {
+ // The concurrent marker might have recorded slots for the object.
+ // Register this object as invalidated to filter out the slots.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ chunk->RegisterObjectWithInvalidatedSlots(object, size);
+ }
}
#ifdef VERIFY_HEAP
DCHECK(pending_layout_change_object_ == nullptr);
diff --git a/src/heap/heap.h b/src/heap/heap.h
index d1ab66d..a3bee56 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -1186,7 +1186,8 @@
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
- void NotifyObjectLayoutChange(HeapObject* object,
+  // The old size is the size of the object before the layout change.
+ void NotifyObjectLayoutChange(HeapObject* object, int old_size,
const DisallowHeapAllocation&);
#ifdef VERIFY_HEAP
diff --git a/src/heap/invalidated-slots-inl.h b/src/heap/invalidated-slots-inl.h
new file mode 100644
index 0000000..8f71575
--- /dev/null
+++ b/src/heap/invalidated-slots-inl.h
@@ -0,0 +1,61 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INVALIDATED_SLOTS_INL_H
+#define V8_INVALIDATED_SLOTS_INL_H
+
+#include <map>
+
+#include "src/allocation.h"
+#include "src/heap/invalidated-slots.h"
+#include "src/heap/spaces.h"
+#include "src/objects-body-descriptors-inl.h"
+#include "src/objects-body-descriptors.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+bool InvalidatedSlotsFilter::IsValid(Address slot) {
+#ifdef DEBUG
+ DCHECK_LT(slot, sentinel_);
+ // Slots must come in non-decreasing order.
+ DCHECK_LE(last_slot_, slot);
+ last_slot_ = slot;
+#endif
+ while (slot >= invalidated_end_) {
+ ++iterator_;
+ if (iterator_ != iterator_end_) {
+ // Invalidated ranges must not overlap.
+ DCHECK_LE(invalidated_end_, iterator_->first->address());
+ invalidated_start_ = iterator_->first->address();
+ invalidated_end_ = invalidated_start_ + iterator_->second;
+ } else {
+ invalidated_start_ = sentinel_;
+ invalidated_end_ = sentinel_;
+ }
+ }
+ // Now the invalidated region ends after the slot.
+ if (slot < invalidated_start_) {
+ // The invalidated region starts after the slot.
+ return true;
+ }
+ // The invalidated region includes the slot.
+ // Ask the object if the slot is valid.
+ if (invalidated_object_ == nullptr) {
+ invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
+ invalidated_object_size_ =
+ invalidated_object_->SizeFromMap(invalidated_object_->map());
+ }
+ int offset = static_cast<int>(slot - invalidated_start_);
+ DCHECK_GT(offset, 0);
+
+ return offset < invalidated_object_size_ &&
+ invalidated_object_->IsValidSlot(offset);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INVALIDATED_SLOTS_INL_H
diff --git a/src/heap/invalidated-slots.cc b/src/heap/invalidated-slots.cc
new file mode 100644
index 0000000..85430e5
--- /dev/null
+++ b/src/heap/invalidated-slots.cc
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/invalidated-slots.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
+ DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
+ chunk->owner()->identity() == OLD_SPACE);
+ InvalidatedSlots* invalidated_slots =
+ chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
+ iterator_ = invalidated_slots->begin();
+ iterator_end_ = invalidated_slots->end();
+ sentinel_ = chunk->area_end();
+ if (iterator_ != iterator_end_) {
+ invalidated_start_ = iterator_->first->address();
+ invalidated_end_ = invalidated_start_ + iterator_->second;
+ } else {
+ invalidated_start_ = sentinel_;
+ invalidated_end_ = sentinel_;
+ }
+ // These values will be lazily set when needed.
+ invalidated_object_ = nullptr;
+ invalidated_object_size_ = 0;
+#ifdef DEBUG
+ last_slot_ = chunk->area_start();
+#endif
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/src/heap/invalidated-slots.h b/src/heap/invalidated-slots.h
new file mode 100644
index 0000000..6aa4ded
--- /dev/null
+++ b/src/heap/invalidated-slots.h
@@ -0,0 +1,55 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INVALIDATED_SLOTS_H
+#define V8_INVALIDATED_SLOTS_H
+
+#include <map>
+#include <stack>
+
+#include "src/allocation.h"
+#include "src/base/atomic-utils.h"
+#include "src/base/bits.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapObject;
+
+// This data structure stores objects that went through an object layout
+// change that potentially invalidates slots recorded concurrently. The
+// mapped value of each entry is the size of the corresponding object
+// before the layout change.
+using InvalidatedSlots = std::map<HeapObject*, int>;
+
+// This class provides the IsValid predicate that takes into account the set
+// of invalidated objects in the given memory chunk.
+// The sequence of queried slots must be non-decreasing. This allows a fast
+// implementation with complexity O(m*log(m) + n), where
+// m is the number of invalidated objects in the memory chunk and
+// n is the number of IsValid queries.
+class InvalidatedSlotsFilter {
+ public:
+ explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
+ inline bool IsValid(Address slot);
+
+ private:
+ InvalidatedSlots::const_iterator iterator_;
+ InvalidatedSlots::const_iterator iterator_end_;
+ Address sentinel_;
+ Address invalidated_start_;
+ Address invalidated_end_;
+ HeapObject* invalidated_object_;
+ int invalidated_object_size_;
+ InvalidatedSlots empty_;
+#ifdef DEBUG
+ Address last_slot_;
+#endif
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INVALIDATED_SLOTS_H
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 9fa190c..f974f08 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -21,6 +21,8 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/invalidated-slots-inl.h"
+#include "src/heap/invalidated-slots.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/local-allocator.h"
#include "src/heap/mark-compact-inl.h"
@@ -3264,6 +3266,14 @@
heap()->new_space()->set_age_mark(heap()->new_space()->top());
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
+#ifdef DEBUG
+ // Old-to-old slot sets must be empty after evacuation.
+ for (Page* p : *heap()->old_space()) {
+ DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
+ DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
+ DCHECK_NULL(p->invalidated_slots());
+ }
+#endif
}
class Evacuator : public Malloced {
@@ -4131,13 +4141,21 @@
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
- RememberedSet<OLD_TO_OLD>::Iterate(
- chunk_,
- [](Address slot) {
- return UpdateSlot<AccessMode::NON_ATOMIC>(
- reinterpret_cast<Object**>(slot));
- },
- SlotSet::PREFREE_EMPTY_BUCKETS);
+ InvalidatedSlotsFilter filter(chunk_);
+ RememberedSet<OLD_TO_OLD>::Iterate(
+ chunk_,
+ [&filter](Address slot) {
+ if (!filter.IsValid(slot)) return REMOVE_SLOT;
+ return UpdateSlot<AccessMode::NON_ATOMIC>(
+ reinterpret_cast<Object**>(slot));
+ },
+ SlotSet::PREFREE_EMPTY_BUCKETS);
+ }
+ if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
+ chunk_->invalidated_slots() != nullptr) {
+      // The invalidated slots are not needed after the old-to-old slots
+      // have been processed.
+ chunk_->ReleaseInvalidatedSlots();
}
}
diff --git a/src/heap/remembered-set.h b/src/heap/remembered-set.h
index f44d458..6ebedae 100644
--- a/src/heap/remembered-set.h
+++ b/src/heap/remembered-set.h
@@ -120,7 +120,8 @@
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
- if (slots != nullptr || typed_slots != nullptr) {
+ if (slots != nullptr || typed_slots != nullptr ||
+ chunk->invalidated_slots() != nullptr) {
callback(chunk);
}
}
@@ -230,6 +231,7 @@
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
+ chunk->ReleaseInvalidatedSlots();
}
}
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 0a416d1..a7c7b72 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -549,6 +549,7 @@
nullptr);
base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
+ chunk->invalidated_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -1216,6 +1217,7 @@
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
+ ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
}
@@ -1286,6 +1288,28 @@
}
}
+InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
+ DCHECK_NULL(invalidated_slots_);
+ invalidated_slots_ = new InvalidatedSlots();
+ return invalidated_slots_;
+}
+
+void MemoryChunk::ReleaseInvalidatedSlots() {
+ if (invalidated_slots_) {
+ delete invalidated_slots_;
+ invalidated_slots_ = nullptr;
+ }
+}
+
+void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
+ int size) {
+ if (invalidated_slots() == nullptr) {
+ AllocateInvalidatedSlots();
+ }
+ int old_size = (*invalidated_slots())[object];
+ (*invalidated_slots())[object] = std::max(old_size, size);
+}
+
void MemoryChunk::AllocateLocalTracker() {
DCHECK_NULL(local_tracker_);
local_tracker_ = new LocalArrayBufferTracker(heap());
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index f409a50..afac981 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -6,6 +6,7 @@
#define V8_HEAP_SPACES_H_
#include <list>
+#include <map>
#include <memory>
#include <unordered_set>
@@ -19,6 +20,7 @@
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
+#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
#include "src/list.h"
#include "src/objects.h"
@@ -354,7 +356,8 @@
+ kIntptrSize // intptr_t live_byte_count_
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
- + kPointerSize // SkipList* skip_list_
+ + kPointerSize // InvalidatedSlots* invalidated_slots_
+ + kPointerSize // SkipList* skip_list_
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::RecursiveMutex* mutex_
+ kPointerSize // base::AtomicWord concurrent_sweeping_
@@ -472,6 +475,11 @@
template <RememberedSetType type>
void ReleaseTypedSlotSet();
+ InvalidatedSlots* AllocateInvalidatedSlots();
+ void ReleaseInvalidatedSlots();
+ void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
+ InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
+
void AllocateLocalTracker();
void ReleaseLocalTracker();
inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
@@ -631,6 +639,7 @@
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
+ InvalidatedSlots* invalidated_slots_;
SkipList* skip_list_;
diff --git a/src/objects.cc b/src/objects.cc
index 4a47b48..d5696dd 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2598,7 +2598,7 @@
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
if (has_pointers) {
- heap->NotifyObjectLayoutChange(this, no_allocation);
+ heap->NotifyObjectLayoutChange(this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -2674,7 +2674,7 @@
bool has_pointers = StringShape(this).IsIndirect();
if (has_pointers) {
- heap->NotifyObjectLayoutChange(this, no_allocation);
+ heap->NotifyObjectLayoutChange(this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
@@ -3980,7 +3980,9 @@
Heap* heap = isolate->heap();
- heap->NotifyObjectLayoutChange(*object, no_allocation);
+ int old_instance_size = old_map->instance_size();
+
+ heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
@@ -4014,7 +4016,7 @@
// Create filler object past the new instance size.
int new_instance_size = new_map->instance_size();
- int instance_size_delta = old_map->instance_size() - new_instance_size;
+ int instance_size_delta = old_instance_size - new_instance_size;
DCHECK(instance_size_delta >= 0);
if (instance_size_delta > 0) {
@@ -4096,11 +4098,12 @@
DisallowHeapAllocation no_allocation;
Heap* heap = isolate->heap();
- heap->NotifyObjectLayoutChange(*object, no_allocation);
+ int old_instance_size = map->instance_size();
+ heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
- int instance_size_delta = map->instance_size() - new_instance_size;
+ int instance_size_delta = old_instance_size - new_instance_size;
DCHECK(instance_size_delta >= 0);
if (instance_size_delta > 0) {
@@ -17071,11 +17074,11 @@
if (!string->IsInternalizedString()) {
DisallowHeapAllocation no_gc;
- isolate->heap()->NotifyObjectLayoutChange(string, no_gc);
+ int old_size = string->Size();
+ isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
bool one_byte = internalized->IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
- int old_size = string->Size();
DCHECK(old_size >= ThinString::kSize);
string->synchronized_set_map(*map);
ThinString* thin = ThinString::cast(string);
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 2b9d79c..3f4a104 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -160,7 +160,8 @@
// Zap the property to avoid keeping objects alive. Zapping is not necessary
// for properties stored in the descriptor array.
if (details.location() == kField) {
- isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
+ isolate->heap()->NotifyObjectLayoutChange(*receiver, map->instance_size(),
+ no_allocation);
Object* filler = isolate->heap()->one_pointer_filler_map();
FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
diff --git a/src/v8.gyp b/src/v8.gyp
index 07ee64d..0b91565 100644
--- a/src/v8.gyp
+++ b/src/v8.gyp
@@ -1027,6 +1027,9 @@
'heap/incremental-marking-job.h',
'heap/incremental-marking.cc',
'heap/incremental-marking.h',
+ 'heap/invalidated-slots-inl.h',
+ 'heap/invalidated-slots.cc',
+ 'heap/invalidated-slots.h',
'heap/item-parallel-job.h',
'heap/local-allocator.h',
'heap/mark-compact-inl.h',
diff --git a/test/cctest/BUILD.gn b/test/cctest/BUILD.gn
index 8b6404b..4cd9550 100644
--- a/test/cctest/BUILD.gn
+++ b/test/cctest/BUILD.gn
@@ -78,6 +78,7 @@
"heap/test-concurrent-marking.cc",
"heap/test-heap.cc",
"heap/test-incremental-marking.cc",
+ "heap/test-invalidated-slots.cc",
"heap/test-lab.cc",
"heap/test-mark-compact.cc",
"heap/test-page-promotion.cc",
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index ccf79fc..f383374 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -96,6 +96,7 @@
'heap/test-concurrent-marking.cc',
'heap/test-heap.cc',
'heap/test-incremental-marking.cc',
+ 'heap/test-invalidated-slots.cc',
'heap/test-lab.cc',
'heap/test-mark-compact.cc',
'heap/test-page-promotion.cc',
diff --git a/test/cctest/heap/heap-tester.h b/test/cctest/heap/heap-tester.h
index 099a231..51f9430 100644
--- a/test/cctest/heap/heap-tester.h
+++ b/test/cctest/heap/heap-tester.h
@@ -16,6 +16,7 @@
V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
+ V(InvalidatedSlots) \
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
V(MarkCompactCollector) \
diff --git a/test/cctest/heap/test-invalidated-slots.cc b/test/cctest/heap/test-invalidated-slots.cc
new file mode 100644
index 0000000..795b8d9
--- /dev/null
+++ b/test/cctest/heap/test-invalidated-slots.cc
@@ -0,0 +1,96 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/invalidated-slots-inl.h"
+#include "src/heap/invalidated-slots.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/heap/heap-tester.h"
+#include "test/cctest/heap/heap-utils.h"
+
+namespace v8 {
+namespace internal {
+
+HEAP_TEST(InvalidatedSlots) {
+ CcTest::InitializeVM();
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = heap->isolate();
+ PagedSpace* old_space = heap->old_space();
+ Page* page;
+ std::vector<ByteArray*> byte_arrays;
+ const int kLength = 256 - ByteArray::kHeaderSize;
+ const int kSize = ByteArray::SizeFor(kLength);
+ CHECK_EQ(kSize, 256);
+ // Fill a page with byte arrays.
+ {
+ AlwaysAllocateScope always_allocate(isolate);
+ heap::SimulateFullSpace(old_space);
+ ByteArray* byte_array;
+ CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
+ byte_arrays.push_back(byte_array);
+ page = Page::FromAddress(byte_array->address());
+ CHECK_EQ(page->area_size() % kSize, 0u);
+ size_t n = page->area_size() / kSize;
+ for (size_t i = 1; i < n; i++) {
+ CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
+ byte_arrays.push_back(byte_array);
+ CHECK_EQ(page, Page::FromAddress(byte_array->address()));
+ }
+ }
+ CHECK_NULL(page->invalidated_slots());
+ {
+ // Without invalidated slots on the page, the filter considers
+ // all slots as valid.
+ InvalidatedSlotsFilter filter(page);
+ for (auto byte_array : byte_arrays) {
+ Address start = byte_array->address() + ByteArray::kHeaderSize;
+ Address end = byte_array->address() + kSize;
+ for (Address addr = start; addr < end; addr += kPointerSize) {
+ CHECK(filter.IsValid(addr));
+ }
+ }
+ }
+  // Register every second byte array as invalidated.
+ for (size_t i = 0; i < byte_arrays.size(); i += 2) {
+ page->RegisterObjectWithInvalidatedSlots(byte_arrays[i], kSize);
+ }
+ {
+ InvalidatedSlotsFilter filter(page);
+ for (size_t i = 0; i < byte_arrays.size(); i++) {
+ ByteArray* byte_array = byte_arrays[i];
+ Address start = byte_array->address() + ByteArray::kHeaderSize;
+ Address end = byte_array->address() + kSize;
+ for (Address addr = start; addr < end; addr += kPointerSize) {
+ if (i % 2 == 0) {
+ CHECK(!filter.IsValid(addr));
+ } else {
+ CHECK(filter.IsValid(addr));
+ }
+ }
+ }
+ }
+ // Register the remaining byte arrays as invalidated.
+ for (size_t i = 1; i < byte_arrays.size(); i += 2) {
+ page->RegisterObjectWithInvalidatedSlots(byte_arrays[i], kSize);
+ }
+ {
+ InvalidatedSlotsFilter filter(page);
+ for (size_t i = 0; i < byte_arrays.size(); i++) {
+ ByteArray* byte_array = byte_arrays[i];
+ Address start = byte_array->address() + ByteArray::kHeaderSize;
+ Address end = byte_array->address() + kSize;
+ for (Address addr = start; addr < end; addr += kPointerSize) {
+ CHECK(!filter.IsValid(addr));
+ }
+ }
+ }
+}
+
+} // namespace internal
+} // namespace v8