Skip to content

Commit 4e8736d

Browse files
mlippautz
authored and
Commit bot
committed Apr 25, 2016
[heap] Merge NewSpacePage into Page
BUG=chromium:581412 LOG=N Review URL: https://codereview.chromium.org/1900423002 Cr-Commit-Position: refs/heads/master@{#35768}
1 parent fac7361 commit 4e8736d

12 files changed

+250
-343
lines changed
 

‎src/heap/heap-inl.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -395,7 +395,7 @@ bool Heap::OldGenerationAllocationLimitReached() {
395395

396396

397397
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
398-
NewSpacePage* page = NewSpacePage::FromAddress(old_address);
398+
Page* page = Page::FromAddress(old_address);
399399
Address age_mark = new_space_.age_mark();
400400
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
401401
(!page->ContainsLimit(age_mark) || old_address < age_mark);
@@ -476,7 +476,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
476476
Address object_address = object->address();
477477
Address memento_address = object_address + object->Size();
478478
Address last_memento_word_address = memento_address + kPointerSize;
479-
if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
479+
if (!Page::OnSamePage(object_address, last_memento_word_address)) {
480480
return nullptr;
481481
}
482482
HeapObject* candidate = HeapObject::FromAddress(memento_address);
@@ -504,7 +504,7 @@ AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
504504
top = NewSpaceTop();
505505
DCHECK(memento_address == top ||
506506
memento_address + HeapObject::kHeaderSize <= top ||
507-
!NewSpacePage::OnSamePage(memento_address, top - 1));
507+
!Page::OnSamePage(memento_address, top - 1));
508508
if ((memento_address != top) && memento_candidate->IsValid()) {
509509
return memento_candidate;
510510
}

‎src/heap/heap.cc

+9-7
Original file line numberDiff line numberDiff line change
@@ -945,7 +945,7 @@ void Heap::EnsureFillerObjectAtTop() {
945945
// may be uninitialized memory behind top. We fill the remainder of the page
946946
// with a filler.
947947
Address to_top = new_space_.top();
948-
NewSpacePage* page = NewSpacePage::FromAddress(to_top - kPointerSize);
948+
Page* page = Page::FromAddress(to_top - kPointerSize);
949949
if (page->Contains(to_top)) {
950950
int remaining_in_page = static_cast<int>(page->area_end() - to_top);
951951
CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo);
@@ -1552,15 +1552,16 @@ void PromotionQueue::Initialize() {
15521552
front_ = rear_ =
15531553
reinterpret_cast<struct Entry*>(heap_->new_space()->ToSpaceEnd());
15541554
limit_ = reinterpret_cast<struct Entry*>(
1555-
Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start());
1555+
Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_))
1556+
->area_start());
15561557
emergency_stack_ = NULL;
15571558
}
15581559

15591560

15601561
void PromotionQueue::RelocateQueueHead() {
15611562
DCHECK(emergency_stack_ == NULL);
15621563

1563-
Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
1564+
Page* p = Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
15641565
struct Entry* head_start = rear_;
15651566
struct Entry* head_end =
15661567
Min(front_, reinterpret_cast<struct Entry*>(p->area_end()));
@@ -1909,13 +1910,14 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
19091910
// queue of unprocessed copied objects. Process them until the
19101911
// queue is empty.
19111912
while (new_space_front != new_space_.top()) {
1912-
if (!NewSpacePage::IsAtEnd(new_space_front)) {
1913+
if (!Page::IsAlignedToPageSize(new_space_front)) {
19131914
HeapObject* object = HeapObject::FromAddress(new_space_front);
19141915
new_space_front +=
19151916
StaticScavengeVisitor::IterateBody(object->map(), object);
19161917
} else {
1917-
new_space_front =
1918-
NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
1918+
new_space_front = Page::FromAllocationAreaAddress(new_space_front)
1919+
->next_page()
1920+
->area_start();
19191921
}
19201922
}
19211923

@@ -4629,7 +4631,7 @@ void Heap::ZapFromSpace() {
46294631
NewSpacePageIterator it(new_space_.FromSpaceStart(),
46304632
new_space_.FromSpaceEnd());
46314633
while (it.has_next()) {
4632-
NewSpacePage* page = it.next();
4634+
Page* page = it.next();
46334635
for (Address cursor = page->area_start(), limit = page->area_end();
46344636
cursor < limit; cursor += kPointerSize) {
46354637
Memory::Address_at(cursor) = kFromSpaceZapValue;

‎src/heap/heap.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -322,15 +322,15 @@ class PromotionQueue {
322322
}
323323

324324
Page* GetHeadPage() {
325-
return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
325+
return Page::FromAllocationAreaAddress(reinterpret_cast<Address>(rear_));
326326
}
327327

328328
void SetNewLimit(Address limit) {
329329
// If we are already using an emergency stack, we can ignore it.
330330
if (emergency_stack_) return;
331331

332332
// If the limit is not on the same page, we can ignore it.
333-
if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
333+
if (Page::FromAllocationAreaAddress(limit) != GetHeadPage()) return;
334334

335335
limit_ = reinterpret_cast<struct Entry*>(limit);
336336

‎src/heap/incremental-marking.cc

+4-4
Original file line numberDiff line numberDiff line change
@@ -348,7 +348,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
348348
NewSpace* space) {
349349
NewSpacePageIterator it(space);
350350
while (it.has_next()) {
351-
NewSpacePage* p = it.next();
351+
Page* p = it.next();
352352
SetNewSpacePageFlags(p, false);
353353
}
354354
}
@@ -361,7 +361,7 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
361361
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
362362

363363
LargePage* lop = heap_->lo_space()->first_page();
364-
while (lop->is_valid()) {
364+
while (LargePage::IsValid(lop)) {
365365
SetOldSpacePageFlags(lop, false, false);
366366
lop = lop->next_page();
367367
}
@@ -380,7 +380,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
380380
void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
381381
NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
382382
while (it.has_next()) {
383-
NewSpacePage* p = it.next();
383+
Page* p = it.next();
384384
SetNewSpacePageFlags(p, true);
385385
}
386386
}
@@ -393,7 +393,7 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
393393
ActivateIncrementalWriteBarrier(heap_->new_space());
394394

395395
LargePage* lop = heap_->lo_space()->first_page();
396-
while (lop->is_valid()) {
396+
while (LargePage::IsValid(lop)) {
397397
SetOldSpacePageFlags(lop, true, is_compacting_);
398398
lop = lop->next_page();
399399
}

‎src/heap/incremental-marking.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,7 @@ class IncrementalMarking {
185185
SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
186186
}
187187

188-
inline void SetNewSpacePageFlags(MemoryChunk* chunk) {
188+
inline void SetNewSpacePageFlags(Page* chunk) {
189189
SetNewSpacePageFlags(chunk, IsMarking());
190190
}
191191

‎src/heap/mark-compact.cc

+24-26
Original file line numberDiff line numberDiff line change
@@ -134,10 +134,9 @@ static void VerifyMarking(NewSpace* space) {
134134
NewSpacePageIterator it(space->bottom(), end);
135135
// The bottom position is at the start of its page. Allows us to use
136136
// page->area_start() as start of range on all pages.
137-
CHECK_EQ(space->bottom(),
138-
NewSpacePage::FromAddress(space->bottom())->area_start());
137+
CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
139138
while (it.has_next()) {
140-
NewSpacePage* page = it.next();
139+
Page* page = it.next();
141140
Address limit = it.has_next() ? page->area_end() : end;
142141
CHECK(limit == end || !page->Contains(end));
143142
VerifyMarking(space->heap(), page->area_start(), limit);
@@ -209,7 +208,7 @@ static void VerifyEvacuation(NewSpace* space) {
209208
VerifyEvacuationVisitor visitor;
210209

211210
while (it.has_next()) {
212-
NewSpacePage* page = it.next();
211+
Page* page = it.next();
213212
Address current = page->area_start();
214213
Address limit = it.has_next() ? page->area_end() : space->top();
215214
CHECK(limit == space->top() || !page->Contains(space->top()));
@@ -375,7 +374,7 @@ void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
375374
NewSpacePageIterator it(space->bottom(), space->top());
376375

377376
while (it.has_next()) {
378-
NewSpacePage* p = it.next();
377+
Page* p = it.next();
379378
CHECK(p->markbits()->IsClean());
380379
CHECK_EQ(0, p->LiveBytes());
381380
}
@@ -1803,9 +1802,9 @@ class MarkCompactCollector::EvacuateNewSpacePageVisitor final
18031802
public:
18041803
EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
18051804

1806-
static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
1805+
static void MoveToOldSpace(Page* page, PagedSpace* owner) {
18071806
page->heap()->new_space()->ReplaceWithEmptyPage(page);
1808-
Page* new_page = Page::Convert(page, owner);
1807+
Page* new_page = Page::ConvertNewToOld(page, owner);
18091808
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
18101809
}
18111810

@@ -1884,7 +1883,7 @@ void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
18841883
NewSpace* space = heap()->new_space();
18851884
NewSpacePageIterator it(space->bottom(), space->top());
18861885
while (it.has_next()) {
1887-
NewSpacePage* page = it.next();
1886+
Page* page = it.next();
18881887
DiscoverGreyObjectsOnPage(page);
18891888
if (marking_deque()->IsFull()) return;
18901889
}
@@ -3050,9 +3049,8 @@ class MarkCompactCollector::Evacuator : public Malloced {
30503049
// evacuation.
30513050
static int PageEvacuationThreshold() {
30523051
if (FLAG_page_promotion)
3053-
return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
3054-
100;
3055-
return NewSpacePage::kAllocatableMemory + kPointerSize;
3052+
return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
3053+
return Page::kAllocatableMemory + kPointerSize;
30563054
}
30573055

30583056
explicit Evacuator(MarkCompactCollector* collector)
@@ -3067,7 +3065,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
30673065
duration_(0.0),
30683066
bytes_compacted_(0) {}
30693067

3070-
inline bool EvacuatePage(MemoryChunk* chunk);
3068+
inline bool EvacuatePage(Page* chunk);
30713069

30723070
// Merge back locally cached info sequentially. Note that this method needs
30733071
// to be called from the main thread.
@@ -3101,7 +3099,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
31013099
}
31023100

31033101
template <IterationMode mode, class Visitor>
3104-
inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
3102+
inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
31053103

31063104
MarkCompactCollector* collector_;
31073105

@@ -3120,7 +3118,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
31203118
};
31213119

31223120
template <MarkCompactCollector::IterationMode mode, class Visitor>
3123-
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
3121+
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
31243122
Visitor* visitor) {
31253123
bool success = false;
31263124
DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
@@ -3154,28 +3152,27 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
31543152
return success;
31553153
}
31563154

3157-
bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
3155+
bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
31583156
bool result = false;
3159-
DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
3160-
NewSpacePage::kSweepingDone);
3161-
switch (ComputeEvacuationMode(chunk)) {
3157+
DCHECK(page->SweepingDone());
3158+
switch (ComputeEvacuationMode(page)) {
31623159
case kObjectsNewToOld:
3163-
result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
3160+
result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
31643161
DCHECK(result);
31653162
USE(result);
31663163
break;
31673164
case kPageNewToOld:
3168-
result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
3165+
result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
31693166
DCHECK(result);
31703167
USE(result);
31713168
break;
31723169
case kObjectsOldToOld:
3173-
result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
3170+
result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
31743171
if (!result) {
31753172
// Aborted compaction page. We can record slots here to have them
31763173
// processed in parallel later on.
3177-
EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
3178-
result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
3174+
EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
3175+
result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
31793176
DCHECK(result);
31803177
USE(result);
31813178
// We need to return failure here to indicate that we want this page
@@ -3244,7 +3241,7 @@ class EvacuationJobTraits {
32443241

32453242
static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
32463243
MemoryChunk* chunk, PerPageData) {
3247-
return evacuator->EvacuatePage(chunk);
3244+
return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
32483245
}
32493246

32503247
static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
@@ -3288,8 +3285,9 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
32883285
live_bytes += page->LiveBytes();
32893286
job.AddPage(page, &abandoned_pages);
32903287
}
3288+
32913289
const Address age_mark = heap()->new_space()->age_mark();
3292-
for (NewSpacePage* page : newspace_evacuation_candidates_) {
3290+
for (Page* page : newspace_evacuation_candidates_) {
32933291
live_bytes += page->LiveBytes();
32943292
if (!page->NeverEvacuate() &&
32953293
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
@@ -3674,7 +3672,7 @@ void UpdateToSpacePointersInParallel(Heap* heap) {
36743672
Address space_end = heap->new_space()->top();
36753673
NewSpacePageIterator it(space_start, space_end);
36763674
while (it.has_next()) {
3677-
NewSpacePage* page = it.next();
3675+
Page* page = it.next();
36783676
Address start =
36793677
page->Contains(space_start) ? space_start : page->area_start();
36803678
Address end = page->Contains(space_end) ? space_end : page->area_end();

‎src/heap/mark-compact.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -862,7 +862,7 @@ class MarkCompactCollector {
862862
bool have_code_to_deoptimize_;
863863

864864
List<Page*> evacuation_candidates_;
865-
List<NewSpacePage*> newspace_evacuation_candidates_;
865+
List<Page*> newspace_evacuation_candidates_;
866866

867867
// True if we are collecting slots to perform evacuation from evacuation
868868
// candidates.

‎src/heap/spaces-inl.h

+26-39
Original file line numberDiff line numberDiff line change
@@ -56,8 +56,8 @@ Page* PageIterator::next() {
5656

5757
HeapObject* SemiSpaceIterator::Next() {
5858
while (current_ != limit_) {
59-
if (NewSpacePage::IsAtEnd(current_)) {
60-
NewSpacePage* page = NewSpacePage::FromLimit(current_);
59+
if (Page::IsAlignedToPageSize(current_)) {
60+
Page* page = Page::FromAllocationAreaAddress(current_);
6161
page = page->next_page();
6262
DCHECK(!page->is_anchor());
6363
current_ = page->area_start();
@@ -80,27 +80,26 @@ HeapObject* SemiSpaceIterator::next_object() { return Next(); }
8080
// NewSpacePageIterator
8181

8282
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
83-
: prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
84-
next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
85-
last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
83+
: prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
84+
next_page_(Page::FromAddress(space->ToSpaceStart())),
85+
last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}
8686

8787
NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
8888
: prev_page_(space->anchor()),
8989
next_page_(prev_page_->next_page()),
9090
last_page_(prev_page_->prev_page()) {}
9191

9292
NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
93-
: prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
94-
next_page_(NewSpacePage::FromAddress(start)),
95-
last_page_(NewSpacePage::FromLimit(limit)) {
93+
: prev_page_(Page::FromAddress(start)->prev_page()),
94+
next_page_(Page::FromAddress(start)),
95+
last_page_(Page::FromAllocationAreaAddress(limit)) {
9696
SemiSpace::AssertValidRange(start, limit);
9797
}
9898

9999

100100
bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
101101

102-
103-
NewSpacePage* NewSpacePageIterator::next() {
102+
Page* NewSpacePageIterator::next() {
104103
DCHECK(has_next());
105104
prev_page_ = next_page_;
106105
next_page_ = next_page_->next_page();
@@ -244,20 +243,18 @@ bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
244243
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }
245244

246245
size_t NewSpace::AllocatedSinceLastGC() {
247-
const intptr_t age_mark_offset =
248-
NewSpacePage::OffsetInPage(to_space_.age_mark());
249-
const intptr_t top_offset =
250-
NewSpacePage::OffsetInPage(allocation_info_.top());
246+
const intptr_t age_mark_offset = Page::OffsetInPage(to_space_.age_mark());
247+
const intptr_t top_offset = Page::OffsetInPage(allocation_info_.top());
251248
const intptr_t age_mark_delta =
252-
age_mark_offset >= NewSpacePage::kObjectStartOffset
253-
? age_mark_offset - NewSpacePage::kObjectStartOffset
254-
: NewSpacePage::kAllocatableMemory;
255-
const intptr_t top_delta = top_offset >= NewSpacePage::kObjectStartOffset
256-
? top_offset - NewSpacePage::kObjectStartOffset
257-
: NewSpacePage::kAllocatableMemory;
249+
age_mark_offset >= Page::kObjectStartOffset
250+
? age_mark_offset - Page::kObjectStartOffset
251+
: Page::kAllocatableMemory;
252+
const intptr_t top_delta = top_offset >= Page::kObjectStartOffset
253+
? top_offset - Page::kObjectStartOffset
254+
: Page::kAllocatableMemory;
258255
DCHECK((allocated_since_last_gc_ > 0) ||
259-
(NewSpacePage::FromLimit(allocation_info_.top()) ==
260-
NewSpacePage::FromLimit(to_space_.age_mark())));
256+
(Page::FromAllocationAreaAddress(allocation_info_.top()) ==
257+
Page::FromAllocationAreaAddress(to_space_.age_mark())));
261258
return static_cast<size_t>(allocated_since_last_gc_ + top_delta -
262259
age_mark_delta);
263260
}
@@ -270,16 +267,15 @@ AllocationSpace AllocationResult::RetrySpace() {
270267
return static_cast<AllocationSpace>(Smi::cast(object_)->value());
271268
}
272269

273-
NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
274-
Executability executable,
275-
SemiSpace* owner) {
270+
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
271+
SemiSpace* owner) {
276272
DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
277273
bool in_to_space = (owner->id() != kFromSpace);
278274
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
279275
: MemoryChunk::IN_FROM_SPACE);
280276
DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
281277
: MemoryChunk::IN_TO_SPACE));
282-
NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
278+
Page* page = static_cast<Page*>(chunk);
283279
heap->incremental_marking()->SetNewSpacePageFlags(page);
284280
return page;
285281
}
@@ -309,7 +305,8 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
309305
return page;
310306
}
311307

312-
Page* Page::Convert(NewSpacePage* old_page, PagedSpace* new_owner) {
308+
Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
309+
DCHECK(old_page->InNewSpace());
313310
old_page->set_owner(new_owner);
314311
old_page->SetFlags(0, ~0);
315312
new_owner->AccountCommitted(old_page->size());
@@ -359,14 +356,14 @@ void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
359356

360357
bool PagedSpace::Contains(Address addr) {
361358
Page* p = Page::FromAddress(addr);
362-
if (!p->is_valid()) return false;
359+
if (!Page::IsValid(p)) return false;
363360
return p->owner() == this;
364361
}
365362

366363
bool PagedSpace::Contains(Object* o) {
367364
if (!o->IsHeapObject()) return false;
368365
Page* p = Page::FromAddress(HeapObject::cast(o)->address());
369-
if (!p->is_valid()) return false;
366+
if (!Page::IsValid(p)) return false;
370367
return p->owner() == this;
371368
}
372369

@@ -472,16 +469,6 @@ MemoryChunk* MemoryChunkIterator::next() {
472469
return nullptr;
473470
}
474471

475-
void Page::set_next_page(Page* page) {
476-
DCHECK(page->owner() == owner());
477-
set_next_chunk(page);
478-
}
479-
480-
void Page::set_prev_page(Page* page) {
481-
DCHECK(page->owner() == owner());
482-
set_prev_chunk(page);
483-
}
484-
485472
Page* FreeListCategory::page() {
486473
return Page::FromAddress(reinterpret_cast<Address>(this));
487474
}

‎src/heap/spaces.cc

+71-84
Original file line numberDiff line numberDiff line change
@@ -425,20 +425,12 @@ Address MemoryAllocator::AllocateAlignedMemory(
425425
return base;
426426
}
427427

428-
429-
void Page::InitializeAsAnchor(PagedSpace* owner) {
430-
set_owner(owner);
431-
set_prev_page(this);
432-
set_next_page(this);
433-
}
434-
435-
void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
436-
set_owner(semi_space);
428+
void Page::InitializeAsAnchor(Space* space) {
429+
set_owner(space);
437430
set_next_chunk(this);
438431
set_prev_chunk(this);
439-
// Flags marks this invalid page as not being in new-space.
440-
// All real new-space pages will be in new-space.
441432
SetFlags(0, ~0);
433+
SetFlag(ANCHOR);
442434
}
443435

444436
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
@@ -772,12 +764,11 @@ template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
772764
template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
773765
MemoryChunk* chunk);
774766

775-
template <typename PageType, MemoryAllocator::AllocationMode mode,
776-
typename SpaceType>
777-
PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
778-
Executability executable) {
767+
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
768+
Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
769+
Executability executable) {
779770
MemoryChunk* chunk = nullptr;
780-
if (mode == kPooled) {
771+
if (alloc_mode == kPooled) {
781772
DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
782773
DCHECK_EQ(executable, NOT_EXECUTABLE);
783774
chunk = AllocatePagePooled(owner);
@@ -786,21 +777,27 @@ PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
786777
chunk = AllocateChunk(size, size, executable, owner);
787778
}
788779
if (chunk == nullptr) return nullptr;
789-
return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
780+
return Page::Initialize(isolate_->heap(), chunk, executable, owner);
781+
}
782+
783+
template Page*
784+
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
785+
intptr_t size, PagedSpace* owner, Executability executable);
786+
template Page*
787+
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
788+
intptr_t size, SemiSpace* owner, Executability executable);
789+
template Page*
790+
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
791+
intptr_t size, SemiSpace* owner, Executability executable);
792+
793+
LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
794+
LargeObjectSpace* owner,
795+
Executability executable) {
796+
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
797+
if (chunk == nullptr) return nullptr;
798+
return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
790799
}
791800

792-
template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
793-
PagedSpace>(intptr_t, PagedSpace*,
794-
Executability);
795-
796-
template LargePage*
797-
MemoryAllocator::AllocatePage<LargePage, MemoryAllocator::kRegular, Space>(
798-
intptr_t, Space*, Executability);
799-
800-
template NewSpacePage* MemoryAllocator::AllocatePage<
801-
NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
802-
Executability);
803-
804801
template <typename SpaceType>
805802
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
806803
if (chunk_pool_.is_empty()) return nullptr;
@@ -1041,13 +1038,11 @@ void Space::AllocationStep(Address soon_object, int size) {
10411038

10421039
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
10431040
Executability executable)
1044-
: Space(heap, space, executable), free_list_(this) {
1041+
: Space(heap, space, executable), anchor_(this), free_list_(this) {
10451042
area_size_ = MemoryAllocator::PageAreaSize(space);
10461043
accounting_stats_.Clear();
10471044

10481045
allocation_info_.Reset(nullptr, nullptr);
1049-
1050-
anchor_.InitializeAsAnchor(this);
10511046
}
10521047

10531048

@@ -1180,8 +1175,7 @@ bool PagedSpace::Expand() {
11801175

11811176
if (!heap()->CanExpandOldGeneration(size)) return false;
11821177

1183-
Page* p =
1184-
heap()->memory_allocator()->AllocatePage<Page>(size, this, executable());
1178+
Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
11851179
if (p == nullptr) return false;
11861180

11871181
AccountCommitted(static_cast<intptr_t>(p->size()));
@@ -1240,7 +1234,7 @@ void PagedSpace::ReleasePage(Page* page) {
12401234
free_list_.EvictFreeListItems(page);
12411235
DCHECK(!free_list_.ContainsPageFreeListItems(page));
12421236

1243-
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1237+
if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
12441238
allocation_info_.Reset(nullptr, nullptr);
12451239
}
12461240

@@ -1269,7 +1263,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
12691263
while (page_iterator.has_next()) {
12701264
Page* page = page_iterator.next();
12711265
CHECK(page->owner() == this);
1272-
if (page == Page::FromAllocationTop(allocation_info_.top())) {
1266+
if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
12731267
allocation_pointer_found_in_space = true;
12741268
}
12751269
CHECK(page->SweepingDone());
@@ -1488,22 +1482,22 @@ void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
14881482

14891483
bool NewSpace::AddFreshPage() {
14901484
Address top = allocation_info_.top();
1491-
DCHECK(!NewSpacePage::IsAtStart(top));
1485+
DCHECK(!Page::IsAtObjectStart(top));
14921486
if (!to_space_.AdvancePage()) {
14931487
// No more pages left to advance.
14941488
return false;
14951489
}
14961490

14971491
// Clear remainder of current page.
1498-
Address limit = NewSpacePage::FromLimit(top)->area_end();
1492+
Address limit = Page::FromAllocationAreaAddress(top)->area_end();
14991493
if (heap()->gc_state() == Heap::SCAVENGE) {
15001494
heap()->promotion_queue()->SetNewLimit(limit);
15011495
}
15021496

15031497
int remaining_in_page = static_cast<int>(limit - top);
15041498
heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
15051499
pages_used_++;
1506-
allocated_since_last_gc_ += NewSpacePage::kAllocatableMemory;
1500+
allocated_since_last_gc_ += Page::kAllocatableMemory;
15071501
UpdateAllocationInfo();
15081502

15091503
return true;
@@ -1622,9 +1616,9 @@ void NewSpace::Verify() {
16221616
CHECK_EQ(current, to_space_.space_start());
16231617

16241618
while (current != top()) {
1625-
if (!NewSpacePage::IsAtEnd(current)) {
1619+
if (!Page::IsAlignedToPageSize(current)) {
16261620
// The allocation pointer should not be in the middle of an object.
1627-
CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1621+
CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
16281622
current < top());
16291623

16301624
HeapObject* object = HeapObject::FromAddress(current);
@@ -1650,7 +1644,7 @@ void NewSpace::Verify() {
16501644
current += size;
16511645
} else {
16521646
// At end of page, switch to next page.
1653-
NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1647+
Page* page = Page::FromAllocationAreaAddress(current)->next_page();
16541648
// Next page should be valid.
16551649
CHECK(!page->is_anchor());
16561650
current = page->area_start();
@@ -1686,14 +1680,12 @@ void SemiSpace::TearDown() {
16861680

16871681
bool SemiSpace::Commit() {
16881682
DCHECK(!is_committed());
1689-
NewSpacePage* current = anchor();
1683+
Page* current = anchor();
16901684
const int num_pages = current_capacity_ / Page::kPageSize;
16911685
for (int pages_added = 0; pages_added < num_pages; pages_added++) {
1692-
NewSpacePage* new_page =
1693-
heap()
1694-
->memory_allocator()
1695-
->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
1696-
NewSpacePage::kAllocatableMemory, this, executable());
1686+
Page* new_page =
1687+
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1688+
Page::kAllocatableMemory, this, executable());
16971689
if (new_page == nullptr) {
16981690
RewindPages(current, pages_added);
16991691
return false;
@@ -1740,39 +1732,36 @@ bool SemiSpace::GrowTo(int new_capacity) {
17401732
if (!is_committed()) {
17411733
if (!Commit()) return false;
17421734
}
1743-
DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
1735+
DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
17441736
DCHECK_LE(new_capacity, maximum_capacity_);
17451737
DCHECK_GT(new_capacity, current_capacity_);
17461738
const int delta = new_capacity - current_capacity_;
17471739
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1748-
const int delta_pages = delta / NewSpacePage::kPageSize;
1749-
NewSpacePage* last_page = anchor()->prev_page();
1740+
const int delta_pages = delta / Page::kPageSize;
1741+
Page* last_page = anchor()->prev_page();
17501742
DCHECK_NE(last_page, anchor());
17511743
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
1752-
NewSpacePage* new_page =
1753-
heap()
1754-
->memory_allocator()
1755-
->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
1756-
NewSpacePage::kAllocatableMemory, this, executable());
1744+
Page* new_page =
1745+
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1746+
Page::kAllocatableMemory, this, executable());
17571747
if (new_page == nullptr) {
17581748
RewindPages(last_page, pages_added);
17591749
return false;
17601750
}
17611751
new_page->InsertAfter(last_page);
17621752
Bitmap::Clear(new_page);
17631753
// Duplicate the flags that was set on the old page.
1764-
new_page->SetFlags(last_page->GetFlags(),
1765-
NewSpacePage::kCopyOnFlipFlagsMask);
1754+
new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
17661755
last_page = new_page;
17671756
}
17681757
AccountCommitted(static_cast<intptr_t>(delta));
17691758
current_capacity_ = new_capacity;
17701759
return true;
17711760
}
17721761

1773-
void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
1774-
NewSpacePage* new_last_page = nullptr;
1775-
NewSpacePage* last_page = start;
1762+
void SemiSpace::RewindPages(Page* start, int num_pages) {
1763+
Page* new_last_page = nullptr;
1764+
Page* last_page = start;
17761765
while (num_pages > 0) {
17771766
DCHECK_NE(last_page, anchor());
17781767
new_last_page = last_page->prev_page();
@@ -1784,15 +1773,15 @@ void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
17841773
}
17851774

17861775
bool SemiSpace::ShrinkTo(int new_capacity) {
1787-
DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
1776+
DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
17881777
DCHECK_GE(new_capacity, minimum_capacity_);
17891778
DCHECK_LT(new_capacity, current_capacity_);
17901779
if (is_committed()) {
17911780
const int delta = current_capacity_ - new_capacity;
17921781
DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1793-
int delta_pages = delta / NewSpacePage::kPageSize;
1794-
NewSpacePage* new_last_page;
1795-
NewSpacePage* last_page;
1782+
int delta_pages = delta / Page::kPageSize;
1783+
Page* new_last_page;
1784+
Page* last_page;
17961785
while (delta_pages > 0) {
17971786
last_page = anchor()->prev_page();
17981787
new_last_page = last_page->prev_page();
@@ -1809,13 +1798,12 @@ bool SemiSpace::ShrinkTo(int new_capacity) {
18091798

18101799
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
18111800
anchor_.set_owner(this);
1812-
// Fixup back-pointers to anchor. Address of anchor changes when we swap.
18131801
anchor_.prev_page()->set_next_page(&anchor_);
18141802
anchor_.next_page()->set_prev_page(&anchor_);
18151803

18161804
NewSpacePageIterator it(this);
18171805
while (it.has_next()) {
1818-
NewSpacePage* page = it.next();
1806+
Page* page = it.next();
18191807
page->set_owner(this);
18201808
page->SetFlags(flags, mask);
18211809
if (id_ == kToSpace) {
@@ -1838,12 +1826,11 @@ void SemiSpace::Reset() {
18381826
current_page_ = anchor_.next_page();
18391827
}
18401828

1841-
void SemiSpace::ReplaceWithEmptyPage(NewSpacePage* old_page) {
1842-
NewSpacePage* new_page =
1843-
heap()->memory_allocator()->AllocatePage<NewSpacePage>(
1844-
NewSpacePage::kAllocatableMemory, this, executable());
1829+
void SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
1830+
Page* new_page = heap()->memory_allocator()->AllocatePage(
1831+
Page::kAllocatableMemory, this, executable());
18451832
Bitmap::Clear(new_page);
1846-
new_page->SetFlags(old_page->GetFlags(), NewSpacePage::kCopyAllFlags);
1833+
new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
18471834
new_page->set_next_page(old_page->next_page());
18481835
new_page->set_prev_page(old_page->prev_page());
18491836
old_page->next_page()->set_prev_page(new_page);
@@ -1868,13 +1855,13 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
18681855
std::swap(from->anchor_, to->anchor_);
18691856
std::swap(from->current_page_, to->current_page_);
18701857

1871-
to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
1858+
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
18721859
from->FixPagesFlags(0, 0);
18731860
}
18741861

18751862

18761863
void SemiSpace::set_age_mark(Address mark) {
1877-
DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
1864+
DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
18781865
age_mark_ = mark;
18791866
// Mark all pages up to the one containing mark.
18801867
NewSpacePageIterator it(space_start(), mark);
@@ -1891,10 +1878,10 @@ void SemiSpace::Print() {}
18911878
#ifdef VERIFY_HEAP
18921879
void SemiSpace::Verify() {
18931880
bool is_from_space = (id_ == kFromSpace);
1894-
NewSpacePage* page = anchor_.next_page();
1895-
CHECK(anchor_.semi_space() == this);
1881+
Page* page = anchor_.next_page();
1882+
CHECK(anchor_.owner() == this);
18961883
while (page != &anchor_) {
1897-
CHECK_EQ(page->semi_space(), this);
1884+
CHECK_EQ(page->owner(), this);
18981885
CHECK(page->InNewSpace());
18991886
CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
19001887
: MemoryChunk::IN_TO_SPACE));
@@ -1922,10 +1909,10 @@ void SemiSpace::Verify() {
19221909
#ifdef DEBUG
19231910
void SemiSpace::AssertValidRange(Address start, Address end) {
19241911
// Addresses belong to same semi-space
1925-
NewSpacePage* page = NewSpacePage::FromLimit(start);
1926-
NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1927-
SemiSpace* space = page->semi_space();
1928-
CHECK_EQ(space, end_page->semi_space());
1912+
Page* page = Page::FromAllocationAreaAddress(start);
1913+
Page* end_page = Page::FromAllocationAreaAddress(end);
1914+
SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
1915+
CHECK_EQ(space, end_page->owner());
19291916
// Start address is before end address, either on same page,
19301917
// or end address is on a later page in the linked list of
19311918
// semi-space pages.
@@ -2599,7 +2586,7 @@ void PagedSpace::RepairFreeListsAfterDeserialization() {
25992586
void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
26002587
if (allocation_info_.top() >= allocation_info_.limit()) return;
26012588

2602-
if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
2589+
if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
26032590
// Create filler object to keep page iterable if it was iterable.
26042591
int remaining =
26052592
static_cast<int>(allocation_info_.limit() - allocation_info_.top());
@@ -2908,7 +2895,7 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
29082895
return AllocationResult::Retry(identity());
29092896
}
29102897

2911-
LargePage* page = heap()->memory_allocator()->AllocatePage<LargePage>(
2898+
LargePage* page = heap()->memory_allocator()->AllocateLargePage(
29122899
object_size, this, executable);
29132900
if (page == NULL) return AllocationResult::Retry(identity());
29142901
DCHECK(page->area_size() >= object_size);
@@ -2977,7 +2964,7 @@ LargePage* LargeObjectSpace::FindPage(Address a) {
29772964
if (e != NULL) {
29782965
DCHECK(e->value != NULL);
29792966
LargePage* page = reinterpret_cast<LargePage*>(e->value);
2980-
DCHECK(page->is_valid());
2967+
DCHECK(LargePage::IsValid(page));
29812968
if (page->Contains(a)) {
29822969
return page;
29832970
}

‎src/heap/spaces.h

+102-168
Large diffs are not rendered by default.

‎test/cctest/heap/test-heap.cc

+2-3
Original file line numberDiff line numberDiff line change
@@ -6622,15 +6622,14 @@ UNINITIALIZED_TEST(PagePromotion) {
66226622
CHECK_GT(handles.size(), 0u);
66236623
// First object in handle should be on the first page.
66246624
Handle<FixedArray> first_object = handles.front();
6625-
NewSpacePage* first_page =
6626-
NewSpacePage::FromAddress(first_object->address());
6625+
Page* first_page = Page::FromAddress(first_object->address());
66276626
// The age mark should not be on the first page.
66286627
CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
66296628
// To perform a sanity check on live bytes we need to mark the heap.
66306629
SimulateIncrementalMarking(heap, true);
66316630
// Sanity check that the page meets the requirements for promotion.
66326631
const int threshold_bytes =
6633-
FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory / 100;
6632+
FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
66346633
CHECK_GE(first_page->LiveBytes(), threshold_bytes);
66356634

66366635
// Actual checks: The page is in new space first, but is moved to old space

‎test/cctest/heap/test-spaces.cc

+5-5
Original file line numberDiff line numberDiff line change
@@ -315,12 +315,12 @@ TEST(MemoryAllocator) {
315315
{
316316
int total_pages = 0;
317317
OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
318-
Page* first_page = memory_allocator->AllocatePage<Page>(
318+
Page* first_page = memory_allocator->AllocatePage(
319319
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
320320
NOT_EXECUTABLE);
321321

322322
first_page->InsertAfter(faked_space.anchor()->prev_page());
323-
CHECK(first_page->is_valid());
323+
CHECK(Page::IsValid(first_page));
324324
CHECK(first_page->next_page() == faked_space.anchor());
325325
total_pages++;
326326

@@ -329,10 +329,10 @@ TEST(MemoryAllocator) {
329329
}
330330

331331
// Again, we should get n or n - 1 pages.
332-
Page* other = memory_allocator->AllocatePage<Page>(
332+
Page* other = memory_allocator->AllocatePage(
333333
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
334334
NOT_EXECUTABLE);
335-
CHECK(other->is_valid());
335+
CHECK(Page::IsValid(other));
336336
total_pages++;
337337
other->InsertAfter(first_page);
338338
int page_count = 0;
@@ -343,7 +343,7 @@ TEST(MemoryAllocator) {
343343
CHECK(total_pages == page_count);
344344

345345
Page* second_page = first_page->next_page();
346-
CHECK(second_page->is_valid());
346+
CHECK(Page::IsValid(second_page));
347347

348348
// OldSpace's destructor will tear down the space and free up all pages.
349349
}

0 commit comments

Comments
 (0)
Please sign in to comment.