@@ -30,22 +30,28 @@ StoreBuffer::StoreBuffer(Heap* heap)
 }
 
 void StoreBuffer::SetUp() {
-  // Allocate 3x the buffer size, so that we can start the new store buffer
-  // aligned to 2x the size. This lets us use a bit test to detect the end of
-  // the area.
+  const size_t requested_size = kStoreBufferSize * kStoreBuffers;
+  // Allocate buffer memory aligned at least to kStoreBufferSize. This lets us
+  // use a bit test to detect the ends of the buffers.
+  const size_t alignment =
+      std::max<size_t>(kStoreBufferSize, AllocatePageSize());
+  void* hint = AlignedAddress(heap_->GetRandomMmapAddr(), alignment);
   VirtualMemory reservation;
-  if (!AllocVirtualMemory(kStoreBufferSize * 3, heap_->GetRandomMmapAddr(),
-                          &reservation)) {
+  if (!AlignedAllocVirtualMemory(requested_size, alignment, hint,
+                                 &reservation)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
+
   Address start = reservation.address();
-  start_[0] = reinterpret_cast<Address*>(::RoundUp(start, kStoreBufferSize));
+  const size_t allocated_size = reservation.size();
+
+  start_[0] = reinterpret_cast<Address*>(start);
   limit_[0] = start_[0] + (kStoreBufferSize / kPointerSize);
   start_[1] = limit_[0];
   limit_[1] = start_[1] + (kStoreBufferSize / kPointerSize);
 
-  Address* vm_limit = reinterpret_cast<Address*>(start + reservation.size());
-
+  // Sanity check the buffers.
+  Address* vm_limit = reinterpret_cast<Address*>(start + allocated_size);
   USE(vm_limit);
   for (int i = 0; i < kStoreBuffers; i++) {
     DCHECK(reinterpret_cast<Address>(start_[i]) >= reservation.address());
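The alignment requested above is what enables the bit test mentioned in the new comment: once each buffer starts at a multiple of kStoreBufferSize, the write pointer reaching the end of a buffer can be detected by masking its low bits. A minimal standalone sketch of that idea, assuming kStoreBufferSize is a power of two and using a hypothetical value for it (neither detail is taken from this commit):

#include <cstdint>

constexpr uintptr_t kStoreBufferSize = uintptr_t{1} << 16;   // assumed value
constexpr uintptr_t kStoreBufferMask = kStoreBufferSize - 1;

// With a buffer start aligned to kStoreBufferSize, the slot pointer `top` has
// stepped past the end of its buffer exactly when its low bits wrap to zero,
// so no comparison against a separate limit pointer is needed.
inline bool AtBufferEnd(uintptr_t top) {
  return (top & kStoreBufferMask) == 0;
}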
@@ -55,8 +61,9 @@ void StoreBuffer::SetUp() {
     DCHECK_EQ(0, reinterpret_cast<Address>(limit_[i]) & kStoreBufferMask);
   }
 
-  if (!reservation.SetPermissions(reinterpret_cast<Address>(start_[0]),
-                                  kStoreBufferSize * kStoreBuffers,
+  // Set RW permissions only on the pages we use.
+  const size_t used_size = RoundUp(requested_size, CommitPageSize());
+  if (!reservation.SetPermissions(start, used_size,
                                   PageAllocator::kReadWrite)) {
     heap_->FatalProcessOutOfMemory("StoreBuffer::SetUp");
   }
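The new code rounds the requested size up to CommitPageSize() before committing, presumably because permissions can only be changed at page granularity. A small hypothetical illustration of that rounding, not taken from the V8 sources, assuming the granularity is a power of two:

#include <cstddef>

// Round `value` up to the next multiple of `granularity` (a power of two);
// the same shape of computation as RoundUp(requested_size, CommitPageSize()).
constexpr size_t RoundUpTo(size_t value, size_t granularity) {
  return (value + granularity - 1) & ~(granularity - 1);
}

// With a 4 KiB commit page size, a 10000-byte request is committed as 12288 bytes.
static_assert(RoundUpTo(10000, 4096) == 12288, "rounded to a page multiple");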
@@ -65,7 +72,6 @@ void StoreBuffer::SetUp() {
   virtual_memory_.TakeControl(&reservation);
 }
 
-
 void StoreBuffer::TearDown() {
   if (virtual_memory_.IsReserved()) virtual_memory_.Free();
   top_ = nullptr;