The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.
Please select the desired protocol below to get the URL.
This URL has Read-Only access.
main_repo / deps / v8 / src / spaces-inl.h @ 40c0f755
History | View | Annotate | Download (10.9 KB)
1 |
// Copyright 2006-2008 the V8 project authors. All rights reserved.
|
---|---|
2 |
// Redistribution and use in source and binary forms, with or without
|
3 |
// modification, are permitted provided that the following conditions are
|
4 |
// met:
|
5 |
//
|
6 |
// * Redistributions of source code must retain the above copyright
|
7 |
// notice, this list of conditions and the following disclaimer.
|
8 |
// * Redistributions in binary form must reproduce the above
|
9 |
// copyright notice, this list of conditions and the following
|
10 |
// disclaimer in the documentation and/or other materials provided
|
11 |
// with the distribution.
|
12 |
// * Neither the name of Google Inc. nor the names of its
|
13 |
// contributors may be used to endorse or promote products derived
|
14 |
// from this software without specific prior written permission.
|
15 |
//
|
16 |
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
17 |
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
18 |
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
19 |
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
20 |
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21 |
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22 |
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
23 |
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
24 |
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
25 |
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
26 |
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
27 |
|
28 |
#ifndef V8_SPACES_INL_H_
|
29 |
#define V8_SPACES_INL_H_
|
30 |
|
31 |
#include "memory.h" |
32 |
#include "spaces.h" |
33 |
|
34 |
namespace v8 { namespace internal { |
35 |
|
36 |
|
37 |
// -----------------------------------------------------------------------------
|
38 |
// HeapObjectIterator
|
39 |
|
40 |
bool HeapObjectIterator::has_next() {
|
41 |
if (cur_addr_ < cur_limit_) {
|
42 |
return true; // common case |
43 |
} |
44 |
ASSERT(cur_addr_ == cur_limit_); |
45 |
return HasNextInNextPage(); // slow path |
46 |
} |
47 |
|
48 |
|
49 |
// Returns the object at the cursor and advances the cursor past it.
// Requires has_next(); the object size comes from the optional size
// callback when one was supplied, otherwise from the object itself.
HeapObject* HeapObjectIterator::next() {
  ASSERT(has_next());
  HeapObject* current = HeapObject::FromAddress(cur_addr_);
  int current_size =
      (size_func_ == NULL) ? current->Size() : size_func_(current);
  ASSERT_OBJECT_SIZE(current_size);
  cur_addr_ += current_size;
  ASSERT(cur_addr_ <= cur_limit_);
  return current;
}
61 |
|
62 |
|
63 |
// -----------------------------------------------------------------------------
|
64 |
// PageIterator
|
65 |
|
66 |
bool PageIterator::has_next() {
|
67 |
return cur_page_ != stop_page_;
|
68 |
} |
69 |
|
70 |
|
71 |
// Returns the current page and steps the cursor to its successor.
// Requires has_next().
Page* PageIterator::next() {
  ASSERT(has_next());
  Page* current = cur_page_;
  cur_page_ = current->next_page();
  return current;
}
77 |
|
78 |
|
79 |
// -----------------------------------------------------------------------------
|
80 |
// Page
|
81 |
|
82 |
// The successor link is encoded in the page's opaque header; only the
// memory allocator knows how to decode it, so delegate.
Page* Page::next_page() {
  return MemoryAllocator::GetNextPage(this);
}
85 |
|
86 |
|
87 |
// The allocation top of a page is tracked by the space that owns it, so
// look up the owner and ask it.
Address Page::AllocationTop() {
  return MemoryAllocator::PageOwner(this)->PageAllocationTop(this);
}
91 |
|
92 |
|
93 |
// Zeroes this page's remembered-set bitmap, which occupies the byte range
// [kRSetStartOffset, kRSetEndOffset) within the page.
void Page::ClearRSet() {
  // This method can be called in all rset states.
  memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
}
97 |
|
98 |
|
99 |
// Given an address a (32-bits):
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The rset address is computed as:
//    page_address + words * 4
//
// Returns the address of the remembered-set word covering (address + offset)
// and stores the single-bit mask for that slot in *bitmask.
Address Page::ComputeRSetBitPosition(Address address, int offset,
                                     uint32_t* bitmask) {
  ASSERT(Page::is_rset_in_use());

  Page* page = Page::FromAddress(address);
  // One rset bit per pointer-aligned slot: drop the alignment bits from the
  // in-page offset to get the bit index.
  uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
                                             kObjectAlignmentBits);
  *bitmask = 1 << (bit_offset % kBitsPerInt);

  // The word holding the bit: page base plus the bit index in whole ints.
  Address rset_address =
      page->address() + (bit_offset / kBitsPerInt) * kIntSize;
  // The remembered set address is either in the normal remembered set range
  // of a page or else we have a large object page.
  ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
         || page->IsLargeObjectPage());

  if (rset_address >= page->RSetEnd()) {
    // We have a large object page, and the remembered set address is actually
    // past the end of the object.  The address of the remembered set in this
    // case is the extra remembered set start address at the address of the
    // end of the object:
    //   (page->ObjectAreaStart() + object size)
    // plus the offset of the computed remembered set address from the start
    // of the object:
    //   (rset_address - page->ObjectAreaStart()).
    // Ie, we can just add the object size.
    // NOTE(review): only FixedArrays are expected to carry an extra rset
    // here, per the assert below.
    ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
    rset_address +=
        FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
                                           + Array::kLengthOffset));
  }
  return rset_address;
}
137 |
|
138 |
|
139 |
// Sets the remembered-set bit covering (address + offset).
void Page::SetRSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_word = ComputeRSetBitPosition(address, offset, &bitmask);
  Memory::uint32_at(rset_word) |= bitmask;
  ASSERT(IsRSetSet(address, offset));
}
146 |
|
147 |
|
148 |
// Clears the corresponding remembered set bit for a given address.
|
149 |
void Page::UnsetRSet(Address address, int offset) { |
150 |
uint32_t bitmask = 0;
|
151 |
Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask); |
152 |
Memory::uint32_at(rset_address) &= ~bitmask; |
153 |
|
154 |
ASSERT(!IsRSetSet(address, offset)); |
155 |
} |
156 |
|
157 |
|
158 |
// Tests the remembered-set bit covering (address + offset).
bool Page::IsRSetSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_word = ComputeRSetBitPosition(address, offset, &bitmask);
  return (Memory::uint32_at(rset_word) & bitmask) != 0;
}
163 |
|
164 |
|
165 |
// -----------------------------------------------------------------------------
|
166 |
// MemoryAllocator
|
167 |
|
168 |
bool MemoryAllocator::IsValidChunk(int chunk_id) { |
169 |
if (!IsValidChunkId(chunk_id)) return false; |
170 |
|
171 |
ChunkInfo& c = chunks_[chunk_id]; |
172 |
return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL); |
173 |
} |
174 |
|
175 |
|
176 |
bool MemoryAllocator::IsValidChunkId(int chunk_id) { |
177 |
return (0 <= chunk_id) && (chunk_id < max_nof_chunks_); |
178 |
} |
179 |
|
180 |
|
181 |
// True iff page p lies inside a chunk owned by the given space.
bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& info = chunks_[chunk_id];
  // The page address must fall inside the chunk's [address, address+size)
  // range, and the chunk must belong to the requested space.
  if (info.address() > p->address()) return false;
  if (p->address() >= info.address() + info.size()) return false;
  return space == info.owner();
}
192 |
|
193 |
|
194 |
// Decodes the successor page from p's opaque header.  The header packs the
// next page's (page-aligned) address in the high bits and the chunk id in
// the low bits; masking off the low bits recovers the address.
// NOTE(review): raw_addr is an int, which assumes addresses fit in 32 bits
// (consistent with the rset comment above) -- verify before a 64-bit port.
Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  int raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}
199 |
|
200 |
|
201 |
// Extracts the chunk id stored in the low (sub-page-alignment) bits of the
// page's opaque header; the high bits hold the next-page link.
int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return p->opaque_header & Page::kPageAlignmentMask;
}
205 |
|
206 |
|
207 |
// Rewrites prev's successor link to point at next, preserving the chunk id
// packed into the low bits of the opaque header.
void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  // Keep the existing chunk id from the low bits.
  int chunk_id = prev->opaque_header & Page::kPageAlignmentMask;
  // next must be page aligned so its address has no bits overlapping the id.
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}
213 |
|
214 |
|
215 |
// A page is owned by the space that owns its chunk.
PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int id = GetChunkId(page);
  ASSERT(IsValidChunk(id));
  return chunks_[id].owner();
}
220 |
|
221 |
|
222 |
// True iff address falls inside the initial chunk (when one exists).
bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address base = static_cast<Address>(initial_chunk_->address());
  Address end = base + initial_chunk_->size();
  return (base <= address) && (address < end);
}
228 |
|
229 |
|
230 |
#ifdef ENABLE_HEAP_PROTECTION
|
231 |
|
232 |
// Heap-protection support: makes the given address range inaccessible by
// delegating to the OS page-protection primitive.
void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}
235 |
|
236 |
|
237 |
// Heap-protection support: re-enables access to the given address range,
// restoring execute permission when the range held executable code.
void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}
242 |
|
243 |
|
244 |
// Protects the entire chunk containing the given page.
void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int chunk_id = GetChunkId(page);
  OS::Protect(chunks_[chunk_id].address(), chunks_[chunk_id].size());
}
248 |
|
249 |
|
250 |
// Unprotects the entire chunk containing the given page, re-enabling
// execute permission when the owning space holds executable code.
void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int chunk_id = GetChunkId(page);
  ChunkInfo& chunk = chunks_[chunk_id];
  OS::Unprotect(chunk.address(), chunk.size(),
                chunk.owner()->executable() == EXECUTABLE);
}
255 |
|
256 |
#endif
|
257 |
|
258 |
|
259 |
// --------------------------------------------------------------------------
|
260 |
// PagedSpace
|
261 |
|
262 |
// True iff addr lies on a page belonging to this space.
bool PagedSpace::Contains(Address addr) {
  Page* page = Page::FromAddress(addr);
  ASSERT(page->is_valid());
  return MemoryAllocator::IsPageInSpace(page, this);
}
268 |
|
269 |
|
270 |
// Try linear allocation in the page of alloc_info's allocation top. Does
|
271 |
// not contain slow case logic (eg, move to the next page or try free list
|
272 |
// allocation) so it can be used by all the allocation functions and for all
|
273 |
// the paged spaces.
|
274 |
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info, |
275 |
int size_in_bytes) {
|
276 |
Address current_top = alloc_info->top; |
277 |
Address new_top = current_top + size_in_bytes; |
278 |
if (new_top > alloc_info->limit) return NULL; |
279 |
|
280 |
alloc_info->top = new_top; |
281 |
ASSERT(alloc_info->VerifyPagedAllocation()); |
282 |
accounting_stats_.AllocateBytes(size_in_bytes); |
283 |
return HeapObject::FromAddress(current_top);
|
284 |
} |
285 |
|
286 |
|
287 |
// Raw allocation.
|
288 |
Object* PagedSpace::AllocateRaw(int size_in_bytes) {
|
289 |
ASSERT(HasBeenSetup()); |
290 |
ASSERT_OBJECT_SIZE(size_in_bytes); |
291 |
HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes); |
292 |
if (object != NULL) return object; |
293 |
|
294 |
object = SlowAllocateRaw(size_in_bytes); |
295 |
if (object != NULL) return object; |
296 |
|
297 |
return Failure::RetryAfterGC(size_in_bytes, identity());
|
298 |
} |
299 |
|
300 |
|
301 |
// Reallocating (and promoting) objects during a compacting collection.
|
302 |
Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
|
303 |
ASSERT(HasBeenSetup()); |
304 |
ASSERT_OBJECT_SIZE(size_in_bytes); |
305 |
HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes); |
306 |
if (object != NULL) return object; |
307 |
|
308 |
object = SlowMCAllocateRaw(size_in_bytes); |
309 |
if (object != NULL) return object; |
310 |
|
311 |
return Failure::RetryAfterGC(size_in_bytes, identity());
|
312 |
} |
313 |
|
314 |
|
315 |
// -----------------------------------------------------------------------------
|
316 |
// LargeObjectChunk
|
317 |
|
318 |
// Round the chunk address up to the nearest page-aligned address and
// return the heap object in that page.
HeapObject* LargeObjectChunk::GetObject() {
  Address page_start = RoundUp(address(), Page::kPageSize);
  Page* page = Page::FromAddress(page_start);
  return HeapObject::FromAddress(page->ObjectAreaStart());
}
324 |
|
325 |
|
326 |
// -----------------------------------------------------------------------------
|
327 |
// LargeObjectSpace
|
328 |
|
329 |
// Number of extra remembered-set bytes needed for a large object of the
// given size: one bit per pointer-sized slot beyond the normal page object
// area, rounded up to a whole number of ints, then converted to bytes.
int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
  int extra_rset_bits =
      RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
              kBitsPerInt);
  return extra_rset_bits / kBitsPerByte;
}
335 |
|
336 |
|
337 |
// Bump-pointer allocation in new space using the given allocation info
// (either the regular allocation pointer or the mark-compact forwarding
// pointer).  Returns a retry-after-GC failure when the semispace is full.
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                      AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  // Check that the updated top stays inside the semispace the alloc_info
  // belongs to (to-space for regular allocation, from-space for forwarding).
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}
353 |
|
354 |
} } // namespace v8::internal
|
355 |
|
356 |
#endif // V8_SPACES_INL_H_ |