main_repo/deps/v8/src/spaces-inl.h @ f230a1cf

// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}
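
// A minimal sketch of what Bitmap::Clear guarantees (AnyMarkBitSet is a
// hypothetical helper introduced here only for illustration): after clearing,
// every mark-bit cell of the chunk is zero, so a scan over the same cells
// finds no marked objects.
static inline bool AnyMarkBitSet(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) {
    if (bitmap->cells()[i] != 0) return true;
  }
  return false;
}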


// -----------------------------------------------------------------------------
// PageIterator


PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }


bool PageIterator::has_next() {
  return next_page_ != &space_->anchor_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
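
// A minimal usage sketch (SumPageAreas is a hypothetical helper, introduced
// here only for illustration): PageIterator visits every page linked into a
// paged space, so the total object area of the space can be summed like this.
static inline intptr_t SumPageAreas(PagedSpace* space) {
  intptr_t total = 0;
  PageIterator it(space);
  while (it.has_next()) {
    total += it.next()->area_size();
  }
  return total;
}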


// -----------------------------------------------------------------------------
// NewSpacePageIterator


NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() {
  return prev_page_ != last_page_;
}


NewSpacePage* NewSpacePageIterator::next() {
  ASSERT(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}
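
// A minimal usage sketch (CountToSpacePages is a hypothetical helper,
// introduced here only for illustration): the NewSpace* constructor walks the
// pages of the active to-space, so this counts the semispace pages currently
// available for allocation.
static inline int CountToSpacePages(NewSpace* space) {
  int pages = 0;
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    it.next();
    pages++;
  }
  return pages;
}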


// -----------------------------------------------------------------------------
// HeapObjectIterator
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}
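
// Note that the loop above skips two kinds of addresses: the [top, limit) gap
// of the page holding the current linear allocation area, which contains no
// initialized objects yet, and filler objects, which stand in for free memory
// rather than real heap objects.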


// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(page->area_size() <= kNonCodeObjectAreaSize);
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}
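
// The masking above only works for addresses inside regular, page-aligned
// chunks, where ~kPageAlignmentMask lands on a chunk header with a valid
// owner. A pointer into a large object can land on a page-aligned address in
// the middle of the object, where no header lives, which is why large object
// space has to be searched explicitly as the fallback.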


void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}
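
// Concretely (an illustrative example with a made-up address): if a chunk
// starts at 0x10000 and is completely full, the allocation top is the first
// address past the chunk, and mapping it directly would attribute the mark to
// the following chunk. FromAddress(mark - 1) maps the last byte actually
// inside the chunk back to 0x10000, so the recorded high water mark becomes
// the chunk's full size.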


PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }


Page* Page::next_page() {
  ASSERT(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  ASSERT(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  ASSERT(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}
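
// The fast path above is plain bump-pointer allocation. A minimal sketch of
// the same pattern with ordinary pointers (an illustration, not V8 code):
// allocation is a single bounds check plus an addition, and NULL tells the
// caller to take a slower path.
static inline char* BumpPointerAllocate(char** top, char* limit, int size) {
  char* result = *top;
  if (result + size > limit) return NULL;  // does not fit; use the slow path
  *top = result + size;
  return result;
}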


// Raw allocation.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
                                     AllocationType event) {
  HeapProfiler* profiler = heap()->isolate()->heap_profiler();

  HeapObject* object = AllocateLinearly(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
      profiler->NewObjectEvent(object->address(), size_in_bytes);
    }
    return object;
  }

  ASSERT(!heap()->linear_allocation() ||
         (anchor_.next_chunk() == &anchor_ &&
          anchor_.prev_chunk() == &anchor_));

  object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
      profiler->NewObjectEvent(object->address(), size_in_bytes);
    }
    return object;
  }

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
      profiler->NewObjectEvent(object->address(), size_in_bytes);
    }
    return object;
  }

  return Failure::RetryAfterGC(identity());
}
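
// AllocateRaw above tries three tiers in order: the bump-pointer fast path
// (AllocateLinearly), then the space's free list, then SlowAllocateRaw; only
// when all three fail does it ask the caller to retry after a GC. The
// bookkeeping repeated in each branch could be expressed as a single helper;
// a minimal sketch (RecordPagedSpaceAllocation is hypothetical, introduced
// here only for illustration):
static inline void RecordPagedSpaceAllocation(PagedSpace* space,
                                              HeapProfiler* profiler,
                                              HeapObject* object,
                                              int size_in_bytes,
                                              AllocationType event) {
  if (space->identity() == CODE_SPACE) {
    SkipList::Update(object->address(), size_in_bytes);
  }
  if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
    profiler->NewObjectEvent(object->address(), size_in_bytes);
  }
}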


// -----------------------------------------------------------------------------
// NewSpace


MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();
#ifdef DEBUG
  // If we are stressing compaction we waste some memory in new space
  // in order to get more frequent GCs.
  if (FLAG_stress_compaction && !heap()->linear_allocation()) {
    if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
      int filler_size = size_in_bytes * 4;
      for (int i = 0; i < filler_size; i += kPointerSize) {
        *(reinterpret_cast<Object**>(old_top + i)) =
            heap()->one_pointer_filler_map();
      }
      old_top += filler_size;
      allocation_info_.set_top(allocation_info_.top() + filler_size);
    }
  }
#endif

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  HeapProfiler* profiler = heap()->isolate()->heap_profiler();
  if (profiler != NULL && profiler->is_tracking_allocations()) {
    profiler->NewObjectEvent(obj->address(), size_in_bytes);
  }

  return obj;
}
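
// Note on the DEBUG-only block above: the skipped words are written with
// one_pointer_filler_map, so the wasted region still parses as a sequence of
// valid one-word filler objects, and new space fills up several times faster,
// forcing more frequent GCs when --stress-compaction is on.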


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map()
      || map == heap->raw_unchecked_one_pointer_filler_map()
      || map == heap->raw_unchecked_two_pointer_filler_map();
}
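
// The predicate above identifies holes in the heap by their map: FreeSpace
// objects (variable-sized free-list entries) and the one- and two-word
// fillers used for gaps too small to hold a FreeSpace entry.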

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_