deps/v8/src/spaces.h @ f230a1cf

// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "hashmap.h"
#include "list.h"
#include "log.h"
#include "platform/mutex.h"
#include "v8utils.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects, the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references.  See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                         \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                               \
  ASSERT((0 < size) && (size <= Page::kMaxNonCodeHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                             \
  ASSERT((Page::kObjectStartOffset <= offset)                                  \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                           \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers.  This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};

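// Usage sketch (illustrative): a MarkBit addresses a single bit inside a
// 32-bit cell; Next() walks to the following bit and rolls over into the
// next cell once the shifted mask becomes zero.
//
//   MarkBit::CellType cells[2] = { 0, 0 };
//   MarkBit last(&cells[0], 1u << 31, false);  // highest bit of cells[0]
//   last.Next().Set();                         // sets bit 0 of cells[1]
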
// Bitmap is a sequence of cells, each containing a fixed number of bits.
166
class Bitmap {
167
 public:
168
  static const uint32_t kBitsPerCell = 32;
169
  static const uint32_t kBitsPerCellLog2 = 5;
170
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
171
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
172
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
173

    
174
  static const size_t kLength =
175
    (1 << kPageSizeBits) >> (kPointerSizeLog2);
176

    
177
  static const size_t kSize =
178
    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
179

    
180

    
181
  static int CellsForLength(int length) {
182
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
183
  }
184

    
185
  int CellsCount() {
186
    return CellsForLength(kLength);
187
  }
188

    
189
  static int SizeFor(int cells_count) {
190
    return sizeof(MarkBit::CellType) * cells_count;
191
  }
192

    
193
  INLINE(static uint32_t IndexToCell(uint32_t index)) {
194
    return index >> kBitsPerCellLog2;
195
  }
196

    
197
  INLINE(static uint32_t CellToIndex(uint32_t index)) {
198
    return index << kBitsPerCellLog2;
199
  }
200

    
201
  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
202
    return (index + kBitIndexMask) & ~kBitIndexMask;
203
  }
204

    
205
  INLINE(MarkBit::CellType* cells()) {
206
    return reinterpret_cast<MarkBit::CellType*>(this);
207
  }
208

    
209
  INLINE(Address address()) {
210
    return reinterpret_cast<Address>(this);
211
  }
212

    
213
  INLINE(static Bitmap* FromAddress(Address addr)) {
214
    return reinterpret_cast<Bitmap*>(addr);
215
  }
216

    
217
  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
218
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
219
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
220
    return MarkBit(cell, mask, data_only);
221
  }
222

    
223
  static inline void Clear(MemoryChunk* chunk);
224

    
225
  static void PrintWord(uint32_t word, uint32_t himask = 0) {
226
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
227
      if ((mask & himask) != 0) PrintF("[");
228
      PrintF((mask & word) ? "1" : "0");
229
      if ((mask & himask) != 0) PrintF("]");
230
    }
231
  }
232

    
233
  class CellPrinter {
234
   public:
235
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
236

    
237
    void Print(uint32_t pos, uint32_t cell) {
238
      if (cell == seq_type) {
239
        seq_length++;
240
        return;
241
      }
242

    
243
      Flush();
244

    
245
      if (IsSeq(cell)) {
246
        seq_start = pos;
247
        seq_length = 0;
248
        seq_type = cell;
249
        return;
250
      }
251

    
252
      PrintF("%d: ", pos);
253
      PrintWord(cell);
254
      PrintF("\n");
255
    }
256

    
257
    void Flush() {
258
      if (seq_length > 0) {
259
        PrintF("%d: %dx%d\n",
260
               seq_start,
261
               seq_type == 0 ? 0 : 1,
262
               seq_length * kBitsPerCell);
263
        seq_length = 0;
264
      }
265
    }
266

    
267
    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
268

    
269
   private:
270
    uint32_t seq_start;
271
    uint32_t seq_type;
272
    uint32_t seq_length;
273
  };
274

    
275
  void Print() {
276
    CellPrinter printer;
277
    for (int i = 0; i < CellsCount(); i++) {
278
      printer.Print(i, cells()[i]);
279
    }
280
    printer.Flush();
281
    PrintF("\n");
282
  }
283

    
284
  bool IsClean() {
285
    for (int i = 0; i < CellsCount(); i++) {
286
      if (cells()[i] != 0) {
287
        return false;
288
      }
289
    }
290
    return true;
291
  }
292
};
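
// Worked example (illustrative): mark bit index 70 lives in cell
// 70 >> kBitsPerCellLog2 == 2 under mask 1 << (70 & kBitIndexMask) == 1 << 6,
// so MarkBitFromIndex(70).Set() is cells()[2] |= 0x40.  With 4-byte pointers
// (kPointerSizeLog2 == 2) that index corresponds to the word at chunk
// offset 70 << kPointerSizeLog2 == 280.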
293

    
294

    
295
class SkipList;
296
class SlotsBuffer;
297

    
298
// MemoryChunk represents a memory region owned by a specific space.
299
// It is divided into the header and the body. Chunk start is always
300
// 1MB aligned. Start of the body is aligned so it can accommodate
301
// any heap object.
302
class MemoryChunk {
303
 public:
304
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
305
  static MemoryChunk* FromAddress(Address a) {
306
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
307
  }
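
  // Worked example (illustrative, assuming the default 1MB chunk alignment,
  // i.e. kPageSizeBits == 20): kAlignmentMask covers the low 20 address bits,
  // so clearing them maps any interior pointer back to its chunk header,
  // e.g. FromAddress(chunk->address() + 0x1234) == chunk.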
308

    
309
  // Only works for addresses in pointer spaces, not data or code spaces.
310
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
311

    
312
  Address address() { return reinterpret_cast<Address>(this); }
313

    
314
  bool is_valid() { return address() != NULL; }
315

    
316
  MemoryChunk* next_chunk() const { return next_chunk_; }
317
  MemoryChunk* prev_chunk() const { return prev_chunk_; }
318

    
319
  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
320
  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }
321

    
322
  Space* owner() const {
323
    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
324
        kFailureTag) {
325
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
326
                                      kFailureTag);
327
    } else {
328
      return NULL;
329
    }
330
  }
331

    
332
  void set_owner(Space* space) {
333
    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
334
    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
335
    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
336
           kFailureTag);
337
  }
338

    
339
  VirtualMemory* reserved_memory() {
340
    return &reservation_;
341
  }
342

    
343
  void InitializeReservedMemory() {
344
    reservation_.Reset();
345
  }
346

    
347
  void set_reserved_memory(VirtualMemory* reservation) {
348
    ASSERT_NOT_NULL(reservation);
349
    reservation_.TakeControl(reservation);
350
  }
351

    
352
  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
353
  void initialize_scan_on_scavenge(bool scan) {
354
    if (scan) {
355
      SetFlag(SCAN_ON_SCAVENGE);
356
    } else {
357
      ClearFlag(SCAN_ON_SCAVENGE);
358
    }
359
  }
360
  inline void set_scan_on_scavenge(bool scan);
361

    
362
  int store_buffer_counter() { return store_buffer_counter_; }
363
  void set_store_buffer_counter(int counter) {
364
    store_buffer_counter_ = counter;
365
  }
366

    
367
  bool Contains(Address addr) {
368
    return addr >= area_start() && addr < area_end();
369
  }
370

    
371
  // Checks whether addr can be a limit of addresses in this page.
372
  // It's a limit if it's in the page, or if it's just after the
373
  // last byte of the page.
374
  bool ContainsLimit(Address addr) {
375
    return addr >= area_start() && addr <= area_end();
376
  }
377

    
378
  // Every n write barrier invocations we go to runtime even though
379
  // we could have handled it in generated code.  This lets us check
380
  // whether we have hit the limit and should do some more marking.
381
  static const int kWriteBarrierCounterGranularity = 500;
382

    
383
  enum MemoryChunkFlags {
384
    IS_EXECUTABLE,
385
    ABOUT_TO_BE_FREED,
386
    POINTERS_TO_HERE_ARE_INTERESTING,
387
    POINTERS_FROM_HERE_ARE_INTERESTING,
388
    SCAN_ON_SCAVENGE,
389
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
390
    IN_TO_SPACE,    // All pages in new space have one of these two set.
391
    NEW_SPACE_BELOW_AGE_MARK,
392
    CONTAINS_ONLY_DATA,
393
    EVACUATION_CANDIDATE,
394
    RESCAN_ON_EVACUATION,
395

    
396
    // Pages swept precisely can be iterated, hitting only the live objects.
397
    // Whereas those swept conservatively cannot be iterated over. Both flags
398
    // indicate that marking bits have been cleared by the sweeper, otherwise
399
    // marking bits are still intact.
400
    WAS_SWEPT_PRECISELY,
401
    WAS_SWEPT_CONSERVATIVELY,
402

    
403
    // Large objects can have a progress bar in their page header. These objects
404
    // are scanned in increments and will be kept black while being scanned.
405
    // Even if the mutator writes to them they will be kept black and a white
406
    // to grey transition is performed in the value.
407
    HAS_PROGRESS_BAR,
408

    
409
    // Last flag, keep at bottom.
410
    NUM_MEMORY_CHUNK_FLAGS
411
  };
412

    
413

    
414
  static const int kPointersToHereAreInterestingMask =
415
      1 << POINTERS_TO_HERE_ARE_INTERESTING;
416

    
417
  static const int kPointersFromHereAreInterestingMask =
418
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
419

    
420
  static const int kEvacuationCandidateMask =
421
      1 << EVACUATION_CANDIDATE;
422

    
423
  static const int kSkipEvacuationSlotsRecordingMask =
424
      (1 << EVACUATION_CANDIDATE) |
425
      (1 << RESCAN_ON_EVACUATION) |
426
      (1 << IN_FROM_SPACE) |
427
      (1 << IN_TO_SPACE);
428

    
429

    
430
  void SetFlag(int flag) {
431
    flags_ |= static_cast<uintptr_t>(1) << flag;
432
  }
433

    
434
  void ClearFlag(int flag) {
435
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
436
  }
437

    
438
  void SetFlagTo(int flag, bool value) {
439
    if (value) {
440
      SetFlag(flag);
441
    } else {
442
      ClearFlag(flag);
443
    }
444
  }
445

    
446
  bool IsFlagSet(int flag) {
447
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
448
  }
449

    
450
  // Set or clear multiple flags at a time. The flags in the mask
451
  // are set to the value in "flags", the rest retain the current value
452
  // in flags_.
453
  void SetFlags(intptr_t flags, intptr_t mask) {
454
    flags_ = (flags_ & ~mask) | (flags & mask);
455
  }
456

    
457
  // Return all current flags.
458
  intptr_t GetFlags() { return flags_; }
459

    
460
  intptr_t parallel_sweeping() const {
461
    return parallel_sweeping_;
462
  }
463

    
464
  void set_parallel_sweeping(intptr_t state) {
465
    parallel_sweeping_ = state;
466
  }
467

    
468
  bool TryParallelSweeping() {
469
    return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
470
  }
471

    
472
  // Manage live byte count (count of bytes known to be live,
473
  // because they are marked black).
474
  void ResetLiveBytes() {
475
    if (FLAG_gc_verbose) {
476
      PrintF("ResetLiveBytes:%p:%x->0\n",
477
             static_cast<void*>(this), live_byte_count_);
478
    }
479
    live_byte_count_ = 0;
480
  }
481
  void IncrementLiveBytes(int by) {
482
    if (FLAG_gc_verbose) {
483
      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
484
             static_cast<void*>(this), live_byte_count_,
485
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
486
             live_byte_count_ + by);
487
    }
488
    live_byte_count_ += by;
489
    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
490
  }
491
  int LiveBytes() {
492
    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
493
    return live_byte_count_;
494
  }
495

    
496
  int write_barrier_counter() {
497
    return static_cast<int>(write_barrier_counter_);
498
  }
499

    
500
  void set_write_barrier_counter(int counter) {
501
    write_barrier_counter_ = counter;
502
  }
503

    
504
  int progress_bar() {
505
    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
506
    return progress_bar_;
507
  }
508

    
509
  void set_progress_bar(int progress_bar) {
510
    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
511
    progress_bar_ = progress_bar;
512
  }
513

    
514
  void ResetProgressBar() {
515
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
516
      set_progress_bar(0);
517
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
518
    }
519
  }
520

    
521
  bool IsLeftOfProgressBar(Object** slot) {
522
    Address slot_address = reinterpret_cast<Address>(slot);
523
    ASSERT(slot_address > this->address());
524
    return (slot_address - (this->address() + kObjectStartOffset)) <
525
           progress_bar();
526
  }
527

    
528
  static void IncrementLiveBytesFromGC(Address address, int by) {
529
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
530
  }
531

    
532
  static void IncrementLiveBytesFromMutator(Address address, int by);
533

    
534
  static const intptr_t kAlignment =
535
      (static_cast<uintptr_t>(1) << kPageSizeBits);
536

    
537
  static const intptr_t kAlignmentMask = kAlignment - 1;
538

    
539
  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;
540

    
541
  static const intptr_t kLiveBytesOffset =
542
     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
543
     kPointerSize + kPointerSize +
544
     kPointerSize + kPointerSize + kPointerSize + kIntSize;
545

    
546
  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
547

    
548
  static const size_t kWriteBarrierCounterOffset =
549
      kSlotsBufferOffset + kPointerSize + kPointerSize;
550

    
551
  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
552
                                    kIntSize + kIntSize + kPointerSize +
553
                                    5 * kPointerSize;
554

    
555
  static const int kBodyOffset =
556
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
557

    
558
  // The start offset of the object area in a page. Aligned to both maps and
559
  // code alignment to be suitable for both.  Also aligned to 32 words because
560
  // the marking bitmap is arranged in 32 bit chunks.
561
  static const int kObjectStartAlignment = 32 * kPointerSize;
562
  static const int kObjectStartOffset = kBodyOffset - 1 +
563
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
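
  // The expression above rounds kBodyOffset up to the next multiple of
  // kObjectStartAlignment and leaves already-aligned values unchanged.
  // Illustrative numbers (not the actual constants): with a 128-byte
  // alignment, a body offset of 700 becomes 699 + (128 - 699 % 128) == 768.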
564

    
565
  size_t size() const { return size_; }
566

    
567
  void set_size(size_t size) {
568
    size_ = size;
569
  }
570

    
571
  void SetArea(Address area_start, Address area_end) {
572
    area_start_ = area_start;
573
    area_end_ = area_end;
574
  }
575

    
576
  Executability executable() {
577
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
578
  }
579

    
580
  bool ContainsOnlyData() {
581
    return IsFlagSet(CONTAINS_ONLY_DATA);
582
  }
583

    
584
  bool InNewSpace() {
585
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
586
  }
587

    
588
  bool InToSpace() {
589
    return IsFlagSet(IN_TO_SPACE);
590
  }
591

    
592
  bool InFromSpace() {
593
    return IsFlagSet(IN_FROM_SPACE);
594
  }
595

    
596
  // ---------------------------------------------------------------------
597
  // Markbits support
598

    
599
  inline Bitmap* markbits() {
600
    return Bitmap::FromAddress(address() + kHeaderSize);
601
  }
602

    
603
  void PrintMarkbits() { markbits()->Print(); }
604

    
605
  inline uint32_t AddressToMarkbitIndex(Address addr) {
606
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
607
  }
608

    
609
  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
610
    const intptr_t offset =
611
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
612

    
613
    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
614
  }
615

    
616
  inline Address MarkbitIndexToAddress(uint32_t index) {
617
    return this->address() + (index << kPointerSizeLog2);
618
  }
619

    
620
  void InsertAfter(MemoryChunk* other);
621
  void Unlink();
622

    
623
  inline Heap* heap() { return heap_; }
624

    
625
  static const int kFlagsOffset = kPointerSize * 3;
626

    
627
  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
628

    
629
  bool ShouldSkipEvacuationSlotRecording() {
630
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
631
  }
632

    
633
  inline SkipList* skip_list() {
634
    return skip_list_;
635
  }
636

    
637
  inline void set_skip_list(SkipList* skip_list) {
638
    skip_list_ = skip_list;
639
  }
640

    
641
  inline SlotsBuffer* slots_buffer() {
642
    return slots_buffer_;
643
  }
644

    
645
  inline SlotsBuffer** slots_buffer_address() {
646
    return &slots_buffer_;
647
  }
648

    
649
  void MarkEvacuationCandidate() {
650
    ASSERT(slots_buffer_ == NULL);
651
    SetFlag(EVACUATION_CANDIDATE);
652
  }
653

    
654
  void ClearEvacuationCandidate() {
655
    ASSERT(slots_buffer_ == NULL);
656
    ClearFlag(EVACUATION_CANDIDATE);
657
  }
658

    
659
  Address area_start() { return area_start_; }
660
  Address area_end() { return area_end_; }
661
  int area_size() {
662
    return static_cast<int>(area_end() - area_start());
663
  }
664
  bool CommitArea(size_t requested);
665

    
666
  // Approximate amount of physical memory committed for this chunk.
667
  size_t CommittedPhysicalMemory() {
668
    return high_water_mark_;
669
  }
670

    
671
  static inline void UpdateHighWaterMark(Address mark);
672

    
673
 protected:
674
  MemoryChunk* next_chunk_;
675
  MemoryChunk* prev_chunk_;
676
  size_t size_;
677
  intptr_t flags_;
678

    
679
  // Start and end of allocatable memory on this chunk.
680
  Address area_start_;
681
  Address area_end_;
682

    
683
  // If the chunk needs to remember its memory reservation, it is stored here.
684
  VirtualMemory reservation_;
685
  // The identity of the owning space.  This is tagged as a failure pointer, but
686
  // no failure can be in an object, so this can be distinguished from any entry
687
  // in a fixed array.
688
  Address owner_;
689
  Heap* heap_;
690
  // Used by the store buffer to keep track of which pages to mark scan-on-
691
  // scavenge.
692
  int store_buffer_counter_;
693
  // Count of bytes marked black on page.
694
  int live_byte_count_;
695
  SlotsBuffer* slots_buffer_;
696
  SkipList* skip_list_;
697
  intptr_t write_barrier_counter_;
698
  // Used by the incremental marker to keep track of the scanning progress in
699
  // large objects that have a progress bar and are scanned in increments.
700
  int progress_bar_;
701
  // Assuming the initial allocation on a page is sequential,
702
  // count highest number of bytes ever allocated on the page.
703
  int high_water_mark_;
704

    
705
  intptr_t parallel_sweeping_;
706

    
707
  // PagedSpace free-list statistics.
708
  intptr_t available_in_small_free_list_;
709
  intptr_t available_in_medium_free_list_;
710
  intptr_t available_in_large_free_list_;
711
  intptr_t available_in_huge_free_list_;
712
  intptr_t non_available_small_blocks_;
713

    
714
  static MemoryChunk* Initialize(Heap* heap,
715
                                 Address base,
716
                                 size_t size,
717
                                 Address area_start,
718
                                 Address area_end,
719
                                 Executability executable,
720
                                 Space* owner);
721

    
722
  friend class MemoryAllocator;
723
};
724

    
725

    
726
STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
727

    
728

    
729
// -----------------------------------------------------------------------------
730
// A page is a memory chunk of size 1MB. Large object pages may be larger.
731
//
732
// The only way to get a page pointer is by calling factory methods:
733
//   Page* p = Page::FromAddress(addr); or
734
//   Page* p = Page::FromAllocationTop(top);
735
class Page : public MemoryChunk {
736
 public:
737
  // Returns the page containing a given address. The address ranges
738
  // from [page_addr .. page_addr + kPageSize[
739
  // This only works if the object is in fact in a page.  See also MemoryChunk::
740
  // FromAddress() and FromAnyAddress().
741
  INLINE(static Page* FromAddress(Address a)) {
742
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
743
  }
744

    
745
  // Returns the page containing an allocation top. Because an allocation
746
  // top address can be the upper bound of the page, we need to subtract
747
  // it with kPointerSize first. The address ranges from
748
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
749
  INLINE(static Page* FromAllocationTop(Address top)) {
750
    Page* p = FromAddress(top - kPointerSize);
751
    return p;
752
  }
753

    
754
  // Returns the next page in the chain of pages owned by a space.
755
  inline Page* next_page();
756
  inline Page* prev_page();
757
  inline void set_next_page(Page* page);
758
  inline void set_prev_page(Page* page);
759

    
760
  // Checks whether an address is page aligned.
761
  static bool IsAlignedToPageSize(Address a) {
762
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
763
  }
764

    
765
  // Returns the offset of a given address to this page.
766
  INLINE(int Offset(Address a)) {
767
    int offset = static_cast<int>(a - address());
768
    return offset;
769
  }
770

    
771
  // Returns the address for a given offset in this page.
772
  Address OffsetToAddress(int offset) {
773
    ASSERT_PAGE_OFFSET(offset);
774
    return address() + offset;
775
  }
776

    
777
  // ---------------------------------------------------------------------
778

    
779
  // Page size in bytes.  This must be a multiple of the OS page size.
780
  static const int kPageSize = 1 << kPageSizeBits;
781

    
782
  // Object area size in bytes.
783
  static const int kNonCodeObjectAreaSize = kPageSize - kObjectStartOffset;
784

    
785
  // Maximum object size that fits in a page. Objects larger than that size
786
  // are allocated in large object space and are never moved in memory. This
787
  // also applies to new space allocation, since objects are never migrated
788
  // from new space to large object space.  Takes double alignment into account.
789
  static const int kMaxNonCodeHeapObjectSize =
790
      kNonCodeObjectAreaSize - kPointerSize;
791

    
792
  // Page size mask.
793
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
794

    
795
  inline void ClearGCFields();
796

    
797
  static inline Page* Initialize(Heap* heap,
798
                                 MemoryChunk* chunk,
799
                                 Executability executable,
800
                                 PagedSpace* owner);
801

    
802
  void InitializeAsAnchor(PagedSpace* owner);
803

    
804
  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
805
  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
806
  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
807

    
808
  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
809
  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
810

    
811
  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
812
  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
813

    
814
  void ResetFreeListStatistics();
815

    
816
#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
817
  type name() { return name##_; }                 \
818
  void set_##name(type name) { name##_ = name; }  \
819
  void add_##name(type name) { name##_ += name; }
820

    
821
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
822
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
823
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
824
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
825
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
826

    
827
#undef FRAGMENTATION_STATS_ACCESSORS
828

    
829
#ifdef DEBUG
830
  void Print();
831
#endif  // DEBUG
832

    
833
  friend class MemoryAllocator;
834
};
835

    
836

    
837
STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
838

    
839

    
840
class LargePage : public MemoryChunk {
841
 public:
842
  HeapObject* GetObject() {
843
    return HeapObject::FromAddress(area_start());
844
  }
845

    
846
  inline LargePage* next_page() const {
847
    return static_cast<LargePage*>(next_chunk());
848
  }
849

    
850
  inline void set_next_page(LargePage* page) {
851
    set_next_chunk(page);
852
  }
853
 private:
854
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
855

    
856
  friend class MemoryAllocator;
857
};
858

    
859
STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
860

    
861
// ----------------------------------------------------------------------------
862
// Space is the abstract superclass for all allocation spaces.
863
class Space : public Malloced {
864
 public:
865
  Space(Heap* heap, AllocationSpace id, Executability executable)
866
      : heap_(heap), id_(id), executable_(executable) {}
867

    
868
  virtual ~Space() {}
869

    
870
  Heap* heap() const { return heap_; }
871

    
872
  // Does the space need executable memory?
873
  Executability executable() { return executable_; }
874

    
875
  // Identity used in error reporting.
876
  AllocationSpace identity() { return id_; }
877

    
878
  // Returns allocated size.
879
  virtual intptr_t Size() = 0;
880

    
881
  // Returns size of objects. Can differ from the allocated size
882
  // (e.g. see LargeObjectSpace).
883
  virtual intptr_t SizeOfObjects() { return Size(); }
884

    
885
  virtual int RoundSizeDownToObjectAlignment(int size) {
886
    if (id_ == CODE_SPACE) {
887
      return RoundDown(size, kCodeAlignment);
888
    } else {
889
      return RoundDown(size, kPointerSize);
890
    }
891
  }
892

    
893
#ifdef DEBUG
894
  virtual void Print() = 0;
895
#endif
896

    
897
 private:
898
  Heap* heap_;
899
  AllocationSpace id_;
900
  Executability executable_;
901
};
902

    
903

    
904
// ----------------------------------------------------------------------------
905
// All heap objects containing executable code (code objects) must be allocated
906
// from a 2 GB range of memory, so that they can call each other using 32-bit
907
// displacements.  This happens automatically on 32-bit platforms, where 32-bit
908
// displacements cover the entire 4GB virtual address space.  On 64-bit
909
// platforms, we support this using the CodeRange object, which reserves and
910
// manages a range of virtual memory.
911
class CodeRange {
912
 public:
913
  explicit CodeRange(Isolate* isolate);
914
  ~CodeRange() { TearDown(); }
915

    
916
  // Reserves a range of virtual memory, but does not commit any of it.
917
  // Can only be called once, at heap initialization time.
918
  // Returns false on failure.
919
  bool SetUp(const size_t requested_size);
920

    
921
  // Frees the range of virtual memory, and frees the data structures used to
922
  // manage it.
923
  void TearDown();
924

    
925
  bool exists() { return this != NULL && code_range_ != NULL; }
926
  Address start() {
927
    if (this == NULL || code_range_ == NULL) return NULL;
928
    return static_cast<Address>(code_range_->address());
929
  }
930
  bool contains(Address address) {
931
    if (this == NULL || code_range_ == NULL) return false;
932
    Address start = static_cast<Address>(code_range_->address());
933
    return start <= address && address < start + code_range_->size();
934
  }
935

    
936
  // Allocates a chunk of memory from the large-object portion of
937
  // the code range.  On platforms with no separate code range, should
938
  // not be called.
939
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
940
                                            const size_t commit_size,
941
                                            size_t* allocated);
942
  bool CommitRawMemory(Address start, size_t length);
943
  bool UncommitRawMemory(Address start, size_t length);
944
  void FreeRawMemory(Address buf, size_t length);
945

    
946
 private:
947
  Isolate* isolate_;
948

    
949
  // The reserved range of virtual memory that all code objects are put in.
950
  VirtualMemory* code_range_;
951
  // Plain old data class, just a struct plus a constructor.
952
  class FreeBlock {
953
   public:
954
    FreeBlock(Address start_arg, size_t size_arg)
955
        : start(start_arg), size(size_arg) {
956
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
957
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
958
    }
959
    FreeBlock(void* start_arg, size_t size_arg)
960
        : start(static_cast<Address>(start_arg)), size(size_arg) {
961
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
962
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
963
    }
964

    
965
    Address start;
966
    size_t size;
967
  };
968

    
969
  // Freed blocks of memory are added to the free list.  When the allocation
970
  // list is exhausted, the free list is sorted and merged to make the new
971
  // allocation list.
972
  List<FreeBlock> free_list_;
973
  // Memory is allocated from the free blocks on the allocation list.
974
  // The block at current_allocation_block_index_ is the current block.
975
  List<FreeBlock> allocation_list_;
976
  int current_allocation_block_index_;
977

    
978
  // Finds a block on the allocation list that contains at least the
979
  // requested amount of memory.  If none is found, sorts and merges
980
  // the existing free memory blocks, and searches again.
981
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
982
  void GetNextAllocationBlock(size_t requested);
983
  // Compares the start addresses of two free blocks.
984
  static int CompareFreeBlockAddress(const FreeBlock* left,
985
                                     const FreeBlock* right);
986

    
987
  DISALLOW_COPY_AND_ASSIGN(CodeRange);
988
};
989

    
990

    
991
class SkipList {
992
 public:
993
  SkipList() {
994
    Clear();
995
  }
996

    
997
  void Clear() {
998
    for (int idx = 0; idx < kSize; idx++) {
999
      starts_[idx] = reinterpret_cast<Address>(-1);
1000
    }
1001
  }
1002

    
1003
  Address StartFor(Address addr) {
1004
    return starts_[RegionNumber(addr)];
1005
  }
1006

    
1007
  void AddObject(Address addr, int size) {
1008
    int start_region = RegionNumber(addr);
1009
    int end_region = RegionNumber(addr + size - kPointerSize);
1010
    for (int idx = start_region; idx <= end_region; idx++) {
1011
      if (starts_[idx] > addr) starts_[idx] = addr;
1012
    }
1013
  }
1014

    
1015
  static inline int RegionNumber(Address addr) {
1016
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1017
  }
1018

    
1019
  static void Update(Address addr, int size) {
1020
    Page* page = Page::FromAddress(addr);
1021
    SkipList* list = page->skip_list();
1022
    if (list == NULL) {
1023
      list = new SkipList();
1024
      page->set_skip_list(list);
1025
    }
1026

    
1027
    list->AddObject(addr, size);
1028
  }
1029

    
1030
 private:
1031
  static const int kRegionSizeLog2 = 13;
1032
  static const int kRegionSize = 1 << kRegionSizeLog2;
1033
  static const int kSize = Page::kPageSize / kRegionSize;
1034

    
1035
  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1036

    
1037
  Address starts_[kSize];
1038
};
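
// Illustrative example (assuming 1MB pages): each skip-list region covers
// kRegionSize == 8KB, giving kSize == 128 entries per page.  AddObject()
// records, for every region an object overlaps, the lowest object start seen
// in that region, so StartFor(addr) returns a safe address at which to begin
// scanning the region containing addr.  An object of 12KB starting at page
// offset 20KB overlaps regions 2 and 3 and updates starts_[2] and starts_[3].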
1039

    
1040

    
1041
// ----------------------------------------------------------------------------
1042
// A space acquires chunks of memory from the operating system. The memory
1043
// allocator allocates and deallocates pages for the paged heap spaces and large
1044
// pages for large object space.
1045
//
1046
// Each space has to manage its own pages.
1047
//
1048
class MemoryAllocator {
1049
 public:
1050
  explicit MemoryAllocator(Isolate* isolate);
1051

    
1052
  // Initializes its internal bookkeeping structures.
1053
  // Max capacity of the total space and executable memory limit.
1054
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
1055

    
1056
  void TearDown();
1057

    
1058
  Page* AllocatePage(
1059
      intptr_t size, PagedSpace* owner, Executability executable);
1060

    
1061
  LargePage* AllocateLargePage(
1062
      intptr_t object_size, Space* owner, Executability executable);
1063

    
1064
  void Free(MemoryChunk* chunk);
1065

    
1066
  // Returns the maximum available bytes of heaps.
1067
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
1068

    
1069
  // Returns allocated spaces in bytes.
1070
  intptr_t Size() { return size_; }
1071

    
1072
  // Returns the maximum available executable bytes of heaps.
1073
  intptr_t AvailableExecutable() {
1074
    if (capacity_executable_ < size_executable_) return 0;
1075
    return capacity_executable_ - size_executable_;
1076
  }
1077

    
1078
  // Returns allocated executable spaces in bytes.
1079
  intptr_t SizeExecutable() { return size_executable_; }
1080

    
1081
  // Returns maximum available bytes that the old space can have.
1082
  intptr_t MaxAvailable() {
1083
    return (Available() / Page::kPageSize) * Page::kMaxNonCodeHeapObjectSize;
1084
  }
1085

    
1086
  // Returns an indication of whether a pointer is in a space that has
1087
  // been allocated by this MemoryAllocator.
1088
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
1089
    return address < lowest_ever_allocated_ ||
1090
        address >= highest_ever_allocated_;
1091
  }
1092

    
1093
#ifdef DEBUG
1094
  // Reports statistic info of the space.
1095
  void ReportStatistics();
1096
#endif
1097

    
1098
  // Returns a MemoryChunk in which the memory region from commit_area_size to
1099
  // reserve_area_size of the chunk area is reserved but not committed; it
1100
  // could be committed later by calling MemoryChunk::CommitArea.
1101
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1102
                             intptr_t commit_area_size,
1103
                             Executability executable,
1104
                             Space* space);
1105

    
1106
  Address ReserveAlignedMemory(size_t requested,
1107
                               size_t alignment,
1108
                               VirtualMemory* controller);
1109
  Address AllocateAlignedMemory(size_t reserve_size,
1110
                                size_t commit_size,
1111
                                size_t alignment,
1112
                                Executability executable,
1113
                                VirtualMemory* controller);
1114

    
1115
  bool CommitMemory(Address addr, size_t size, Executability executable);
1116

    
1117
  void FreeMemory(VirtualMemory* reservation, Executability executable);
1118
  void FreeMemory(Address addr, size_t size, Executability executable);
1119

    
1120
  // Commit a contiguous block of memory from the initial chunk.  Assumes that
1121
  // the address is not NULL, the size is greater than zero, and that the
1122
  // block is contained in the initial chunk.  Returns true if it succeeded
1123
  // and false otherwise.
1124
  bool CommitBlock(Address start, size_t size, Executability executable);
1125

    
1126
  // Uncommit a contiguous block of memory [start..(start+size)[.
1127
  // start is not NULL, the size is greater than zero, and the
1128
  // block is contained in the initial chunk.  Returns true if it succeeded
1129
  // and false otherwise.
1130
  bool UncommitBlock(Address start, size_t size);
1131

    
1132
  // Zaps a contiguous block of memory [start..(start+size)[ thus
1133
  // filling it up with a recognizable non-NULL bit pattern.
1134
  void ZapBlock(Address start, size_t size);
1135

    
1136
  void PerformAllocationCallback(ObjectSpace space,
1137
                                 AllocationAction action,
1138
                                 size_t size);
1139

    
1140
  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1141
                                          ObjectSpace space,
1142
                                          AllocationAction action);
1143

    
1144
  void RemoveMemoryAllocationCallback(
1145
      MemoryAllocationCallback callback);
1146

    
1147
  bool MemoryAllocationCallbackRegistered(
1148
      MemoryAllocationCallback callback);
1149

    
1150
  static int CodePageGuardStartOffset();
1151

    
1152
  static int CodePageGuardSize();
1153

    
1154
  static int CodePageAreaStartOffset();
1155

    
1156
  static int CodePageAreaEndOffset();
1157

    
1158
  static int CodePageAreaSize() {
1159
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1160
  }
1161

    
1162
  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
1163
                                              Address start,
1164
                                              size_t commit_size,
1165
                                              size_t reserved_size);
1166

    
1167
 private:
1168
  Isolate* isolate_;
1169

    
1170
  // Maximum space size in bytes.
1171
  size_t capacity_;
1172
  // Maximum subset of capacity_ that can be executable
1173
  size_t capacity_executable_;
1174

    
1175
  // Allocated space size in bytes.
1176
  size_t size_;
1177
  // Allocated executable space size in bytes.
1178
  size_t size_executable_;
1179

    
1180
  // We keep the lowest and highest addresses allocated as a quick way
1181
  // of determining that pointers are outside the heap. The estimate is
1182
  // conservative, i.e. not all addresses in 'allocated' space are allocated
1183
  // to our heap. The range is [lowest, highest[, inclusive on the low end
1184
  // and exclusive on the high end.
1185
  void* lowest_ever_allocated_;
1186
  void* highest_ever_allocated_;
1187

    
1188
  struct MemoryAllocationCallbackRegistration {
1189
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1190
                                         ObjectSpace space,
1191
                                         AllocationAction action)
1192
        : callback(callback), space(space), action(action) {
1193
    }
1194
    MemoryAllocationCallback callback;
1195
    ObjectSpace space;
1196
    AllocationAction action;
1197
  };
1198

    
1199
  // A list of callbacks that are triggered when memory is allocated or freed
1200
  List<MemoryAllocationCallbackRegistration>
1201
      memory_allocation_callbacks_;
1202

    
1203
  // Initializes pages in a chunk. Returns the first page address.
1204
  // This function and GetChunkId() are provided for the mark-compact
1205
  // collector to rebuild page headers in the from space, which is
1206
  // used as a marking stack and its page headers are destroyed.
1207
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1208
                               PagedSpace* owner);
1209

    
1210
  void UpdateAllocatedSpaceLimits(void* low, void* high) {
1211
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
1212
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
1213
  }
1214

    
1215
  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1216
};
1217

    
1218

    
1219
// -----------------------------------------------------------------------------
1220
// Interface for heap object iterator to be implemented by all object space
1221
// object iterators.
1222
//
1223
// NOTE: The space specific object iterators also implement their own next()
//       method, which is used to avoid using virtual functions when
//       iterating a specific space.
1226

    
1227
class ObjectIterator : public Malloced {
1228
 public:
1229
  virtual ~ObjectIterator() { }
1230

    
1231
  virtual HeapObject* next_object() = 0;
1232
};
1233

    
1234

    
1235
// -----------------------------------------------------------------------------
1236
// Heap object iterator in new/old/map spaces.
1237
//
1238
// A HeapObjectIterator iterates objects from the bottom of the given space
1239
// to its top or from the bottom of the given page to its top.
1240
//
1241
// If objects are allocated in the page during iteration the iterator may
1242
// or may not iterate over those objects.  The caller must create a new
1243
// iterator in order to be sure to visit these new objects.
1244
class HeapObjectIterator: public ObjectIterator {
1245
 public:
1246
  // Creates a new object iterator in a given space.
1247
  // If the size function is not given, the iterator calls the default
1248
  // Object::Size().
1249
  explicit HeapObjectIterator(PagedSpace* space);
1250
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
1251
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1252

    
1253
  // Advance to the next object, skipping free spaces and other fillers and
1254
  // skipping the special garbage section of which there is one per space.
1255
  // Returns NULL when the iteration has ended.
1256
  inline HeapObject* Next() {
1257
    do {
1258
      HeapObject* next_obj = FromCurrentPage();
1259
      if (next_obj != NULL) return next_obj;
1260
    } while (AdvanceToNextPage());
1261
    return NULL;
1262
  }
1263

    
1264
  virtual HeapObject* next_object() {
1265
    return Next();
1266
  }
1267

    
1268
 private:
1269
  enum PageMode { kOnePageOnly, kAllPagesInSpace };
1270

    
1271
  Address cur_addr_;  // Current iteration point.
1272
  Address cur_end_;   // End iteration point.
1273
  HeapObjectCallback size_func_;  // Size function or NULL.
1274
  PagedSpace* space_;
1275
  PageMode page_mode_;
1276

    
1277
  // Fast (inlined) path of next().
1278
  inline HeapObject* FromCurrentPage();
1279

    
1280
  // Slow path of next(), goes into the next page.  Returns false if the
1281
  // iteration has ended.
1282
  bool AdvanceToNextPage();
1283

    
1284
  // Initializes fields.
1285
  inline void Initialize(PagedSpace* owner,
1286
                         Address start,
1287
                         Address end,
1288
                         PageMode mode,
1289
                         HeapObjectCallback size_func);
1290
};
1291

    
1292

    
1293
// -----------------------------------------------------------------------------
1294
// A PageIterator iterates the pages in a paged space.
1295

    
1296
class PageIterator BASE_EMBEDDED {
1297
 public:
1298
  explicit inline PageIterator(PagedSpace* space);
1299

    
1300
  inline bool has_next();
1301
  inline Page* next();
1302

    
1303
 private:
1304
  PagedSpace* space_;
1305
  Page* prev_page_;  // Previous page returned.
1306
  // Next page that will be returned.  Cached here so that we can use this
1307
  // iterator for operations that deallocate pages.
1308
  Page* next_page_;
1309
};
1310

    
1311

    
1312
// -----------------------------------------------------------------------------
1313
// A space has a circular list of pages. The next page can be accessed via
1314
// Page::next_page() call.
1315

    
1316
// An abstraction of allocation and relocation pointers in a page-structured
1317
// space.
1318
class AllocationInfo {
1319
 public:
1320
  AllocationInfo() : top_(NULL), limit_(NULL) {
1321
  }
1322

    
1323
  INLINE(void set_top(Address top)) {
1324
    SLOW_ASSERT(top == NULL ||
1325
        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
1326
    top_ = top;
1327
  }
1328

    
1329
  INLINE(Address top()) const {
1330
    SLOW_ASSERT(top_ == NULL ||
1331
        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
1332
    return top_;
1333
  }
1334

    
1335
  Address* top_address() {
1336
    return &top_;
1337
  }
1338

    
1339
  INLINE(void set_limit(Address limit)) {
1340
    SLOW_ASSERT(limit == NULL ||
1341
        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
1342
    limit_ = limit;
1343
  }
1344

    
1345
  INLINE(Address limit()) const {
1346
    SLOW_ASSERT(limit_ == NULL ||
1347
        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
1348
    return limit_;
1349
  }
1350

    
1351
  Address* limit_address() {
1352
    return &limit_;
1353
  }
1354

    
1355
#ifdef DEBUG
1356
  bool VerifyPagedAllocation() {
1357
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
1358
        && (top_ <= limit_);
1359
  }
1360
#endif
1361

    
1362
 private:
1363
  // Current allocation top.
1364
  Address top_;
1365
  // Current allocation limit.
1366
  Address limit_;
1367
};
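
// Illustrative note: top_/limit_ delimit the current linear allocation area.
// A bump-pointer allocation of n bytes succeeds when top_ + n <= limit_ and
// simply advances top_ by n; when top_ reaches limit_ the owning space has to
// set up a new area (see the free list comments below).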
1368

    
1369

    
1370
// An abstraction of the accounting statistics of a page-structured space.
1371
// The 'capacity' of a space is the number of object-area bytes (i.e., not
1372
// including page bookkeeping structures) currently in the space. The 'size'
1373
// of a space is the number of allocated bytes, the 'waste' in the space is
1374
// the number of bytes that are not allocated and not available to
1375
// allocation without reorganizing the space via a GC (e.g. small blocks due
1376
// to internal fragmentation, top of page areas in map space), and the bytes
1377
// 'available' is the number of unallocated bytes that are not waste.  The
1378
// capacity is the sum of size, waste, and available.
1379
//
1380
// The stats are only set by functions that ensure they stay balanced. These
1381
// functions increase or decrease one of the non-capacity stats in
1382
// conjunction with capacity, or else they always balance increases and
1383
// decreases to the non-capacity stats.
1384
class AllocationStats BASE_EMBEDDED {
1385
 public:
1386
  AllocationStats() { Clear(); }
1387

    
1388
  // Zero out all the allocation statistics (i.e., no capacity).
1389
  void Clear() {
1390
    capacity_ = 0;
1391
    size_ = 0;
1392
    waste_ = 0;
1393
  }
1394

    
1395
  void ClearSizeWaste() {
1396
    size_ = capacity_;
1397
    waste_ = 0;
1398
  }
1399

    
1400
  // Reset the allocation statistics (i.e., available = capacity with no
1401
  // wasted or allocated bytes).
1402
  void Reset() {
1403
    size_ = 0;
1404
    waste_ = 0;
1405
  }
1406

    
1407
  // Accessors for the allocation statistics.
1408
  intptr_t Capacity() { return capacity_; }
1409
  intptr_t Size() { return size_; }
1410
  intptr_t Waste() { return waste_; }
1411

    
1412
  // Grow the space by adding available bytes.  They are initially marked as
1413
  // being in use (part of the size), but will normally be immediately freed,
1414
  // putting them on the free list and removing them from size_.
1415
  void ExpandSpace(int size_in_bytes) {
1416
    capacity_ += size_in_bytes;
1417
    size_ += size_in_bytes;
1418
    ASSERT(size_ >= 0);
1419
  }
1420

    
1421
  // Shrink the space by removing available bytes.  Since shrinking is done
1422
  // during sweeping, bytes have been marked as being in use (part of the size)
1423
  // and are hereby freed.
1424
  void ShrinkSpace(int size_in_bytes) {
1425
    capacity_ -= size_in_bytes;
1426
    size_ -= size_in_bytes;
1427
    ASSERT(size_ >= 0);
1428
  }
1429

    
1430
  // Allocate from available bytes (available -> size).
1431
  void AllocateBytes(intptr_t size_in_bytes) {
1432
    size_ += size_in_bytes;
1433
    ASSERT(size_ >= 0);
1434
  }
1435

    
1436
  // Free allocated bytes, making them available (size -> available).
1437
  void DeallocateBytes(intptr_t size_in_bytes) {
1438
    size_ -= size_in_bytes;
1439
    ASSERT(size_ >= 0);
1440
  }
1441

    
1442
  // Waste free bytes (available -> waste).
1443
  void WasteBytes(int size_in_bytes) {
1444
    size_ -= size_in_bytes;
1445
    waste_ += size_in_bytes;
1446
    ASSERT(size_ >= 0);
1447
  }
1448

    
1449
 private:
1450
  intptr_t capacity_;
1451
  intptr_t size_;
1452
  intptr_t waste_;
1453
};
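
// Illustrative example of the balanced accounting described above:
// ExpandSpace(4096) adds the bytes to both capacity_ and size_;
// DeallocateBytes(4096) then moves them from size_ to 'available';
// a later AllocateBytes(128) moves 128 bytes back into size_.  capacity_
// stays at 4096 throughout and always equals size + waste + available.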


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap.  They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object).  They have a size and a next pointer.  The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address.  This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size().  This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline FreeListNode* next();
  inline FreeListNode** next_address();
  inline void set_next(FreeListNode* next);

  inline void Zap();

  static inline FreeListNode* cast(MaybeObject* maybe) {
    ASSERT(!maybe->IsFailure());
    return reinterpret_cast<FreeListNode*>(maybe);
  }

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
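//
// Illustrative sketch (not code from this file): turning a raw free block
// back into something the heap can iterate.  |heap|, |start| and
// |size_in_bytes| are assumed to come from the caller, e.g. PagedSpace::Free.
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(heap, size_in_bytes);  // writes a map + size, so the block
//                                         // reports size_in_bytes via Size()
//   node->set_next(NULL);                 // ready to be linked into a
//                                         // FreeListCategory below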


// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
class FreeListCategory {
 public:
  FreeListCategory() :
      top_(NULL),
      end_(NULL),
      available_(0) {}

  intptr_t Concatenate(FreeListCategory* category);

  void Reset();

  void Free(FreeListNode* node, int size_in_bytes);

  FreeListNode* PickNodeFromList(int *node_size);
  FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);

  intptr_t EvictFreeListItemsInList(Page* p);

  void RepairFreeList(Heap* heap);

  FreeListNode** GetTopAddress() { return &top_; }
  FreeListNode* top() const { return top_; }
  void set_top(FreeListNode* top) { top_ = top; }

  FreeListNode** GetEndAddress() { return &end_; }
  FreeListNode* end() const { return end_; }
  void set_end(FreeListNode* end) { end_ = end; }

  int* GetAvailableAddress() { return &available_; }
  int available() const { return available_; }
  void set_available(int available) { available_ = available; }

  Mutex* mutex() { return &mutex_; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

 private:
  FreeListNode* top_;
  FreeListNode* end_;
  Mutex mutex_;

  // Total available bytes in all blocks of this free list category.
  int available_;
};


// The free list for the old space.  The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other.  The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
// find a new space to allocate from.  This is done with the free list, which
// is divided up into rough categories to cut down on waste.  Having finer
// categories would scatter allocation more.

// The old space free list is organized in categories.
// 1-31 words:  Such small free areas are discarded for efficiency reasons.
//     They can be reclaimed by the compactor.  However the distance between top
//     and limit may be this small.
// 32-255 words: There is a list of spaces this large.  It is used for top and
//     limit when the object we need to allocate is 1-31 words in size.  These
//     spaces are called small.
// 256-2047 words: There is a list of spaces this large.  It is used for top and
//     limit when the object we need to allocate is 32-255 words in size.  These
//     spaces are called medium.
// 2048-16383 words: There is a list of spaces this large.  It is used for top
//     and limit when the object we need to allocate is 256-2047 words in size.
//     These spaces are called large.
// At least 16384 words.  This list is for objects of 2048 words or larger.
//     Empty pages are added to this list.  These spaces are called huge.
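//
// Worked example (illustrative only): an allocation of at most 31 words is
// carved from a block on the small list (32-255 word blocks), an allocation
// of 32-255 words from the medium list (256-2047 word blocks), an allocation
// of 256-2047 words from the large list, and anything bigger from the huge
// list.  In bytes, with kPointerSize == 8, the constants declared below work
// out to kSmallListMin == 256, kSmallListMax == 2040, kMediumListMax == 16376
// and kLargeListMax == 131064.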
class FreeList BASE_EMBEDDED {
 public:
  explicit FreeList(PagedSpace* owner);

  intptr_t Concatenate(FreeList* free_list);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() {
    return small_list_.available() + medium_list_.available() +
           large_list_.available() + huge_list_.available();
  }

  // Place a node on the free list.  The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list.  The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block.  Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed.  The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list.  The block
  // is uninitialized.  A failure is returned if no block is available.  The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

#ifdef DEBUG
  void Zap();
  intptr_t SumFreeLists();
  bool IsVeryLong();
#endif

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  intptr_t EvictFreeListItems(Page* p);

  FreeListCategory* small_list() { return &small_list_; }
  FreeListCategory* medium_list() { return &medium_list_; }
  FreeListCategory* large_list() { return &large_list_; }
  FreeListCategory* huge_list() { return &huge_list_; }

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize;

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  PagedSpace* owner_;
  Heap* heap_;

  static const int kSmallListMin = 0x20 * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;
  FreeListCategory small_list_;
  FreeListCategory medium_list_;
  FreeListCategory large_list_;
  FreeListCategory huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
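//
// Illustrative sketch (behaviour inferred from the comments above, not a
// quote of the implementation; |free_list|, |start| and |start2| are
// assumed): what the return value of Free() means.
//
//   int wasted = free_list->Free(start, 16 * kPointerSize);
//   // 16 words is below the smallest category, so the whole block is
//   // reported as wasted (wasted == 16 * kPointerSize).
//
//   wasted = free_list->Free(start2, 64 * kPointerSize);
//   // 64 words fits the small list (32-255 words), so wasted == 0.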


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible.  If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool SetUp();

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetUp();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address, the cost is
  // linear in the number of objects in the page. It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  virtual void RepairFreeListsAfterBoot();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space.  For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

  struct SizeStats {
    intptr_t Total() {
      return small_size_ + medium_size_ + large_size_ + huge_size_;
    }

    intptr_t small_size_;
    intptr_t medium_size_;
    intptr_t large_size_;
    intptr_t huge_size_;
  };

  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
  void ResetFreeListStatistics();

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered.  As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals.
  void ClearStats() {
    accounting_stats_.ClearSizeWaste();
    ResetFreeListStatistics();
  }

  // Increases the number of available bytes of that space.
  void AddToAccountingStats(intptr_t bytes) {
    accounting_stats_.DeallocateBytes(bytes);
  }

  // Available bytes without growing.  These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation.  New pages are
  // immediately added to the free list so they show up here.
  intptr_t Available() { return free_list_.available(); }

  // Allocated bytes in this space.  Garbage bytes that were not found due to
  // lazy sweeping are counted as being allocated!  The bytes in the current
  // linear allocation area (between top and limit) are also counted here.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // As size, but the bytes in lazily swept pages are estimated and the bytes
  // in the current linear allocation area are not included.
  virtual intptr_t SizeOfObjects();

  // Wasted bytes in this space.  These are just the bytes that were thrown away
  // due to being too small to use for allocation.  They do not include the
  // free bytes that were not found at all due to lazy sweeping.
  virtual intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() {
    return allocation_info_.top_address();
  }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  enum AllocationType {
    NEW_OBJECT,
    MOVE_OBJECT
  };

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline MaybeObject* AllocateRaw(
      int size_in_bytes,
      AllocationType event = NEW_OBJECT);

  virtual bool ReserveSpace(int bytes);

  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.
  int Free(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
    return size_in_bytes - wasted;
  }

  void ResetFreeList() {
    free_list_.Reset();
  }

  // Set space allocation info.
  void SetTop(Address top, Address limit) {
    ASSERT(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.set_top(top);
    allocation_info_.set_limit(limit);
  }

  void Allocate(int bytes) {
    accounting_stats_.AllocateBytes(bytes);
  }

  void IncreaseCapacity(int size) {
    accounting_stats_.ExpandSpace(size);
  }

  // Releases an unused page and shrinks the space.
  void ReleasePage(Page* page, bool unlink);

  // The dummy page that anchors the linked list of pages.
  Page* anchor() { return &anchor_; }

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Reports statistics for the space
  void ReportStatistics();

  // Report code object related statistics
  void CollectCodeStatistics();
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  bool was_swept_conservatively() { return was_swept_conservatively_; }
  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }

  // Evacuation candidates are swept by evacuator.  Needs to return a valid
  // result before _and_ after evacuation has finished.
  static bool ShouldBeSweptLazily(Page* p) {
    return !p->IsEvacuationCandidate() &&
           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
           !p->WasSweptPrecisely();
  }

  void SetPagesToSweep(Page* first) {
    ASSERT(unswept_free_bytes_ == 0);
    if (first == &anchor_) first = NULL;
    first_unswept_page_ = first;
  }

  void IncrementUnsweptFreeBytes(intptr_t by) {
    unswept_free_bytes_ += by;
  }

  void IncreaseUnsweptFreeBytes(Page* p) {
    ASSERT(ShouldBeSweptLazily(p));
    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
  }

  void DecrementUnsweptFreeBytes(intptr_t by) {
    unswept_free_bytes_ -= by;
  }

  void DecreaseUnsweptFreeBytes(Page* p) {
    ASSERT(ShouldBeSweptLazily(p));
    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
  }

  void ResetUnsweptFreeBytes() {
    unswept_free_bytes_ = 0;
  }

  bool AdvanceSweeper(intptr_t bytes_to_sweep);

  // When parallel sweeper threads are active and the main thread finished
  // its sweeping phase, this function waits for them to complete, otherwise
  // AdvanceSweeper with size_in_bytes is called.
  bool EnsureSweeperProgress(intptr_t size_in_bytes);

  bool IsLazySweepingComplete() {
    return !first_unswept_page_->is_valid();
  }

  Page* FirstPage() { return anchor_.next_page(); }
  Page* LastPage() { return anchor_.prev_page(); }

  void EvictEvacuationCandidatesFromFreeLists();

  bool CanExpand();

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() {
    return area_size_;
  }

 protected:
  FreeList* free_list() { return &free_list_; }

  int area_size_;

  // Maximum capacity of this space.
  intptr_t max_capacity_;

  intptr_t SizeOfFirstPage();

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The dummy page that anchors the double linked list of pages.
  Page anchor_;

  // The space's free list.
  FreeList free_list_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Bytes of each page that cannot be allocated.  Possibly non-zero
  // for pages in spaces with only fixed-size objects.  Always zero
  // for pages in spaces with variable sized objects (those pages are
  // padded with free-list nodes).
  int page_extra_;

  bool was_swept_conservatively_;

  // The first page to be swept when the lazy sweeper advances. Is set
  // to NULL when all pages have been swept.
  Page* first_unswept_page_;

  // The number of free bytes which could be reclaimed by advancing the
  // lazy sweeper.  This is only an estimation because lazy sweeping is
  // done conservatively.
  intptr_t unswept_free_bytes_;

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate requested number of pages from OS, or if the hard heap
  // size limit has been hit.
  bool Expand();

  // Generic fast case allocation function that tries linear allocation at the
  // address denoted by top in allocation_info_.
  inline HeapObject* AllocateLinearly(int size_in_bytes);

  // Slow path of AllocateRaw.  This function is space-dependent.
  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);

  friend class PageIterator;
  friend class SweeperThread;
};
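//
// Illustrative sketch of the intended allocation path (|space| is an assumed
// PagedSpace*; this is pseudo-usage, not code from this file):
//
//   MaybeObject* maybe = space->AllocateRaw(size_in_bytes);
//   // AllocateRaw first tries AllocateLinearly(), i.e. bumping
//   // allocation_info_.top() towards limit(); only when the linear area is
//   // exhausted does it fall back to SlowAllocateRaw(), which is expected to
//   // refill the linear area from the free list or expand the space.  A
//   // failure object is returned if neither succeeds.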


class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram.  This
// class is used for collecting statistics to print to the log file.
class HistogramInfo: public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};


enum SemiSpaceId {
  kFromSpace = 0,
  kToSpace = 1
};


class SemiSpace;


class NewSpacePage : public MemoryChunk {
 public:
  // GC related flags copied from from-space to to-space when
  // flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
    (1 << MemoryChunk::SCAN_ON_SCAVENGE);

  static const int kAreaSize = Page::kNonCodeObjectAreaSize;

  inline NewSpacePage* next_page() const {
    return static_cast<NewSpacePage*>(next_chunk());
  }

  inline void set_next_page(NewSpacePage* page) {
    set_next_chunk(page);
  }

  inline NewSpacePage* prev_page() const {
    return static_cast<NewSpacePage*>(prev_chunk());
  }

  inline void set_prev_page(NewSpacePage* page) {
    set_prev_chunk(page);
  }

  SemiSpace* semi_space() {
    return reinterpret_cast<SemiSpace*>(owner());
  }

  bool is_anchor() { return !this->InNewSpace(); }

  static bool IsAtStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
        == kObjectStartOffset;
  }

  static bool IsAtEnd(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
  }

  Address address() {
    return reinterpret_cast<Address>(this);
  }

  // Finds the NewSpacePage containing the given address.
  static inline NewSpacePage* FromAddress(Address address_in_page) {
    Address page_start =
        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
                                  ~Page::kPageAlignmentMask);
    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
    return page;
  }

  // Find the page for a limit address. A limit address is either an address
  // inside a page, or the address right after the last byte of a page.
  static inline NewSpacePage* FromLimit(Address address_limit) {
    return NewSpacePage::FromAddress(address_limit - 1);
  }

 private:
  // Create a NewSpacePage object that is only used as anchor
  // for the doubly-linked list of real pages.
  explicit NewSpacePage(SemiSpace* owner) {
    InitializeAsAnchor(owner);
  }

  static NewSpacePage* Initialize(Heap* heap,
                                  Address start,
                                  SemiSpace* semi_space);

  // Initialize a fake NewSpacePage used as a sentinel at the ends
  // of a doubly-linked list of real NewSpacePages.
  // Only uses the prev/next links, and sets flags to not be in new-space.
  void InitializeAsAnchor(SemiSpace* owner);

  friend class SemiSpace;
  friend class SemiSpaceIterator;
};
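//
// Worked example (illustrative only; |addr| is an assumed address):
// Page::kPageAlignmentMask selects the offset of an address within its page,
// so
//
//   NewSpacePage::FromAddress(addr);  // masks the offset away, yielding the
//                                     // page that contains addr
//   NewSpacePage::IsAtStart(addr);    // true iff the offset equals
//                                     // kObjectStartOffset (first object slot)
//   NewSpacePage::IsAtEnd(addr);      // true iff the offset is 0, i.e. addr is
//                                     // one past the end of the previous page
//   NewSpacePage::FromLimit(addr);    // FromAddress(addr - 1), so a
//                                     // one-past-the-end limit maps back to
//                                     // the page it limits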


// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A semispace is a contiguous chunk of memory holding page-like memory
// chunks. The mark-compact collector  uses the memory of the first page in
// the from space as a marking stack when tracing live objects.

class SemiSpace : public Space {
 public:
  // Constructor.
  SemiSpace(Heap* heap, SemiSpaceId semispace)
    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
      start_(NULL),
      age_mark_(NULL),
      id_(semispace),
      anchor_(this),
      current_page_(NULL) { }

  // Sets up the semispace using the given chunk.
  void SetUp(Address start, int initial_capacity, int maximum_capacity);

  // Tear down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() { return start_ != NULL; }

  // Grow the semispace to the new capacity.  The new capacity
  // requested must be larger than the current capacity and less than
  // the maximum capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity.  The new capacity
  // requested must be more than the amount of used memory in the
  // semispace and less than the current capacity.
  bool ShrinkTo(int new_capacity);

  // Returns the start address of the first page of the space.
  Address space_start() {
    ASSERT(anchor_.next_page() != &anchor_);
    return anchor_.next_page()->area_start();
  }

  // Returns the start address of the current page of the space.
  Address page_low() {
    return current_page_->area_start();
  }

  // Returns one past the end address of the space.
  Address space_end() {
    return anchor_.prev_page()->area_end();
  }

  // Returns one past the end address of the current page of the space.
  Address page_high() {
    return current_page_->area_end();
  }

  bool AdvancePage() {
    NewSpacePage* next_page = current_page_->next_page();
    if (next_page == anchor()) return false;
    current_page_ = next_page;
    return true;
  }

  // Resets the space to using the first page.
  void Reset();

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);
  }

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // If we don't have these here then SemiSpace will be abstract.  However
  // they should never be called.
  virtual intptr_t Size() {
    UNREACHABLE();
    return 0;
  }

  virtual bool ReserveSpace(int bytes) {
    UNREACHABLE();
    return false;
  }

  bool is_committed() { return committed_; }
  bool Commit();
  bool Uncommit();

  NewSpacePage* first_page() { return anchor_.next_page(); }
  NewSpacePage* current_page() { return current_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  virtual void Print();
  // Validate a range of addresses in a SemiSpace.
  // The "from" address must be on a page prior to the "to" address,
  // in the linked page order, or it must be earlier on the same page.
  static void AssertValidRange(Address from, Address to);
#else
  // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
#endif

  // Returns the current capacity of the semi space.
  int Capacity() { return capacity_; }

  // Returns the maximum capacity of the semi space.
  int MaximumCapacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semi space.
  int InitialCapacity() { return initial_capacity_; }

  SemiSpaceId id() { return id_; }

  static void Swap(SemiSpace* from, SemiSpace* to);

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

 private:
  // Flips the semispace between being from-space and to-space.
  // Copies the flags into the masked positions on all pages in the space.
  void FlipPages(intptr_t flags, intptr_t flag_mask);

  NewSpacePage* anchor() { return &anchor_; }

  // The current and maximum capacity of the space.
  int capacity_;
  int maximum_capacity_;
  int initial_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  bool committed_;
  SemiSpaceId id_;

  NewSpacePage anchor_;
  NewSpacePage* current_page_;

  friend class SemiSpaceIterator;
  friend class NewSpacePageIterator;
 public:
  TRACK_MEMORY("SemiSpace")
};
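//
// Worked example (illustrative; the addresses are made up): Contains() relies
// on the semispace occupying a size-aligned, power-of-two sized range.  For a
// 4 MB semispace starting at 0x3f800000, address_mask_ is ~(4 MB - 1), so
//
//   semi_space->Contains(a)
//
// reduces to "(a & ~0x3fffff) == 0x3f800000", a single mask-and-compare
// instead of two range checks.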


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space.  It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace.  New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space.  If no start
  // address is given, the iterator starts from the bottom of the space.  If
  // no size function is given, the iterator calls Object::Size().

  // Iterate over all of allocated to-space.
  explicit SemiSpaceIterator(NewSpace* space);
  // Iterate over all of allocated to-space, with a custom size function.
  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
  // Iterate over part of allocated to-space, from start to the end
  // of allocation.
  SemiSpaceIterator(NewSpace* space, Address start);
  // Iterate from one address to another in the same semi-space.
  SemiSpaceIterator(Address from, Address to);

  HeapObject* Next() {
    if (current_ == limit_) return NULL;
    if (NewSpacePage::IsAtEnd(current_)) {
      NewSpacePage* page = NewSpacePage::FromLimit(current_);
      page = page->next_page();
      ASSERT(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return NULL;
    }

    HeapObject* object = HeapObject::FromAddress(current_);
    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

    current_ += size;
    return object;
  }

  // Implementation of the ObjectIterator functions.
  virtual HeapObject* next_object() { return Next(); }

 private:
  void Initialize(Address start,
                  Address end,
                  HeapObjectCallback size_func);

  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function.
  HeapObjectCallback size_func_;
};
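//
// Illustrative usage sketch (|heap| is an assumed Heap*; new_space() is how
// the heap exposes its NewSpace):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // visit obj; objects allocated after the iterator was created are
//     // not returned.
//   }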


// -----------------------------------------------------------------------------
// A NewSpacePageIterator iterates the pages in a semi-space.
class NewSpacePageIterator BASE_EMBEDDED {
 public:
  // Make an iterator that runs over all pages in to-space.
  explicit inline NewSpacePageIterator(NewSpace* space);

  // Make an iterator that runs over all pages in the given semispace,
  // even those not used in allocation.
  explicit inline NewSpacePageIterator(SemiSpace* space);

  // Make iterator that iterates from the page containing start
  // to the page that contains limit in the same semispace.
  inline NewSpacePageIterator(Address start, Address limit);

  inline bool has_next();
  inline NewSpacePage* next();

 private:
  NewSpacePage* prev_page_;  // Previous page returned.
  // Next page that will be returned.  Cached here so that we can use this
  // iterator for operations that deallocate pages.
  NewSpacePage* next_page_;
  // Last page returned.
  NewSpacePage* last_page_;
};


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces.  It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  explicit NewSpace(Heap* heap)
    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
      to_space_(heap, kToSpace),
      from_space_(heap, kFromSpace),
      reservation_(),
      inline_allocation_limit_step_(0) {}

  // Sets up the new space using the given chunk.
  bool SetUp(int reserved_semispace_size_, int max_semispace_size);

  // Tears down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces.  Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }

  bool Contains(Object* o) {
    Address a = reinterpret_cast<Address>(o);
    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual intptr_t Size() {
    return pages_used_ * NewSpacePage::kAreaSize +
        static_cast<int>(top() - to_space_.page_low());
  }

  // The same, but returning an int.  We have to have the one that returns
  // intptr_t because it is inherited, but if we know we are dealing with the
  // new space, which can't get as big as the other spaces then this is useful:
  int SizeAsInt() { return static_cast<int>(Size()); }

  // Return the current capacity of a semispace.
  intptr_t EffectiveCapacity() {
    SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
  }

  // Return the current capacity of a semispace.
  intptr_t Capacity() {
    ASSERT(to_space_.Capacity() == from_space_.Capacity());
    return to_space_.Capacity();
  }

  // Return the total amount of memory committed for new space.
  intptr_t CommittedMemory() {
    if (from_space_.is_committed()) return 2 * Capacity();
    return Capacity();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

  // Return the available bytes without growing.
  intptr_t Available() {
    return Capacity() - Size();
  }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
    return to_space_.MaximumCapacity();
  }

  // Returns the initial capacity of a semispace.
  int InitialCapacity() {
    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
    return to_space_.InitialCapacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() {
    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
    return allocation_info_.top();
  }

  void set_top(Address top) {
    ASSERT(to_space_.current_page()->ContainsLimit(top));
    allocation_info_.set_top(top);
  }

  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. Anding an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }

  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
    ASSERT(Contains(addr));
    ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
  }

  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
    return reinterpret_cast<Address>(index << kPointerSizeLog2);
  }

  // The allocation top and limit address.
  Address* allocation_top_address() {
    return allocation_info_.top_address();
  }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();

  void LowerInlineAllocationLimit(intptr_t step) {
    inline_allocation_limit_step_ = step;
    if (step == 0) {
      allocation_info_.set_limit(to_space_.page_high());
    } else {
      Address new_limit = Min(
          allocation_info_.top() + inline_allocation_limit_step_,
          allocation_info_.limit());
      allocation_info_.set_limit(new_limit);
    }
    top_on_previous_step_ = allocation_info_.top();
  }

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Notice: space-addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }

  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContains(Address address) {
    return to_space_.Contains(address);
  }
  inline bool FromSpaceContains(Address address) {
    return from_space_.Contains(address);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();

  virtual bool ReserveSpace(int bytes);

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify();
#endif

#ifdef DEBUG
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object.  Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  inline intptr_t inline_allocation_limit_step() {
    return inline_allocation_limit_step_;
  }

  SemiSpace* active_space() { return &to_space_; }

 private:
  // Update allocation info to match the current to-space page.
  void UpdateAllocationInfo();

  Address chunk_base_;
  uintptr_t chunk_size_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  VirtualMemory reservation_;
  int pages_used_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;

  // When incremental marking is active we will set allocation_info_.limit
  // to be lower than actual limit and then will gradually increase it
  // in steps to guarantee that we do incremental marking steps even
  // when all allocation is performed from inlined generated code.
  intptr_t inline_allocation_limit_step_;

  Address top_on_previous_step_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};
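//
// Illustrative sketch (|new_space| is an assumed NewSpace*; the step value is
// made up):
//
//   new_space->LowerInlineAllocationLimit(1024);
//   // The published limit becomes min(top + 1024, previous limit), so
//   // inlined allocation code takes the slow path roughly once per KB and
//   // incremental marking gets a step in even when all allocation happens
//   // in generated code.
//
//   new_space->LowerInlineAllocationLimit(0);
//   // The limit snaps back to the end of the current page (page_high()).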


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from OS.
  OldSpace(Heap* heap,
           intptr_t max_capacity,
           AllocationSpace id,
           Executability executable)
      : PagedSpace(heap, max_capacity, id, executable) {
    page_extra_ = 0;
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->area_end();
  }

 public:
  TRACK_MEMORY("OldSpace")
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_ASSERT((space).page_low() <= (info).top() \
              && (info).top() <= (space).page_high() \
              && (info).limit() <= (space).page_high())


// -----------------------------------------------------------------------------
// Old space for objects of a fixed size

class FixedSpace : public PagedSpace {
 public:
  FixedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             int object_size_in_bytes)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
        object_size_in_bytes_(object_size_in_bytes) {
    page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->area_end() - page_extra_;
  }

  int object_size_in_bytes() { return object_size_in_bytes_; }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact();

 private:
  // The size of objects in this space.
  int object_size_in_bytes_;
};
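//
// Worked example (illustrative, with made-up numbers): if the per-page object
// area were 1000 bytes and each object 96 bytes, then
// page_extra_ == 1000 % 96 == 40, and PageAllocationLimit() stops 40 bytes
// short of page->area_end(), so a page only ever holds whole objects.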


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public FixedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, Map::kSize),
        max_map_space_pages_(kMaxMapPageIndex - 1) {
  }

  // Given an index, returns the page address.
  // TODO(1600): this limit is artificial just to keep code compilable
  static const int kMaxMapPageIndex = 1 << 16;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }

 protected:
  virtual void VerifyObject(HeapObject* obj);

 private:
  static const int kMapsPerPage = Page::kNonCodeObjectAreaSize / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;

 public:
  TRACK_MEMORY("MapSpace")
};


// -----------------------------------------------------------------------------
// Old space for simple property cell objects

class CellSpace : public FixedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, Cell::kSize)
  {}

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(Cell::kSize)) {
      return RoundDown(size, Cell::kSize);
    } else {
      return (size / Cell::kSize) * Cell::kSize;
    }
  }

 protected:
  virtual void VerifyObject(HeapObject* obj);

 public:
  TRACK_MEMORY("CellSpace")
};


// -----------------------------------------------------------------------------
// Old space for all global object property cell objects

class PropertyCellSpace : public FixedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  PropertyCellSpace(Heap* heap, intptr_t max_capacity,
                    AllocationSpace id)
      : FixedSpace(heap, max_capacity, id, PropertyCell::kSize)
  {}

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(PropertyCell::kSize)) {
      return RoundDown(size, PropertyCell::kSize);
    } else {
      return (size / PropertyCell::kSize) * PropertyCell::kSize;
    }
  }

 protected:
  virtual void VerifyObject(HeapObject* obj);

 public:
  TRACK_MEMORY("PropertyCellSpace")
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset into a page.
// Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
                                           Executability executable);

  // Available bytes for objects in this space.
  inline intptr_t Available();

  virtual intptr_t Size() {
    return size_;
  }

  virtual intptr_t SizeOfObjects() {
    return objects_size_;
  }

  intptr_t CommittedMemory() {
    return Size();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space, may be slow.
  MaybeObject* FindObject(Address a);

  // Finds a large object page containing the given address, returns NULL
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  // See the comments for ReserveSpace in the Space class.  This has to be
  // called after ReserveSpace has been called on the paged spaces, since they
  // may use some memory, leaving less for large objects.
  virtual bool ReserveSpace(int bytes);

  LargePage* first_page() { return first_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
#endif
  // Checks whether an address is in the object area in this space.  It
  // iterates all objects in the space. May be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  intptr_t max_capacity_;
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;  // allocated bytes
  int page_count_;  // number of chunks
  intptr_t objects_size_;  // size of objects
  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
  HashMap chunk_map_;

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};
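//
// Worked example for ObjectSizeFor() (illustrative only): a large-object
// chunk carries one page of padding plus the object-start offset, so a chunk
// of Page::kPageSize + Page::kObjectStartOffset + 4096 bytes can hold an
// object of at most 4096 bytes, and a chunk no bigger than the padding itself
// yields 0.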


class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  HeapObject* Next();

  // implementation of ObjectIterator.
  virtual HeapObject* next_object() { return Next(); }

 private:
  LargePage* current_;
  HeapObjectCallback size_func_;
};


// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
 public:
  inline explicit PointerChunkIterator(Heap* heap);

  // Return NULL when the iterator is done.
  MemoryChunk* next() {
    switch (state_) {
      case kOldPointerState: {
        if (old_pointer_iterator_.has_next()) {
          return old_pointer_iterator_.next();
        }
        state_ = kMapState;
        // Fall through.
      }
      case kMapState: {
        if (map_iterator_.has_next()) {
          return map_iterator_.next();
        }
        state_ = kLargeObjectState;
        // Fall through.
      }
      case kLargeObjectState: {
        HeapObject* heap_object;
        do {
          heap_object = lo_iterator_.Next();
          if (heap_object == NULL) {
            state_ = kFinishedState;
            return NULL;
          }
          // Fixed arrays are the only pointer-containing objects in large
          // object space.
        } while (!heap_object->IsFixedArray());
        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
        return answer;
      }
      case kFinishedState:
        return NULL;
      default:
        break;
    }
    UNREACHABLE();
    return NULL;
  }


 private:
  enum State {
    kOldPointerState,
    kMapState,
    kLargeObjectState,
    kFinishedState
  };
  State state_;
  PageIterator old_pointer_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
};
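//
// Illustrative usage sketch (|heap| is an assumed Heap*):
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // chunk is an old-pointer-space page, a map-space page, or a large
//     // object page holding a FixedArray: the only chunks that can contain
//     // pointers to new space.
//   }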


#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
#endif


} }  // namespace v8::internal

#endif  // V8_SPACES_H_