deps/v8/src/spaces.cc @ f230a1cf


// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "macro-assembler.h"
#include "mark-compact.h"
#include "msan.h"
#include "platform.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page.  It is not a real page,
  // just an anchor for the double linked page list.  Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page.  It is not a real page,
  // just an anchor for the double linked page list.  Initialize the current
  // address and end as NULL, then the first iteration will move on
  // to the first page.
  Initialize(space,
             NULL,
             NULL,
             kAllPagesInSpace,
             size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  ASSERT(owner == page->heap()->old_pointer_space() ||
         owner == page->heap()->old_data_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->cell_space() ||
         owner == page->heap()->property_cell_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner),
             page->area_start(),
             page->area_end(),
             kOnePageOnly,
             size_func);
  ASSERT(page->WasSweptPrecisely());
}


void HeapObjectIterator::Initialize(PagedSpace* space,
                                    Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  // Check that we actually can iterate this space.
  ASSERT(!space->was_swept_conservatively());

  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}


// We have hit the end of the page and should advance to the next block of
// objects.  This happens at the end of the page.
bool HeapObjectIterator::AdvanceToNextPage() {
  ASSERT(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    ASSERT(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  ASSERT(cur_page->WasSweptPrecisely());
  return true;
}
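
// Illustrative usage sketch: the iterator is typically driven the way
// PagedSpace::FindObject() below drives it -- construct it over a precisely
// swept space or page and pull objects with Next() until it returns NULL,
// letting AdvanceToNextPage() cross page boundaries transparently.
// Assuming a precisely swept PagedSpace* space:
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // obj->Size() is reliable here because the space was swept precisely.
//   }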


// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {
}


bool CodeRange::SetUp(const size_t requested) {
  ASSERT(code_range_ == NULL);

  code_range_ = new VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  ASSERT(code_range_->size() == requested);
  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  Address base = reinterpret_cast<Address>(code_range_->address());
  Address aligned_base =
      RoundUp(reinterpret_cast<Address>(code_range_->address()),
              MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base);
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


void CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return;  // Found a large enough allocation block.
    }
  }

  // Code range is full or too fragmented.
  V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
}
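
// Worked example of the sort-and-merge above (block addresses and sizes are
// assumed for illustration only): if the free list holds
// {start = 0x1000, size = 0x1000} and {start = 0x2000, size = 0x3000}, the
// second block starts exactly at merged.start + merged.size of the first, so
// the loop folds them into a single allocation-list entry
// {start = 0x1000, size = 0x4000}; a later request for, say, 0x3800 bytes can
// then be satisfied from one contiguous block.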


Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  ASSERT(commit_size <= requested_size);
  ASSERT(current_allocation_block_index_ < allocation_list_.length());
  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.  This function call may
    // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
    GetNextAllocationBlock(requested_size);
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  FreeBlock current = allocation_list_[current_allocation_block_index_];
  if (aligned_requested >= (current.size - Page::kPageSize)) {
    // Don't leave a small free block, useless for a large object or chunk.
    *allocated = current.size;
  } else {
    *allocated = aligned_requested;
  }
  ASSERT(*allocated <= current.size);
  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
                                                            current.start,
                                                            commit_size,
                                                            *allocated)) {
    *allocated = 0;
    return NULL;
  }
  allocation_list_[current_allocation_block_index_].start += *allocated;
  allocation_list_[current_allocation_block_index_].size -= *allocated;
  if (*allocated == current.size) {
    GetNextAllocationBlock(0);  // This block is used up, get the next one.
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
    delete code_range_;  // Frees all memory in the virtual memory range.
    code_range_ = NULL;
    free_list_.Free();
    allocation_list_.Free();
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  ASSERT_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  ASSERT(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // ASSERT(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


bool MemoryAllocator::CommitMemory(Address base,
                                   size_t size,
                                   Executability executable) {
  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(reservation->IsReserved());
  size_t size = reservation->size();
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  ASSERT(!isolate_->code_range()->contains(
      static_cast<Address>(reservation->address())));
  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base,
                                 size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  ASSERT(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    ASSERT(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range()->contains(static_cast<Address>(base))) {
    ASSERT(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
    bool result = VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    ASSERT(result);
  }
}


Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                              size_t alignment,
                                              VirtualMemory* controller) {
  VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base = RoundUp(static_cast<Address>(reservation.address()),
                         alignment);
  controller->TakeControl(&reservation);
  return base;
}


Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
                                               size_t commit_size,
                                               size_t alignment,
                                               Executability executable,
                                               VirtualMemory* controller) {
  ASSERT(commit_size <= reserve_size);
  VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation,
                                base,
                                commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}
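
// For illustration (addresses assumed): if the reservation above happens to
// start at 0x3fe80000 and the alignment is 1 MB (0x100000), then
// RoundUp(0x3fe80000, 0x100000) == 0x3ff00000, i.e. the returned base is the
// first alignment boundary at or above the raw reservation address; the
// VirtualMemory(size, alignment) constructor is relied upon to reserve enough
// slack for that adjustment.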


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap,
                                       Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
                                               start,
                                               Page::kPageSize,
                                               area_start,
                                               area_end,
                                               NOT_EXECUTABLE,
                                               semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // Flags marks this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}


MemoryChunk* MemoryChunk::Initialize(Heap* heap,
                                     Address base,
                                     size_t size,
                                     Address area_start,
                                     Address area_end,
                                     Executability executable,
                                     Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  ASSERT(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<int>(area_start - base);
  chunk->parallel_sweeping_ = 0;
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT_PRECISELY);

  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
                      MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    ASSERT(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable = IsFlagSet(IS_EXECUTABLE)
          ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(
              start, length, executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    ASSERT(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}
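
// Worked example for CommitArea() (sizes assumed for illustration): with a
// 3 KB header, a 4 KB OS commit page size and no guard pages, growing the
// area from 5 KB to 9 KB gives commit_size = RoundUp(3 KB + 9 KB, 4 KB) =
// 12 KB versus committed_size = RoundUp(3 KB + 5 KB, 4 KB) = 8 KB, so exactly
// one more OS page is committed at the end of the already-committed region;
// shrinking reverses the same arithmetic and uncommits the tail.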


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  next_chunk_ = other->next_chunk_;
  prev_chunk_ = other;

  // This memory barrier is needed since concurrent sweeper threads may iterate
  // over the list of pages while a new page is inserted.
  // TODO(hpayer): find a cleaner way to guarantee that the page list can be
  // expanded concurrently
  MemoryBarrier();

  // The following two write operations can take effect in arbitrary order
  // since pages are always iterated by the sweeper threads in LIFO order, i.e.,
  // the inserted page becomes visible for the sweeper threads after
  // other->next_chunk_ = this;
  other->next_chunk_->prev_chunk_ = this;
  other->next_chunk_ = this;
}


void MemoryChunk::Unlink() {
  if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
    heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  next_chunk_->prev_chunk_ = prev_chunk_;
  prev_chunk_->next_chunk_ = next_chunk_;
  prev_chunk_ = NULL;
  next_chunk_ = NULL;
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  ASSERT(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         OS::CommitPageSize()) + CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_,
          StringEvent("MemoryAllocator::AllocateRawMemory",
                      "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range()->exists()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
                                                       commit_size,
                                                       &chunk_size);
      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                       MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size,
                                   commit_size,
                                   MemoryChunk::kAlignment,
                                   executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         OS::CommitPageSize());
    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
                                 commit_area_size, OS::CommitPageSize());
    base = AllocateAlignedMemory(chunk_size,
                                 commit_size,
                                 MemoryChunk::kAlignment,
                                 executable,
                                 &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->
      Increment(static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(heap,
                                                base,
                                                chunk_size,
                                                area_start,
                                                area_end,
                                                executable,
                                                owner);
  result->set_reserved_memory(&reservation);
  MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
  return result;
}
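
// For illustration (numbers assumed): for an executable chunk with a 4 KB
// commit page size, a 4 KB guard page and a reserve_area_size of 100 KB,
// chunk_size is RoundUp(CodePageAreaStartOffset() + 100 KB, 4 KB) plus one
// trailing guard page, while commit_size only covers the header plus
// commit_area_size; the gap between the two is the "Reserved but not
// committed" region in the layout diagram above and can be committed later
// through MemoryChunk::CommitArea().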


void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}


Page* MemoryAllocator::AllocatePage(intptr_t size,
                                    PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(object_size,
                                     object_size,
                                     executable,
                                     owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}


void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(
      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(),
               chunk->size(),
               chunk->executable());
  }
}


bool MemoryAllocator::CommitBlock(Address start,
                                  size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
      memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback,
    ObjectSpace space,
    AllocationAction action) {
  ASSERT(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
     MemoryAllocationCallback callback) {
  ASSERT(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", used: %" V8_PTR_PREFIX "d"
             ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct*100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
}


bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
                                             Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  if (!vm->Commit(start,
                  CodePageGuardStartOffset(),
                  false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  commit_size - CodePageGuardStartOffset(),
                  true)) {
    return false;
  }

  // Create guard page before the end.
  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
    return false;
  }

  UpdateAllocatedSpaceLimits(start,
                             start + CodePageAreaStartOffset() +
                             commit_size - CodePageGuardStartOffset());
  return true;
}
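
// Worked example of the code-page layout (assuming a Page::kObjectStartOffset
// of 256 bytes and a 4 KB OS commit page): CodePageGuardStartOffset() rounds
// 256 up to 4 KB, the guard page then occupies [4 KB, 8 KB), so
// CodePageAreaStartOffset() is 8 KB, and CodePageAreaEndOffset() is the page
// size minus one trailing 4 KB guard page.  CommitExecutableMemory() above
// commits the header as writable data, the body as executable, and installs
// the guard page after the header and the one before the end.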


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

PagedSpace::PagedSpace(Heap* heap,
                       intptr_t max_capacity,
                       AllocationSpace id,
                       Executability executable)
    : Space(heap, id, executable),
      free_list_(this),
      was_swept_conservatively_(false),
      first_unswept_page_(Page::FromAddress(NULL)),
      unswept_free_bytes_(0) {
  if (id == CODE_SPACE) {
    area_size_ = heap->isolate()->memory_allocator()->
        CodePageAreaSize();
  } else {
    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
  }
  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
      * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() {
  return true;
}


bool PagedSpace::HasBeenSetUp() {
  return true;
}


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


MaybeObject* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on precisely swept spaces.
  ASSERT(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Failure::Exception();

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Failure::Exception();
}


bool PagedSpace::CanExpand() {
  ASSERT(max_capacity_ % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  ASSERT(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}


bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  intptr_t size = AreaSize();

  if (anchor_.next_page() == &anchor_) {
    size = SizeOfFirstPage();
  }

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
      size, this, executable());
  if (p == NULL) return false;

  ASSERT(Capacity() <= max_capacity_);

  p->InsertAfter(anchor_.prev_page());

  return true;
}


intptr_t PagedSpace::SizeOfFirstPage() {
  int size = 0;
  switch (identity()) {
    case OLD_POINTER_SPACE:
      size = 72 * kPointerSize * KB;
      break;
    case OLD_DATA_SPACE:
      size = 192 * KB;
      break;
    case MAP_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case CELL_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case PROPERTY_CELL_SPACE:
      size = 8 * kPointerSize * KB;
      break;
    case CODE_SPACE:
      if (heap()->isolate()->code_range()->exists()) {
        // When code range exists, code pages are allocated in a special way
        // (from the reserved code range). That part of the code is not yet
        // upgraded to handle small pages.
        size = AreaSize();
      } else {
#if V8_TARGET_ARCH_MIPS
        // TODO(plind): Investigate larger code stubs size on MIPS.
        size = 480 * KB;
#else
        size = 416 * KB;
#endif
      }
      break;
    default:
      UNREACHABLE();
  }
  return Min(size, AreaSize());
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
  sizes->huge_size_ = page->available_in_huge_free_list();
  sizes->small_size_ = page->available_in_small_free_list();
  sizes->medium_size_ = page->available_in_medium_free_list();
  sizes->large_size_ = page->available_in_large_free_list();
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::ReleasePage(Page* page, bool unlink) {
  ASSERT(page->LiveBytes() == 0);
  ASSERT(AreaSize() == page->area_size());

  // Adjust list of unswept pages if the page is the head of the list.
  if (first_unswept_page_ == page) {
    first_unswept_page_ = page->next_page();
    if (first_unswept_page_ == anchor()) {
      first_unswept_page_ = Page::FromAddress(NULL);
    }
  }

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    ASSERT_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }

  if (unlink) {
    page->Unlink();
  }
  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
    heap()->isolate()->memory_allocator()->Free(page);
  } else {
    heap()->QueueMemoryChunkForFree(page);
  }

  ASSERT(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}


#ifdef DEBUG
void PagedSpace::Print() { }
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  // We can only iterate over the pages if they were swept precisely.
  if (was_swept_conservatively_) return;

  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSweptPrecisely());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base =
      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
          size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
  ASSERT(IsPowerOf2(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
                       promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  ASSERT(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_,
                  initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  ASSERT(!from_space_.is_committed());  // No need to use memory yet.

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}
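
// For illustration (capacity assumed): with a reserved semispace capacity of
// 8 MB the chunk is 16 MB and aligned to 16 MB, so address_mask_ is
// ~(16 MB - 1); any address inside new space then satisfies
// (addr & address_mask_) == start_, which is what makes the containment test
// a single mask-and-compare rather than a range check.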


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  ASSERT(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}


void NewSpace::Flip() {
  SemiSpace::Swap(&from_space_, &to_space_);
}


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  ASSERT(Capacity() < MaximumCapacity());
  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to grow new space.");
      }
    }
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < Capacity() &&
      to_space_.ShrinkTo(rounded_new_capacity))  {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.Capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
      }
    }
  }
  allocation_info_.set_limit(to_space_.page_high());
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::UpdateAllocationInfo() {
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.set_top(to_space_.page_low());
  allocation_info_.set_limit(to_space_.page_high());

  // Lower limit during incremental marking.
  if (heap()->incremental_marking()->IsMarking() &&
      inline_allocation_limit_step() != 0) {
    Address new_limit =
        allocation_info_.top() + inline_allocation_limit_step();
    allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
  }
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::ResetAllocationInfo() {
  to_space_.Reset();
  UpdateAllocationInfo();
  pages_used_ = 0;
  // Clear all mark-bits in the to-space.
  NewSpacePageIterator it(&to_space_);
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top();
  if (NewSpacePage::IsAtStart(top)) {
    // The current page is already empty. Don't try to make another.

    // We should only get here if someone asks to allocate more
    // than what can be stored in a single page.
    // TODO(gc): Change the limit on new-space allocation to prevent this
    // from happening (all such allocations should go directly to LOSpace).
    return false;
  }
  if (!to_space_.AdvancePage()) {
    // Failed to get a new page in to-space.
    return false;
  }

  // Clear remainder of current page.
  Address limit = NewSpacePage::FromLimit(top)->area_end();
  if (heap()->gc_state() == Heap::SCAVENGE) {
    heap()->promotion_queue()->SetNewLimit(limit);
    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
  }

  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page);
  pages_used_++;
  UpdateAllocationInfo();

  return true;
}


MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();
  Address new_top = old_top + size_in_bytes;
  Address high = to_space_.page_high();
  if (allocation_info_.limit() < high) {
    // Incremental marking has lowered the limit to get a
    // chance to do a step.
    Address new_limit = Min(
        allocation_info_.limit() + inline_allocation_limit_step_,
        high);
    allocation_info_.set_limit(new_limit);
    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = new_top;
    return AllocateRaw(size_in_bytes);
  } else if (AddFreshPage()) {
    // Switched to new page. Try allocating again.
    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
    heap()->incremental_marking()->Step(
        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
    top_on_previous_step_ = to_space_.page_low();
    return AllocateRaw(size_in_bytes);
  } else {
    return Failure::RetryAfterGC();
  }
}


#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
void NewSpace::Verify() {
  // The allocation pointer should be in the space or at the very end.
  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  while (current != top()) {
    if (!NewSpacePage::IsAtEnd(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsCode());

      // The object itself should look OK.
      object->Verify();

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor;
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, &visitor);

      current += size;
    } else {
      // At end of page, switch to next page.
      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
      // Next page should be valid.
      CHECK(!page->is_anchor());
      current = page->area_start();
    }
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
#endif

1498
// -----------------------------------------------------------------------------
1499
// SemiSpace implementation
1500

    
1501
void SemiSpace::SetUp(Address start,
                      int initial_capacity,
                      int maximum_capacity) {
  // Creates a space in the young generation. The constructor does not
  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
  // memory of size 'capacity' when set up, and does not grow or shrink
  // otherwise.  In the mark-compact collector, the memory region of the from
  // space is used as the marking stack. It requires contiguous memory
  // addresses.
  ASSERT(maximum_capacity >= Page::kPageSize);
  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  capacity_ = initial_capacity;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  committed_ = false;
  start_ = start;
  address_mask_ = ~(maximum_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
  age_mark_ = start_;
}


void SemiSpace::TearDown() {
  start_ = NULL;
  capacity_ = 0;
}


bool SemiSpace::Commit() {
  ASSERT(!is_committed());
  int pages = capacity_ / Page::kPageSize;
  if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
                                                          capacity_,
                                                          executable())) {
    return false;
  }

  NewSpacePage* current = anchor();
  for (int i = 0; i < pages; i++) {
    NewSpacePage* new_page =
      NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
    new_page->InsertAfter(current);
    current = new_page;
  }

  committed_ = true;
  Reset();
  return true;
}


bool SemiSpace::Uncommit() {
  ASSERT(is_committed());
  Address start = start_ + maximum_capacity_ - capacity_;
  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
    return false;
  }
  anchor()->set_next_page(anchor());
  anchor()->set_prev_page(anchor());

  committed_ = false;
  return true;
}


size_t SemiSpace::CommittedPhysicalMemory() {
  if (!is_committed()) return 0;
  size_t size = 0;
  NewSpacePageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


bool SemiSpace::GrowTo(int new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity <= maximum_capacity_);
  ASSERT(new_capacity > capacity_);
  int pages_before = capacity_ / Page::kPageSize;
  int pages_after = new_capacity / Page::kPageSize;

  size_t delta = new_capacity - capacity_;

  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
  if (!heap()->isolate()->memory_allocator()->CommitBlock(
      start_ + capacity_, delta, executable())) {
    return false;
  }
  capacity_ = new_capacity;
  NewSpacePage* last_page = anchor()->prev_page();
  ASSERT(last_page != anchor());
  for (int i = pages_before; i < pages_after; i++) {
    Address page_address = start_ + i * Page::kPageSize;
    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
                                                      page_address,
                                                      this);
    new_page->InsertAfter(last_page);
    Bitmap::Clear(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page->GetFlags(),
                       NewSpacePage::kCopyOnFlipFlagsMask);
    last_page = new_page;
  }
  return true;
}


bool SemiSpace::ShrinkTo(int new_capacity) {
  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
  ASSERT(new_capacity >= initial_capacity_);
  ASSERT(new_capacity < capacity_);
  if (is_committed()) {
    size_t delta = capacity_ - new_capacity;
    ASSERT(IsAligned(delta, OS::AllocateAlignment()));

    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
      return false;
    }

    int pages_after = new_capacity / Page::kPageSize;
    NewSpacePage* new_last_page =
        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
    new_last_page->set_next_page(anchor());
    anchor()->set_prev_page(new_last_page);
    ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
  }

  capacity_ = new_capacity;

  return true;
}


void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
  anchor_.set_owner(this);
  // Fixup back-pointers to anchor. Address of anchor changes
  // when we swap.
  anchor_.prev_page()->set_next_page(&anchor_);
  anchor_.next_page()->set_prev_page(&anchor_);

  bool becomes_to_space = (id_ == kFromSpace);
  id_ = becomes_to_space ? kToSpace : kFromSpace;
  NewSpacePage* page = anchor_.next_page();
  while (page != &anchor_) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (becomes_to_space) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      page->ResetLiveBytes();
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
    page = page->next_page();
  }
}


void SemiSpace::Reset() {
  ASSERT(anchor_.next_page() != &anchor_);
  current_page_ = anchor_.next_page();
}


void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  ASSERT(from->anchor_.next_page() != &from->anchor_);
  ASSERT(to->anchor_.next_page() != &to->anchor_);

  // Swap bits.
  SemiSpace tmp = *from;
  *from = *to;
  *to = tmp;

  // Fixup back-pointers to the page list anchor now that its address
  // has changed.
  // Swap to/from-space bits on pages.
  // Copy GC flags from old active space (from-space) to new (to-space).
  intptr_t flags = from->current_page()->GetFlags();
  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);

  from->FlipPages(0, 0);
}


void SemiSpace::set_age_mark(Address mark) {
  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
  NewSpacePageIterator it(space_start(), mark);
  while (it.has_next()) {
    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}


#ifdef DEBUG
void SemiSpace::Print() { }
#endif

#ifdef VERIFY_HEAP
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  NewSpacePage* page = anchor_.next_page();
  CHECK(anchor_.semi_space() == this);
  while (page != &anchor_) {
    CHECK(page->semi_space() == this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(!page->IsFlagSet(
            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
      // TODO(gc): Check that the live_bytes_count_ field matches the
      // black marking on the page (if we make it match in new-space).
    }
    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
    CHECK(page->prev_page()->next_page() == page);
    page = page->next_page();
  }
}
#endif

#ifdef DEBUG
void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to same semi-space
  NewSpacePage* page = NewSpacePage::FromLimit(start);
  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
  SemiSpace* space = page->semi_space();
  CHECK_EQ(space, end_page->semi_space());
  // Start address is before end address, either on same page,
  // or end address is on a later page in the linked list of
  // semi-space pages.
  if (page == end_page) {
    CHECK(start <= end);
  } else {
    while (page != end_page) {
      page = page->next_page();
      CHECK_NE(page, space->anchor());
    }
  }
}
#endif


// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->bottom(), space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
                                     HeapObjectCallback size_func) {
  Initialize(space->bottom(), space->top(), size_func);
}


SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
  Initialize(start, space->top(), NULL);
}


SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
  Initialize(from, to, NULL);
}


void SemiSpaceIterator::Initialize(Address start,
                                   Address end,
                                   HeapObjectCallback size_func) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
  size_func_ = size_func;
}


#ifdef DEBUG
// heap_histograms is shared, always clear it before using it.
static void ClearHistograms(Isolate* isolate) {
  // We reset the name each time, though it hasn't changed.
#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
#undef DEF_TYPE_NAME

#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
#undef CLEAR_HISTOGRAM

  isolate->js_spill_information()->Clear();
}


static void ClearCodeKindStatistics(int* code_kind_statistics) {
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    code_kind_statistics[i] = 0;
  }
}


static void ReportCodeKindStatistics(int* code_kind_statistics) {
  PrintF("\n   Code kind histograms: \n");
  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
    if (code_kind_statistics[i] > 0) {
      PrintF("     %-20s: %10d bytes\n",
             Code::Kind2String(static_cast<Code::Kind>(i)),
             code_kind_statistics[i]);
    }
  }
  PrintF("\n");
}


static int CollectHistogramInfo(HeapObject* obj) {
  Isolate* isolate = obj->GetIsolate();
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  ASSERT(isolate->heap_histograms()[type].name() != NULL);
  isolate->heap_histograms()[type].increment_number(1);
  isolate->heap_histograms()[type].increment_bytes(obj->Size());

  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
    JSObject::cast(obj)->IncrementSpillStatistics(
        isolate->js_spill_information());
  }

  return obj->Size();
}


static void ReportHistogram(Isolate* isolate, bool print_spill) {
  PrintF("\n  Object Histogram:\n");
  for (int i = 0; i <= LAST_TYPE; i++) {
    if (isolate->heap_histograms()[i].number() > 0) {
      PrintF("    %-34s%10d (%10d bytes)\n",
             isolate->heap_histograms()[i].name(),
             isolate->heap_histograms()[i].number(),
             isolate->heap_histograms()[i].bytes());
    }
  }
  PrintF("\n");

  // Summarize string types.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)      \
    string_number += isolate->heap_histograms()[type].number(); \
    string_bytes += isolate->heap_histograms()[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
           string_bytes);
  }

  if (FLAG_collect_heap_spill_statistics && print_spill) {
    isolate->js_spill_information()->Print();
  }
}
#endif  // DEBUG


// Support for statistics gathering for --heap-stats and --log-gc.
void NewSpace::ClearHistograms() {
  for (int i = 0; i <= LAST_TYPE; i++) {
    allocated_histogram_[i].clear();
    promoted_histogram_[i].clear();
  }
}


// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
// This only happens when --log-gc flag is set.
void NewSpace::CollectStatistics() {
  ClearHistograms();
  SemiSpaceIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
    RecordAllocation(obj);
}


static void DoReportStatistics(Isolate* isolate,
                               HistogramInfo* info, const char* description) {
  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
  // Lump all the string types together.
  int string_number = 0;
  int string_bytes = 0;
#define INCREMENT(type, size, name, camel_name)       \
    string_number += info[type].number();             \
    string_bytes += info[type].bytes();
  STRING_TYPE_LIST(INCREMENT)
#undef INCREMENT
  if (string_number > 0) {
    LOG(isolate,
        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
  }

  // Then do the other types.
  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
    if (info[i].number() > 0) {
      LOG(isolate,
          HeapSampleItemEvent(info[i].name(), info[i].number(),
                              info[i].bytes()));
    }
  }
  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}


void NewSpace::ReportStatistics() {
#ifdef DEBUG
  if (FLAG_heap_stats) {
    float pct = static_cast<float>(Available()) / Capacity();
    PrintF("  capacity: %" V8_PTR_PREFIX "d"
               ", available: %" V8_PTR_PREFIX "d, %%%d\n",
           Capacity(), Available(), static_cast<int>(pct*100));
    PrintF("\n  Object Histogram:\n");
    for (int i = 0; i <= LAST_TYPE; i++) {
      if (allocated_histogram_[i].number() > 0) {
        PrintF("    %-34s%10d (%10d bytes)\n",
               allocated_histogram_[i].name(),
               allocated_histogram_[i].number(),
               allocated_histogram_[i].bytes());
      }
    }
    PrintF("\n");
  }
#endif  // DEBUG

  if (FLAG_log_gc) {
    Isolate* isolate = heap()->isolate();
    DoReportStatistics(isolate, allocated_histogram_, "allocated");
    DoReportStatistics(isolate, promoted_histogram_, "promoted");
  }
}


void NewSpace::RecordAllocation(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  allocated_histogram_[type].increment_number(1);
  allocated_histogram_[type].increment_bytes(obj->Size());
}


void NewSpace::RecordPromotion(HeapObject* obj) {
  InstanceType type = obj->map()->instance_type();
  ASSERT(0 <= type && type <= LAST_TYPE);
  promoted_histogram_[type].increment_number(1);
  promoted_histogram_[type].increment_bytes(obj->Size());
}


size_t NewSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}


// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation

void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
  ASSERT(size_in_bytes > 0);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));

  // We write a map and possibly size information to the block.  If the block
  // is big enough to be a FreeSpace with at least one extra word (the next
  // pointer), we set its map to be the free space map and its size to an
  // appropriate array length for the desired size from HeapObject::Size().
  // If the block is too small (e.g., one or two words) to hold both a size
  // field and a next pointer, we give it a filler map that gives it the
  // correct size.
  if (size_in_bytes > FreeSpace::kHeaderSize) {
    set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
    // Can't use FreeSpace::cast because it fails during deserialization.
    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
    this_as_free_space->set_size(size_in_bytes);
  } else if (size_in_bytes == kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
  } else if (size_in_bytes == 2 * kPointerSize) {
    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
  } else {
    UNREACHABLE();
  }
  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
  // deserialization because the free space map is not done yet.
}


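// Illustrative summary (not part of V8) of the encoding performed by
// set_size() above, assuming FreeSpace::kHeaderSize is two words (map plus
// size field); that constant is defined in spaces.h, not in this file.
//
//   size_in_bytes              map written                 extra payload
//   -------------              -----------                 -------------
//   1 * kPointerSize           one_pointer_filler_map      none
//   2 * kPointerSize           two_pointer_filler_map      none
//   > FreeSpace::kHeaderSize   free_space_map              size field, with
//                                                          room for the next
//                                                          pointer used below
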
FreeListNode* FreeListNode::next() {
  ASSERT(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kNextOffset));
  } else {
    return reinterpret_cast<FreeListNode*>(
        Memory::Address_at(address() + kPointerSize));
  }
}


FreeListNode** FreeListNode::next_address() {
  ASSERT(IsFreeListNode(this));
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(Size() >= kNextOffset + kPointerSize);
    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
  } else {
    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
  }
}


void FreeListNode::set_next(FreeListNode* next) {
  ASSERT(IsFreeListNode(this));
  // While we are booting the VM the free space map will actually be null.  So
  // we have to make sure that we don't try to use it for anything at that
  // stage.
  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
    Memory::Address_at(address() + kNextOffset) =
        reinterpret_cast<Address>(next);
  } else {
    Memory::Address_at(address() + kPointerSize) =
        reinterpret_cast<Address>(next);
  }
}


intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
  intptr_t free_bytes = 0;
  if (category->top_ != NULL) {
    ASSERT(category->end_ != NULL);
    // This is safe (not going to deadlock) since Concatenate operations
    // are never performed on the same free lists at the same time in
    // reverse order.
    LockGuard<Mutex> target_lock_guard(mutex());
    LockGuard<Mutex> source_lock_guard(category->mutex());
    free_bytes = category->available();
    if (end_ == NULL) {
      end_ = category->end();
    } else {
      category->end()->set_next(top_);
    }
    top_ = category->top();
    available_ += category->available();
    category->Reset();
  }
  return free_bytes;
}


void FreeListCategory::Reset() {
  top_ = NULL;
  end_ = NULL;
  available_ = 0;
}


intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
  int sum = 0;
  FreeListNode** n = &top_;
  while (*n != NULL) {
    if (Page::FromAddress((*n)->address()) == p) {
      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
      sum += free_space->Size();
      *n = (*n)->next();
    } else {
      n = (*n)->next_address();
    }
  }
  if (top_ == NULL) {
    end_ = NULL;
  }
  available_ -= sum;
  return sum;
}


FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
  FreeListNode* node = top_;

  if (node == NULL) return NULL;

  while (node != NULL &&
         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
    node = node->next();
  }

  if (node != NULL) {
    set_top(node->next());
    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
    available_ -= *node_size;
  } else {
    set_top(NULL);
  }

  if (top() == NULL) {
    set_end(NULL);
  }

  return node;
}


FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
                                                 int *node_size) {
  FreeListNode* node = PickNodeFromList(node_size);
  if (node != NULL && *node_size < size_in_bytes) {
    Free(node, *node_size);
    *node_size = 0;
    return NULL;
  }
  return node;
}


void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
  node->set_next(top_);
  top_ = node;
  if (end_ == NULL) {
    end_ = node;
  }
  available_ += size_in_bytes;
}


void FreeListCategory::RepairFreeList(Heap* heap) {
  FreeListNode* n = top_;
  while (n != NULL) {
    Map** map_location = reinterpret_cast<Map**>(n->address());
    if (*map_location == NULL) {
      *map_location = heap->free_space_map();
    } else {
      ASSERT(*map_location == heap->free_space_map());
    }
    n = n->next();
  }
}


FreeList::FreeList(PagedSpace* owner)
    : owner_(owner), heap_(owner->heap()) {
  Reset();
}


intptr_t FreeList::Concatenate(FreeList* free_list) {
  intptr_t free_bytes = 0;
  free_bytes += small_list_.Concatenate(free_list->small_list());
  free_bytes += medium_list_.Concatenate(free_list->medium_list());
  free_bytes += large_list_.Concatenate(free_list->large_list());
  free_bytes += huge_list_.Concatenate(free_list->huge_list());
  return free_bytes;
}


void FreeList::Reset() {
  small_list_.Reset();
  medium_list_.Reset();
  large_list_.Reset();
  huge_list_.Reset();
}


int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;

  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(heap_, size_in_bytes);
  Page* page = Page::FromAddress(start);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes < kSmallListMin) {
    page->add_non_available_small_blocks(size_in_bytes);
    return size_in_bytes;
  }

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
    small_list_.Free(node, size_in_bytes);
    page->add_available_in_small_free_list(size_in_bytes);
  } else if (size_in_bytes <= kMediumListMax) {
    medium_list_.Free(node, size_in_bytes);
    page->add_available_in_medium_free_list(size_in_bytes);
  } else if (size_in_bytes <= kLargeListMax) {
    large_list_.Free(node, size_in_bytes);
    page->add_available_in_large_free_list(size_in_bytes);
  } else {
    huge_list_.Free(node, size_in_bytes);
    page->add_available_in_huge_free_list(size_in_bytes);
  }

  ASSERT(IsVeryLong() || available() == SumFreeLists());
  return 0;
}


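// Illustrative sketch (not part of V8): the size-class routing that
// FreeList::Free() above performs, written as a standalone helper.  The
// bounds (kSmallListMin, kSmallListMax, kMediumListMax, kLargeListMax) come
// from spaces.h and are not reproduced here; the enum below is hypothetical.
//
//   enum FreeListCategoryType { kWasted, kSmall, kMedium, kLarge, kHuge };
//
//   FreeListCategoryType CategoryFor(int size_in_bytes) {
//     if (size_in_bytes < kSmallListMin) return kWasted;  // dropped on the floor
//     if (size_in_bytes <= kSmallListMax) return kSmall;
//     if (size_in_bytes <= kMediumListMax) return kMedium;
//     if (size_in_bytes <= kLargeListMax) return kLarge;
//     return kHuge;
//   }
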
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeListNode* node = NULL;
  Page* page = NULL;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = small_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = medium_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = large_list_.PickNodeFromList(node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
      ASSERT(IsVeryLong() || available() == SumFreeLists());
      return node;
    }
  }

  int huge_list_available = huge_list_.available();
  for (FreeListNode** cur = huge_list_.GetTopAddress();
       *cur != NULL;
       cur = (*cur)->next_address()) {
    FreeListNode* cur_node = *cur;
    while (cur_node != NULL &&
           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
      huge_list_available -= size;
      page = Page::FromAddress(cur_node->address());
      page->add_available_in_huge_free_list(-size);
      cur_node = cur_node->next();
    }

    *cur = cur_node;
    if (cur_node == NULL) {
      huge_list_.set_end(NULL);
      break;
    }

    ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
    int size = cur_as_free_space->Size();
    if (size >= size_in_bytes) {
      // Large enough node found.  Unlink it from the list.
      node = *cur;
      *cur = node->next();
      *node_size = size;
      huge_list_available -= size;
      page = Page::FromAddress(node->address());
      page->add_available_in_huge_free_list(-size);
      break;
    }
  }

  if (huge_list_.top() == NULL) {
    huge_list_.set_end(NULL);
  }
  huge_list_.set_available(huge_list_available);

  if (node != NULL) {
    ASSERT(IsVeryLong() || available() == SumFreeLists());
    return node;
  }

  if (size_in_bytes <= kSmallListMax) {
    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kMediumListMax) {
    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kLargeListMax) {
    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      ASSERT(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
    }
  }

  ASSERT(IsVeryLong() || available() == SumFreeLists());
  return node;
}


// Allocation on the old space free list.  If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space.  If
// the allocation fails then NULL is returned, and the caller can perform a GC
// or allocate a new page before retrying.
HeapObject* FreeList::Allocate(int size_in_bytes) {
  ASSERT(0 < size_in_bytes);
  ASSERT(size_in_bytes <= kMaxBlockSize);
  ASSERT(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  owner_->Free(owner_->top(), old_linear_size);

  owner_->heap()->incremental_marking()->OldSpaceStep(
      size_in_bytes - old_linear_size);

  int new_node_size = 0;
  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == NULL) {
    owner_->SetTop(NULL, NULL);
    return NULL;
  }

  int bytes_left = new_node_size - size_in_bytes;
  ASSERT(bytes_left >= 0);

#ifdef DEBUG
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =
        Smi::FromInt(kCodeZapValue);
  }
#endif

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  const int kThreshold = IncrementalMarking::kAllocatedThreshold;

  // Memory in the linear allocation area is counted as allocated.  We may free
  // a little of this again immediately - see below.
  owner_->Allocate(new_node_size);

  if (bytes_left > kThreshold &&
      owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
      FLAG_incremental_marking_steps) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    // We don't want to give too large linear areas to the allocator while
    // incremental marking is going on, because we won't check again whether
    // we want to do another increment until the linear area is used up.
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    // Normally we give the rest of the node to the allocator as its new
    // linear allocation area.
    owner_->SetTop(new_node->address() + size_in_bytes,
                   new_node->address() + new_node_size);
  } else {
    // TODO(gc) Try not freeing linear allocation region when bytes_left
    // are zero.
    owner_->SetTop(NULL, NULL);
  }

  return new_node;
}


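// Illustrative sketch (not part of V8): the caller-side pattern implied by
// the comment above FreeList::Allocate() -- on a NULL result, make more
// memory available (GC, sweeping, or a new page) and retry.  CollectGarbage
// and kMaxRetries are hypothetical stand-ins for whatever recovery the
// caller actually performs.
//
//   HeapObject* AllocateWithRetry(FreeList* free_list, int size_in_bytes) {
//     for (int attempt = 0; attempt < kMaxRetries; attempt++) {
//       HeapObject* result = free_list->Allocate(size_in_bytes);
//       if (result != NULL) return result;
//       CollectGarbage();  // or sweep / expand the space, as in
//                          // PagedSpace::SlowAllocateRaw() below
//     }
//     return NULL;  // give up; caller reports allocation failure
//   }
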
intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
  p->set_available_in_huge_free_list(0);

  if (sum < p->area_size()) {
    sum += small_list_.EvictFreeListItemsInList(p) +
        medium_list_.EvictFreeListItemsInList(p) +
        large_list_.EvictFreeListItemsInList(p);
    p->set_available_in_small_free_list(0);
    p->set_available_in_medium_free_list(0);
    p->set_available_in_large_free_list(0);
  }

  return sum;
}


void FreeList::RepairLists(Heap* heap) {
  small_list_.RepairFreeList(heap);
  medium_list_.RepairFreeList(heap);
  large_list_.RepairFreeList(heap);
  huge_list_.RepairFreeList(heap);
}


#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
  intptr_t sum = 0;
  FreeListNode* cur = top_;
  while (cur != NULL) {
    ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
    sum += cur_as_free_space->Size();
    cur = cur->next();
  }
  return sum;
}


static const int kVeryLongFreeList = 500;


int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeListNode* cur = top_;
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}


bool FreeList::IsVeryLong() {
  if (small_list_.FreeListLength() == kVeryLongFreeList) return  true;
  if (medium_list_.FreeListLength() == kVeryLongFreeList) return  true;
  if (large_list_.FreeListLength() == kVeryLongFreeList) return  true;
  if (huge_list_.FreeListLength() == kVeryLongFreeList) return  true;
  return false;
}


// This can take a very long time because it is linear in the number of entries
// on the free list, so it should not be called if FreeListLength returns
// kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
  intptr_t sum = small_list_.SumFreeList();
  sum += medium_list_.SumFreeList();
  sum += large_list_.SumFreeList();
  sum += huge_list_.SumFreeList();
  return sum;
}
#endif


// -----------------------------------------------------------------------------
// OldSpace implementation

bool NewSpace::ReserveSpace(int bytes) {
  // We can't reliably unpack a partial snapshot that needs more new space
  // than the minimum NewSpace size.  The limit can be set lower than
  // the end of new space either because there is more space on the next page
  // or because we have lowered the limit in order to get periodic incremental
  // marking.  The most reliable way to ensure that there is linear space is
  // to do the allocation, then rewind the limit.
  ASSERT(bytes <= InitialCapacity());
  MaybeObject* maybe = AllocateRaw(bytes);
  Object* object = NULL;
  if (!maybe->ToObject(&object)) return false;
  HeapObject* allocation = HeapObject::cast(object);
  Address top = allocation_info_.top();
  if ((top - bytes) == allocation->address()) {
    allocation_info_.set_top(allocation->address());
    return true;
  }
  // There may be a borderline case here where the allocation succeeded, but
  // the limit and top have moved on to a new page.  In that case we try again.
  return ReserveSpace(bytes);
}


void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping.  It will be restored
  // on the first allocation after the sweep.
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  int old_linear_size = static_cast<int>(limit() - top());
  Free(top(), old_linear_size);
  SetTop(NULL, NULL);

  // Stop lazy sweeping and clear marking bits for unswept pages.
  if (first_unswept_page_ != NULL) {
    Page* p = first_unswept_page_;
    do {
      // Do not use ShouldBeSweptLazily predicate here.
      // New evacuation candidates were selected but they still have
      // to be swept before collection starts.
      if (!p->WasSwept()) {
        Bitmap::Clear(p);
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
                 reinterpret_cast<intptr_t>(p));
        }
      }
      p = p->next_page();
    } while (p != anchor());
  }
  first_unswept_page_ = Page::FromAddress(NULL);
  unswept_free_bytes_ = 0;

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


bool PagedSpace::ReserveSpace(int size_in_bytes) {
  ASSERT(size_in_bytes <= AreaSize());
  ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top <= allocation_info_.limit()) return true;

  HeapObject* new_area = free_list_.Allocate(size_in_bytes);
  if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
  if (new_area == NULL) return false;

  int old_linear_size = static_cast<int>(limit() - top());
  // Mark the old linear allocation area with a free space so it can be
  // skipped when scanning the heap.  This also puts it back in the free list
  // if it is big enough.
  Free(top(), old_linear_size);

  SetTop(new_area->address(), new_area->address() + size_in_bytes);
  return true;
}


intptr_t PagedSpace::SizeOfObjects() {
  ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
  return Size() - unswept_free_bytes_ - (limit() - top());
}


// After we have booted, we have created a map which represents free space
// on the heap.  If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterBoot() {
  free_list_.RepairLists(heap());
}


// You have to call this last, since the implementation from PagedSpace
// doesn't know that memory was 'promised' to large object space.
bool LargeObjectSpace::ReserveSpace(int bytes) {
  return heap()->OldGenerationCapacityAvailable() >= bytes &&
         (!heap()->incremental_marking()->IsStopped() ||
           heap()->OldGenerationSpaceAvailable() >= bytes);
}


bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
  if (IsLazySweepingComplete()) return true;

  intptr_t freed_bytes = 0;
  Page* p = first_unswept_page_;
  do {
    Page* next_page = p->next_page();
    if (ShouldBeSweptLazily(p)) {
      if (FLAG_gc_verbose) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
               reinterpret_cast<intptr_t>(p));
      }
      DecreaseUnsweptFreeBytes(p);
      freed_bytes +=
          MarkCompactCollector::
              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
                  this, NULL, p);
    }
    p = next_page;
  } while (p != anchor() && freed_bytes < bytes_to_sweep);

  if (p == anchor()) {
    first_unswept_page_ = Page::FromAddress(NULL);
  } else {
    first_unswept_page_ = p;
  }

  heap()->FreeQueuedChunks();

  return IsLazySweepingComplete();
}


void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
  if (allocation_info_.top() >= allocation_info_.limit()) return;

  if (Page::FromAllocationTop(allocation_info_.top())->
      IsEvacuationCandidate()) {
    // Create filler object to keep page iterable if it was iterable.
    int remaining =
        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);

    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }
}


bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->AreSweeperThreadsActivated()) {
    if (collector->IsConcurrentSweepingInProgress()) {
      if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
        if (!collector->sequential_sweeping()) {
          collector->WaitUntilSweepingCompleted();
          return true;
        }
      }
      return false;
    }
    return true;
  } else {
    return AdvanceSweeper(size_in_bytes);
  }
}


HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.

  // If there are unswept pages, advance the lazy sweeper a bounded number of
  // times until we find a size_in_bytes contiguous piece of memory.
  const int kMaxSweepingTries = 5;
  bool sweeping_complete = false;

  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
    sweeping_complete = EnsureSweeperProgress(size_in_bytes);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Free list allocation failed and there is no next page.  Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return NULL;
  }

  // Try to expand the space and allocate in the new next page.
  if (Expand()) {
    ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
    return free_list_.Allocate(size_in_bytes);
  }

  // Last ditch, sweep all the remaining pages to try to find space.  This may
  // cause a pause.
  if (!IsLazySweepingComplete()) {
    EnsureSweeperProgress(kMaxInt);

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;
  }

  // Finally, fail.
  return NULL;
}


#ifdef DEBUG
void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics(isolate->code_kind_statistics());
  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
         "count  (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
             cs.size/cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics(isolate->code_kind_statistics());
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}


// Adds comment to 'comment_statistics' table. Performance OK as long as
// 'kMaxComments' is small
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'
  cs->size += delta;
  cs->count += 1;
}


// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']').  RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  ASSERT(!it->done());
  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip
    return;
  }

  // Search for end of nested comment or a new nested comment
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore exit
    // from loop.
    ASSERT(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment
      // A new comment
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered with previous comment
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      ASSERT(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF("  capacity: %" V8_PTR_PREFIX "d"
             ", waste: %" V8_PTR_PREFIX "d"
             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (was_swept_conservatively_) return;
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
#endif

// -----------------------------------------------------------------------------
// FixedSpace implementation

void FixedSpace::PrepareForMarkCompact() {
  // Call prepare of the super class.
  PagedSpace::PrepareForMarkCompact();

  // During a non-compacting collection, everything below the linear
  // allocation pointer except wasted top-of-page blocks is considered
  // allocated and we will rediscover available bytes during the
  // collection.
  accounting_stats_.AllocateBytes(free_list_.available());

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}


// -----------------------------------------------------------------------------
// MapSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void MapSpace::VerifyObject(HeapObject* object) {
  CHECK(object->IsMap());
}


// -----------------------------------------------------------------------------
// CellSpace and PropertyCellSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.

void CellSpace::VerifyObject(HeapObject* object) {
  CHECK(object->IsCell());
}


void PropertyCellSpace::VerifyObject(HeapObject* object) {
  CHECK(object->IsPropertyCell());
}


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
  size_func_ = NULL;
}


LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
                                         HeapObjectCallback size_func) {
  current_ = space->first_page_;
  size_func_ = size_func;
}


HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectSpace
static bool ComparePointers(void* key1, void* key2) {
    return key1 == key2;
}


LargeObjectSpace::LargeObjectSpace(Heap* heap,
                                   intptr_t max_capacity,
                                   AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      max_capacity_(max_capacity),
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(ComparePointers, 1024) {}


bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}


MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
                                           Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    return Failure::RetryAfterGC(identity());
  }

  if (Size() + object_size > max_capacity_) {
    return Failure::RetryAfterGC(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->
      AllocateLargePage(object_size, this, executable);
  if (page == NULL) return Failure::RetryAfterGC(identity());
  ASSERT(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                              static_cast<uint32_t>(key),
                                              true);
    ASSERT(entry != NULL);
    entry->value = page;
  }
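  // FindPage() reverses this mapping by dividing an address by
  // MemoryChunk::kAlignment and probing chunk_map_ with the resulting key.
  // As a purely illustrative example (the alignment value is hypothetical):
  // with a 1 MB alignment, a page spanning [0x4200000, 0x4480000) is
  // registered under the keys 0x42, 0x43 and 0x44, so any interior address
  // resolves to the page with one hash lookup.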

  HeapObject* object = page->GetObject();

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  return object;
}

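// When the platform commits memory lazily, the amount actually backed by
// physical pages can be smaller than CommittedMemory(), so it is summed up
// page by page; otherwise the two are the same.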
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}


// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Failure::Exception();
}

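// Resolves an arbitrary address to the large page containing it, using the
// same key computation as the registration loop in AllocateRaw. Returns NULL
// if the address does not belong to this space.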
LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key),
                                        false);
  if (e != NULL) {
    ASSERT(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    ASSERT(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}

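// Sweeps the large object space after marking: pages whose object is still
// marked get their mark bit cleared for the next cycle, while unmarked pages
// are unlinked from the page list, dropped from chunk_map_, and released.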
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    // Can this large page contain pointers to non-trivial objects?  No other
    // pointer object is this big.
    bool is_pointer_object = object->IsFixedArray();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
      Page::FromAddress(object->address())->ResetProgressBar();
      Page::FromAddress(object->address())->ResetLiveBytes();
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
          object, heap()->isolate());
      size_ -= static_cast<int>(page->size());
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page.
      // Use variable alignment to help pass length check (<= 80 characters)
      // of single line in tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

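      // Pages that may contain pointers are only queued here and are
      // actually released by the FreeQueuedChunks() call at the end of this
      // function; pages that cannot contain pointers are unmapped right away.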
      if (is_pointer_object) {
        heap()->QueueMemoryChunkForFree(page);
      } else {
        heap()->isolate()->memory_allocator()->Free(page);
      }
    }
  }
  heap()->FreeQueuedChunks();
}


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());

  return owned;
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_;
       chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, and byte arrays in large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->Verify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(),
                          object->Size(),
                          &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print();
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF("  number of objects %d, "
         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}

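// Prints every object on the page, prefixing marked objects with '!', and
// then reports the accumulated size of marked objects next to the page's
// LiveBytes() counter so the two can be compared by eye.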
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n",
         this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next();
       object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::MarkBitFrom(object).Get();
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG

} }  // namespace v8::internal