main_repo / deps / v8 / src / mark-compact.cc @ f230a1cf
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "execution.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
#include "sweeper-thread.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";
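
// Informally, and only as a hint for readers (not something the collector
// relies on here): in the usual tri-color terminology these patterns mean
// that a "white" object is unmarked, a "grey" object has been marked but is
// still waiting to have its fields scanned, and a "black" object has been
// marked with all of its fields already visited; the fourth bit combination
// is never produced, hence "impossible".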


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector() :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      marking_parity_(ODD_MARKING_PARITY),
      compacting_(false),
      was_marked_incrementally_(false),
      sweeping_pending_(false),
      sequential_sweeping_(false),
      tracer_(NULL),
      migration_slots_buffer_(NULL),
      heap_(NULL),
      code_flusher_(NULL),
      encountered_weak_collections_(NULL),
      have_code_to_deoptimize_(false) { }

#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
                                    rinfo->target_object())) {
      VisitPointer(rinfo->target_object_address());
    }
  }

 private:
  Heap* heap_;
};


static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(),
            NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(space->heap(), p->area_start(), p->area_end());
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->property_cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Address bottom, Address top) {
  VerifyEvacuationVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->area_start(), p->area_end());
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->property_cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


#ifdef DEBUG
class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
 public:
  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (object->IsString()) continue;
        switch (object->map()->instance_type()) {
          case JS_FUNCTION_TYPE:
            CheckContext(JSFunction::cast(object)->context());
            break;
          case JS_GLOBAL_PROXY_TYPE:
            CheckContext(JSGlobalProxy::cast(object)->native_context());
            break;
          case JS_GLOBAL_OBJECT_TYPE:
          case JS_BUILTINS_OBJECT_TYPE:
            CheckContext(GlobalObject::cast(object)->native_context());
            break;
          case JS_ARRAY_TYPE:
          case JS_DATE_TYPE:
          case JS_OBJECT_TYPE:
          case JS_REGEXP_TYPE:
            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
            break;
          case MAP_TYPE:
            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
            break;
          case FIXED_ARRAY_TYPE:
            if (object->IsContext()) {
              CheckContext(object);
            } else {
              FixedArray* array = FixedArray::cast(object);
              int length = array->length();
              // Set array length to zero to prevent cycles while iterating
              // over array bodies; this is easier than intrusive marking.
              array->set_length(0);
              array->IterateBody(
                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
              array->set_length(length);
            }
            break;
          case CELL_TYPE:
          case JS_PROXY_TYPE:
          case JS_VALUE_TYPE:
          case TYPE_FEEDBACK_INFO_TYPE:
            object->Iterate(this);
            break;
          case DECLARED_ACCESSOR_INFO_TYPE:
          case EXECUTABLE_ACCESSOR_INFO_TYPE:
          case BYTE_ARRAY_TYPE:
          case CALL_HANDLER_INFO_TYPE:
          case CODE_TYPE:
          case FIXED_DOUBLE_ARRAY_TYPE:
          case HEAP_NUMBER_TYPE:
          case INTERCEPTOR_INFO_TYPE:
          case ODDBALL_TYPE:
          case SCRIPT_TYPE:
          case SHARED_FUNCTION_INFO_TYPE:
            break;
          default:
            UNREACHABLE();
        }
      }
    }
  }

 private:
  void CheckContext(Object* context) {
    if (!context->IsContext()) return;
    Context* native_context = Context::cast(context)->native_context();
    if (current_native_context_ == NULL) {
      current_native_context_ = native_context;
    } else {
      CHECK_EQ(current_native_context_, native_context);
    }
  }

  Context* current_native_context_;
};


static void VerifyNativeContextSeparation(Heap* heap) {
  HeapObjectIterator it(heap->code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    VerifyNativeContextSeparationVisitor visitor;
    Code::cast(object)->CodeIterateBody(&visitor);
  }
}
#endif


void MarkCompactCollector::TearDown() {
  AbortCompaction();
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()),
         number_of_pages,
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    ASSERT(evacuation_candidates_.length() == 0);

#ifdef ENABLE_GDB_JIT_INTERFACE
    // If the GDBJIT interface is active, disable compaction.
    if (FLAG_gdbjit) return false;
#endif

    CollectEvacuationCandidates(heap()->old_pointer_space());
    CollectEvacuationCandidates(heap()->old_data_space());

    if (FLAG_compact_code_space &&
        (mode == NON_INCREMENTAL_COMPACTION ||
         FLAG_incremental_code_compaction)) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
      TraceFragmentation(heap()->cell_space());
      TraceFragmentation(heap()->property_cell_space());
    }

    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_collections_ == Smi::FromInt(0));

  heap()->allocation_mementos_found_ = 0;

  MarkLiveObjects();
  ASSERT(heap_->incremental_marking()->IsStopped());

  if (FLAG_collect_maps) ClearNonLiveReferences();

  ClearWeakCollections();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  if (!FLAG_collect_maps) ReattachInitialMaps();

#ifdef DEBUG
  if (FLAG_verify_native_context_separation) {
    VerifyNativeContextSeparation(heap_);
  }
#endif

#ifdef VERIFY_HEAP
  if (heap()->weak_embedded_objects_verification_enabled()) {
    VerifyWeakEmbeddedObjectsInOptimizedCode();
  }
  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
    VerifyOmittedMapChecks();
  }
#endif

  Finish();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    ASSERT(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }

  if (FLAG_trace_track_allocation_sites &&
      heap()->allocation_mementos_found_ > 0) {
    PrintF("AllocationMementos found during mark-sweep = %d\n",
           heap()->allocation_mementos_found_);
  }
  tracer_ = NULL;
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->property_cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next();
       obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next();
       obj != NULL;
       obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
    Page::FromAddress(obj->address())->ResetProgressBar();
    Page::FromAddress(obj->address())->ResetLiveBytes();
  }
}


void MarkCompactCollector::StartSweeperThreads() {
  sweeping_pending_ = true;
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    isolate()->sweeper_threads()[i]->StartSweeping();
  }
}


void MarkCompactCollector::WaitUntilSweepingCompleted() {
  ASSERT(sweeping_pending_ == true);
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
  }
  sweeping_pending_ = false;
  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
}


intptr_t MarkCompactCollector::
             StealMemoryFromSweeperThreads(PagedSpace* space) {
  intptr_t freed_bytes = 0;
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
  }
  space->AddToAccountingStats(freed_bytes);
  space->DecrementUnsweptFreeBytes(freed_bytes);
  return freed_bytes;
}


bool MarkCompactCollector::AreSweeperThreadsActivated() {
  return isolate()->sweeper_threads() != NULL;
}


bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
  return sweeping_pending_;
}


bool Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  ASSERT(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return false;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
    return true;
  } else if (Marking::IsGrey(old_mark_bit)) {
    ASSERT(heap_->incremental_marking()->IsMarking());
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    ASSERT(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);
#endif

  return false;
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case PROPERTY_CELL_SPACE:
      return "PROPERTY_CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them.  Otherwise a positive integer that gives an
// estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If page was not swept then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()),
             p->LiveBytes());
    }
    return 0;
  }

  PagedSpace::SizeStats sizes;
  space->ObtainFreeListStatistics(p, &sizes);

  intptr_t ratio;
  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
        area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
        area_size;
    ratio_threshold = 15;
  }

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           AllocationSpaceName(space->identity()),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) /
           area_size,
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) /
           area_size,
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) /
           area_size,
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) /
           area_size,
           (ratio > ratio_threshold) ? "[fragmented]" : "");
  }

  if (FLAG_always_compact && sizes.Total() != area_size) {
    return 1;
  }

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
}
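
// A worked example of the ratio above, with made-up numbers: for a
// CODE_SPACE page whose area_size is 1000 KB, with 50 KB of medium and
// 100 KB of large free-list entries, the ratio is
// (50 KB * 10 + 100 KB * 2) * 100 / 1000 KB = 70.  That exceeds the
// threshold of 10, so the page would be reported as fragmented with an
// estimate of 70 - 10 = 60.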


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  ASSERT(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  static const int kMaxMaxEvacuationCandidates = 1000;
  int number_of_pages = space->CountTotalPages();
  int max_evacuation_candidates =
      static_cast<int>(sqrt(number_of_pages / 2.0) + 1);

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  }

  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };

  enum CompactionMode {
    COMPACT_FREE_LISTS,
    REDUCE_MEMORY_FOOTPRINT
  };

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
    // If reduction of memory footprint was requested, we are aggressive
    // about choosing pages to free.  We expect that half-empty pages
    // are easier to compact so slightly bump the limit.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates += 2;
  }


  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
    // If over-usage is very high (more than a third of the space), we
    // try to free all mostly empty pages.  We expect that almost empty
    // pages are even easier to compact so bump the limit even more.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates *= 2;
  }

  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
           "evacuation candidate limit: %d\n",
           static_cast<double>(over_reserved) / MB,
           static_cast<double>(reserved) / MB,
           static_cast<int>(kFreenessThreshold),
           max_evacuation_candidates);
  }

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  max_evacuation_candidates =
      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);

  int count = 0;
  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    Page* p = it.next();
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      unsigned int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= over_reserved) {
        continue;
      }

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
      } else {
        PagedSpace::SizeStats sizes;
        space->ObtainFreeListStatistics(p, &sizes);
        free_bytes = sizes.Total();
      }

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += free_bytes;
        fragmentation = free_pct;
      } else {
        fragmentation = 0;
      }

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               AllocationSpaceName(space->identity()),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");
      }
    } else {
      fragmentation = FreeListFragmentation(space, p);
    }

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      } else {
        if (least == NULL) {
          for (int i = 0; i < max_evacuation_candidates; i++) {
            if (least == NULL ||
                candidates[i].fragmentation() < least->fragmentation()) {
              least = candidates + i;
            }
          }
        }
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);
          least = NULL;
        }
      }
    }
  }

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());
  }

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           count,
           AllocationSpaceName(space->identity()));
  }
}
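
// To make the candidate limit above concrete (hypothetical page count): a
// space with 200 pages starts with sqrt(200 / 2.0) + 1 = 11 candidates.  The
// first REDUCE_MEMORY_FOOTPRINT trigger bumps this to 13, the second doubles
// it (to 26 if both fired), and the result is always clamped to
// kMaxMaxEvacuationCandidates by the Min() above.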


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);
  }
  ASSERT_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

  if (IsConcurrentSweepingInProgress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    WaitUntilSweepingCompleted();
  }

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    ClearMarkbits();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection.  Before marking, all objects are in their normal state.  After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots.  It
// uses an explicit stack of pointers rather than recursion.  The young
// generation's inactive ('from') space is used as a marking stack.  The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal.  In that case, we set an
// overflow flag.  When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack.  Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking.  This process repeats until all reachable
// objects have been marked.
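//
// Roughly, and only as an illustration (the identifiers below are
// placeholders, not the exact routines used in this file):
//
//   MarkRoots();                          // seed the marking stack
//   for (;;) {
//     while (!marking_stack.IsEmpty()) {
//       HeapObject* obj = marking_stack.Pop();  // grey -> black
//       VisitPointers(obj);                // white children -> grey (pushed)
//     }
//     if (!marking_stack.overflowed()) break;
//     marking_stack.ClearOverflowed();
//     ScanHeapForOverflowedObjects();      // re-push them and repeat
//   }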

void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->
        RecordCodeEntrySlot(slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}


void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(code_slot, code_slot, *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}


void CodeFlusher::ProcessOptimizedCodeMaps() {
  static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
  static const int kEntryLength = SharedFunctionInfo::kEntryLength;
  static const int kContextOffset = 0;
  static const int kCodeOffset = 1;
  static const int kLiteralsOffset = 2;
  STATIC_ASSERT(kEntryLength == 3);

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    ClearNextCodeMap(holder);

    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    int new_length = kEntriesStart;
    int old_length = code_map->length();
    for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
      Code* code = Code::cast(code_map->get(i + kCodeOffset));
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        continue;
      }

      // Update and record the context slot in the optimized code map.
      Object** context_slot = HeapObject::RawField(code_map,
          FixedArray::OffsetOfElementAt(new_length));
      code_map->set(new_length++, code_map->get(i + kContextOffset));
      ASSERT(Marking::IsBlack(
          Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
      isolate_->heap()->mark_compact_collector()->
          RecordSlot(context_slot, context_slot, *context_slot);

      // Update and record the code slot in the optimized code map.
      Object** code_slot = HeapObject::RawField(code_map,
          FixedArray::OffsetOfElementAt(new_length));
      code_map->set(new_length++, code_map->get(i + kCodeOffset));
      ASSERT(Marking::IsBlack(
          Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
      isolate_->heap()->mark_compact_collector()->
          RecordSlot(code_slot, code_slot, *code_slot);

      // Update and record the literals slot in the optimized code map.
      Object** literals_slot = HeapObject::RawField(code_map,
          FixedArray::OffsetOfElementAt(new_length));
      code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
      ASSERT(Marking::IsBlack(
          Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
      isolate_->heap()->mark_compact_collector()->
          RecordSlot(literals_slot, literals_slot, *literals_slot);
    }

    // Trim the optimized code map if entries have been removed.
    if (new_length < old_length) {
      holder->TrimOptimizedCodeMap(old_length - new_length);
    }

    holder = next_holder;
  }

  optimized_code_map_holder_head_ = NULL;
}
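
// For orientation, the optimized code map walked above is a FixedArray that
// looks roughly like this (a sketch based on the constants used in
// ProcessOptimizedCodeMaps; the header fields before kEntriesStart are
// elided):
//
//   [ ...header... | context_0 | code_0 | literals_0 | context_1 | ... ]
//                    \--------- one kEntryLength == 3 entry ---------/
//
// Entries whose code object is no longer marked are dropped, surviving
// entries are compacted towards the front, and the array is then trimmed.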


void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictCandidate(JSFunction* function) {
  ASSERT(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(function);
  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();
    PrintF("]\n");
  }

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
  ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons code-map: ");
    code_map_holder->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  if (holder == code_map_holder) {
    next_holder = GetNextCodeMap(code_map_holder);
    optimized_code_map_holder_head_ = next_holder;
    ClearNextCodeMap(code_map_holder);
  } else {
    while (holder != NULL) {
      next_holder = GetNextCodeMap(holder);

      if (next_holder == code_map_holder) {
        next_holder = GetNextCodeMap(code_map_holder);
        SetNextCodeMap(holder, next_holder);
        ClearNextCodeMap(code_map_holder);
        break;
      }

      holder = next_holder;
    }
  }
}


void CodeFlusher::EvictJSFunctionCandidates() {
  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;
  }
  ASSERT(jsfunction_candidates_head_ == NULL);
}


void CodeFlusher::EvictSharedFunctionInfoCandidates() {
  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;
  }
  ASSERT(shared_function_info_candidates_head_ == NULL);
}


void CodeFlusher::EvictOptimizedCodeMaps() {
  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    EvictOptimizedCodeMap(holder);
    holder = next_holder;
  }
  ASSERT(optimized_code_map_holder_head_ == NULL);
}


void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);
  }
}


MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-internalized
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring.  Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsInternalizedString() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();
  InstanceType type = map->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}
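
// A small illustration of the shortcut above, with hypothetical values: if
// *p refers to a non-internalized cons string whose parts are
// ("foo", empty_string), the slot is updated in place to point directly at
// "foo", so later visits never have to walk through the degenerate cons
// wrapper again.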
1334

    
1335

    
1336
class MarkCompactMarkingVisitor
1337
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1338
 public:
1339
  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
1340
                                   Map* map, HeapObject* obj);
1341

    
1342
  static void ObjectStatsCountFixedArray(
1343
      FixedArrayBase* fixed_array,
1344
      FixedArraySubInstanceType fast_type,
1345
      FixedArraySubInstanceType dictionary_type);
1346

    
1347
  template<MarkCompactMarkingVisitor::VisitorId id>
1348
  class ObjectStatsTracker {
1349
   public:
1350
    static inline void Visit(Map* map, HeapObject* obj);
1351
  };
1352

    
1353
  static void Initialize();
1354

    
1355
  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1356
    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1357
  }
1358

    
1359
  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1360
    // Mark all objects pointed to in [start, end).
1361
    const int kMinRangeForMarkingRecursion = 64;
1362
    if (end - start >= kMinRangeForMarkingRecursion) {
1363
      if (VisitUnmarkedObjects(heap, start, end)) return;
1364
      // We are close to a stack overflow, so just mark the objects.
1365
    }
1366
    MarkCompactCollector* collector = heap->mark_compact_collector();
1367
    for (Object** p = start; p < end; p++) {
1368
      MarkObjectByPointer(collector, start, p);
1369
    }
1370
  }
1371

    
1372
  // Marks the object black and pushes it on the marking stack.
1373
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1374
    MarkBit mark = Marking::MarkBitFrom(object);
1375
    heap->mark_compact_collector()->MarkObject(object, mark);
1376
  }
1377

    
1378
  // Marks the object black without pushing it on the marking stack.
1379
  // Returns true if object needed marking and false otherwise.
1380
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1381
    MarkBit mark_bit = Marking::MarkBitFrom(object);
1382
    if (!mark_bit.Get()) {
1383
      heap->mark_compact_collector()->SetMark(object, mark_bit);
1384
      return true;
1385
    }
1386
    return false;
1387
  }
1388

    
1389
  // Mark object pointed to by p.
1390
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1391
                                         Object** anchor_slot,
1392
                                         Object** p)) {
1393
    if (!(*p)->IsHeapObject()) return;
1394
    HeapObject* object = ShortCircuitConsString(p);
1395
    collector->RecordSlot(anchor_slot, p, object);
1396
    MarkBit mark = Marking::MarkBitFrom(object);
1397
    collector->MarkObject(object, mark);
1398
  }
1399

    
1400

    
1401
  // Visit an unmarked object.
1402
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1403
                                         HeapObject* obj)) {
1404
#ifdef DEBUG
1405
    ASSERT(collector->heap()->Contains(obj));
1406
    ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1407
#endif
1408
    Map* map = obj->map();
1409
    Heap* heap = obj->GetHeap();
1410
    MarkBit mark = Marking::MarkBitFrom(obj);
1411
    heap->mark_compact_collector()->SetMark(obj, mark);
1412
    // Mark the map pointer and the body.
1413
    MarkBit map_mark = Marking::MarkBitFrom(map);
1414
    heap->mark_compact_collector()->MarkObject(map, map_mark);
1415
    IterateBody(map, obj);
1416
  }
1417

    
1418
  // Visit all unmarked objects pointed to by [start, end).
1419
  // Returns false if the operation fails (lack of stack space).
1420
  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
1421
                                          Object** start,
1422
                                          Object** end)) {
1423
    // Return false is we are close to the stack limit.
1424
    StackLimitCheck check(heap->isolate());
1425
    if (check.HasOverflowed()) return false;
1426

    
1427
    MarkCompactCollector* collector = heap->mark_compact_collector();
1428
    // Visit the unmarked objects.
1429
    for (Object** p = start; p < end; p++) {
1430
      Object* o = *p;
1431
      if (!o->IsHeapObject()) continue;
1432
      collector->RecordSlot(start, p, o);
1433
      HeapObject* obj = HeapObject::cast(o);
1434
      MarkBit mark = Marking::MarkBitFrom(obj);
1435
      if (mark.Get()) continue;
1436
      VisitUnmarkedObject(collector, obj);
1437
    }
1438
    return true;
1439
  }
1440

    
1441
  INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
1442
    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
1443
    shared->BeforeVisitingPointers();
1444
  }
1445

    
1446
  static void VisitWeakCollection(Map* map, HeapObject* object) {
1447
    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1448
    JSWeakCollection* weak_collection =
1449
        reinterpret_cast<JSWeakCollection*>(object);
1450

    
1451
    // Enqueue weak map in linked list of encountered weak maps.
1452
    if (weak_collection->next() == Smi::FromInt(0)) {
1453
      weak_collection->set_next(collector->encountered_weak_collections());
1454
      collector->set_encountered_weak_collections(weak_collection);
1455
    }
1456

    
1457
    // Skip visiting the backing hash table containing the mappings.
1458
    int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
1459
    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1460
        map->GetHeap(),
1461
        object,
1462
        JSWeakCollection::BodyDescriptor::kStartOffset,
1463
        JSWeakCollection::kTableOffset);
1464
    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1465
        map->GetHeap(),
1466
        object,
1467
        JSWeakCollection::kTableOffset + kPointerSize,
1468
        object_size);
1469

    
1470
    // Mark the backing hash table without pushing it on the marking stack.
1471
    Object* table_object = weak_collection->table();
1472
    if (!table_object->IsHashTable()) return;
1473
    WeakHashTable* table = WeakHashTable::cast(table_object);
1474
    Object** table_slot =
1475
        HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
1476
    MarkBit table_mark = Marking::MarkBitFrom(table);
1477
    collector->RecordSlot(table_slot, table_slot, table);
1478
    if (!table_mark.Get()) collector->SetMark(table, table_mark);
1479
    // Recording the map slot can be skipped, because maps are not compacted.
1480
    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1481
    ASSERT(MarkCompactCollector::IsMarked(table->map()));
1482
  }
1483

    
1484
 private:
1485
  template<int id>
1486
  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1487

    
1488
  // Code flushing support.
1489

    
1490
  static const int kRegExpCodeThreshold = 5;
1491

    
1492
  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1493
                                          JSRegExp* re,
1494
                                          bool is_ascii) {
1495
    // Make sure that the fixed array is in fact initialized on the RegExp.
1496
    // We could potentially trigger a GC when initializing the RegExp.
1497
    if (HeapObject::cast(re->data())->map()->instance_type() !=
1498
            FIXED_ARRAY_TYPE) return;
1499

    
1500
    // Make sure this is a RegExp that actually contains code.
1501
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1502

    
1503
    Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
1504
    if (!code->IsSmi() &&
1505
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1506
      // Save a copy that can be reinstated if we need the code again.
1507
      re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
1508

    
1509
      // Saving a copy might create a pointer into a compaction candidate
1510
      // that was not observed by the marker.  This might happen if the
1511
      // JSRegExp data was marked through the compilation cache before the
1512
      // marker reached the JSRegExp object.
1513
      FixedArray* data = FixedArray::cast(re->data());
1514
      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1515
      heap->mark_compact_collector()->
1516
          RecordSlot(slot, slot, code);
1517

    
1518
      // Set a number in the 0-255 range to guarantee no smi overflow.
1519
      re->SetDataAt(JSRegExp::code_index(is_ascii),
1520
                    Smi::FromInt(heap->sweep_generation() & 0xff));
1521
    } else if (code->IsSmi()) {
1522
      int value = Smi::cast(code)->value();
1523
      // The regexp has not been compiled yet or there was a compilation error.
1524
      if (value == JSRegExp::kUninitializedValue ||
1525
          value == JSRegExp::kCompilationErrorValue) {
1526
        return;
1527
      }
1528

    
1529
      // Check if we should flush now.
1530
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1531
        re->SetDataAt(JSRegExp::code_index(is_ascii),
1532
                      Smi::FromInt(JSRegExp::kUninitializedValue));
1533
        re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1534
                      Smi::FromInt(JSRegExp::kUninitializedValue));
1535
      }
1536
    }
1537
  }
1538

    
1539

    
1540
  // Works by setting the current sweep_generation (as a smi) in the code
1541
  // slot of the RegExp's data array and keeping a copy around that can be
1542
  // reinstated if we reuse the RegExp before flushing.
1543
  // If the code has not been used for kRegExpCodeThreshold mark-sweep GCs
1544
  // we flush it.
1545
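  // For example, with kRegExpCodeThreshold == 5, code stamped at
  // sweep_generation 42 (stored as the smi 42 & 0xff) is flushed once
  // sweep_generation reaches 47, provided the RegExp was not used in between.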
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1546
    Heap* heap = map->GetHeap();
1547
    MarkCompactCollector* collector = heap->mark_compact_collector();
1548
    if (!collector->is_code_flushing_enabled()) {
1549
      VisitJSRegExp(map, object);
1550
      return;
1551
    }
1552
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1553
    // Flush code or set age on both ASCII and two byte code.
1554
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
1555
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
1556
    // Visit the fields of the RegExp, including the updated FixedArray.
1557
    VisitJSRegExp(map, object);
1558
  }
1559

    
1560
  static VisitorDispatchTable<Callback> non_count_table_;
1561
};
1562

    
1563

    
1564
void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1565
    FixedArrayBase* fixed_array,
1566
    FixedArraySubInstanceType fast_type,
1567
    FixedArraySubInstanceType dictionary_type) {
1568
  Heap* heap = fixed_array->map()->GetHeap();
1569
  if (fixed_array->map() != heap->fixed_cow_array_map() &&
1570
      fixed_array->map() != heap->fixed_double_array_map() &&
1571
      fixed_array != heap->empty_fixed_array()) {
1572
    if (fixed_array->IsDictionary()) {
1573
      heap->RecordFixedArraySubTypeStats(dictionary_type,
1574
                                         fixed_array->Size());
1575
    } else {
1576
      heap->RecordFixedArraySubTypeStats(fast_type,
1577
                                         fixed_array->Size());
1578
    }
1579
  }
1580
}
1581

    
1582

    
1583
void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1584
    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1585
  Heap* heap = map->GetHeap();
1586
  int object_size = obj->Size();
1587
  heap->RecordObjectStats(map->instance_type(), object_size);
1588
  non_count_table_.GetVisitorById(id)(map, obj);
1589
  if (obj->IsJSObject()) {
1590
    JSObject* object = JSObject::cast(obj);
1591
    ObjectStatsCountFixedArray(object->elements(),
1592
                               DICTIONARY_ELEMENTS_SUB_TYPE,
1593
                               FAST_ELEMENTS_SUB_TYPE);
1594
    ObjectStatsCountFixedArray(object->properties(),
1595
                               DICTIONARY_PROPERTIES_SUB_TYPE,
1596
                               FAST_PROPERTIES_SUB_TYPE);
1597
  }
1598
}
1599

    
1600

    
1601
template<MarkCompactMarkingVisitor::VisitorId id>
1602
void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
1603
    Map* map, HeapObject* obj) {
1604
  ObjectStatsVisitBase(id, map, obj);
1605
}
1606

    
1607

    
1608
template<>
1609
class MarkCompactMarkingVisitor::ObjectStatsTracker<
1610
    MarkCompactMarkingVisitor::kVisitMap> {
1611
 public:
1612
  static inline void Visit(Map* map, HeapObject* obj) {
1613
    Heap* heap = map->GetHeap();
1614
    Map* map_obj = Map::cast(obj);
1615
    ASSERT(map->instance_type() == MAP_TYPE);
1616
    DescriptorArray* array = map_obj->instance_descriptors();
1617
    if (map_obj->owns_descriptors() &&
1618
        array != heap->empty_descriptor_array()) {
1619
      int fixed_array_size = array->Size();
1620
      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1621
                                         fixed_array_size);
1622
    }
1623
    if (map_obj->HasTransitionArray()) {
1624
      int fixed_array_size = map_obj->transitions()->Size();
1625
      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1626
                                         fixed_array_size);
1627
    }
1628
    if (map_obj->has_code_cache()) {
1629
      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1630
      heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1631
                                         cache->default_cache()->Size());
1632
      if (!cache->normal_type_cache()->IsUndefined()) {
1633
        heap->RecordFixedArraySubTypeStats(
1634
            MAP_CODE_CACHE_SUB_TYPE,
1635
            FixedArray::cast(cache->normal_type_cache())->Size());
1636
      }
1637
    }
1638
    ObjectStatsVisitBase(kVisitMap, map, obj);
1639
  }
1640
};
1641

    
1642

    
1643
template<>
1644
class MarkCompactMarkingVisitor::ObjectStatsTracker<
1645
    MarkCompactMarkingVisitor::kVisitCode> {
1646
 public:
1647
  static inline void Visit(Map* map, HeapObject* obj) {
1648
    Heap* heap = map->GetHeap();
1649
    int object_size = obj->Size();
1650
    ASSERT(map->instance_type() == CODE_TYPE);
1651
    Code* code_obj = Code::cast(obj);
1652
    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetAge(),
1653
                                 object_size);
1654
    ObjectStatsVisitBase(kVisitCode, map, obj);
1655
  }
1656
};
1657

    
1658

    
1659
template<>
1660
class MarkCompactMarkingVisitor::ObjectStatsTracker<
1661
    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1662
 public:
1663
  static inline void Visit(Map* map, HeapObject* obj) {
1664
    Heap* heap = map->GetHeap();
1665
    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1666
    if (sfi->scope_info() != heap->empty_fixed_array()) {
1667
      heap->RecordFixedArraySubTypeStats(
1668
          SCOPE_INFO_SUB_TYPE,
1669
          FixedArray::cast(sfi->scope_info())->Size());
1670
    }
1671
    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1672
  }
1673
};
1674

    
1675

    
1676
template<>
1677
class MarkCompactMarkingVisitor::ObjectStatsTracker<
1678
    MarkCompactMarkingVisitor::kVisitFixedArray> {
1679
 public:
1680
  static inline void Visit(Map* map, HeapObject* obj) {
1681
    Heap* heap = map->GetHeap();
1682
    FixedArray* fixed_array = FixedArray::cast(obj);
1683
    if (fixed_array == heap->string_table()) {
1684
      heap->RecordFixedArraySubTypeStats(
1685
          STRING_TABLE_SUB_TYPE,
1686
          fixed_array->Size());
1687
    }
1688
    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1689
  }
1690
};
1691

    
1692

    
1693
void MarkCompactMarkingVisitor::Initialize() {
1694
  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1695

    
1696
  table_.Register(kVisitJSRegExp,
1697
                  &VisitRegExpAndFlushCode);
1698

    
1699
  if (FLAG_track_gc_object_stats) {
1700
    // Copy the visitor table to make call-through possible.
1701
    non_count_table_.CopyFrom(&table_);
1702
#define VISITOR_ID_COUNT_FUNCTION(id)                                   \
1703
    table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1704
    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1705
#undef VISITOR_ID_COUNT_FUNCTION
1706
  }
1707
}
1708

    
1709

    
1710
VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1711
    MarkCompactMarkingVisitor::non_count_table_;
1712

    
1713

    
1714
class CodeMarkingVisitor : public ThreadVisitor {
1715
 public:
1716
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1717
      : collector_(collector) {}
1718

    
1719
  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1720
    collector_->PrepareThreadForCodeFlushing(isolate, top);
1721
  }
1722

    
1723
 private:
1724
  MarkCompactCollector* collector_;
1725
};
1726

    
1727

    
1728
class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1729
 public:
1730
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1731
      : collector_(collector) {}
1732

    
1733
  void VisitPointers(Object** start, Object** end) {
1734
    for (Object** p = start; p < end; p++) VisitPointer(p);
1735
  }
1736

    
1737
  void VisitPointer(Object** slot) {
1738
    Object* obj = *slot;
1739
    if (obj->IsSharedFunctionInfo()) {
1740
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1741
      MarkBit shared_mark = Marking::MarkBitFrom(shared);
1742
      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1743
      collector_->MarkObject(shared->code(), code_mark);
1744
      collector_->MarkObject(shared, shared_mark);
1745
    }
1746
  }
1747

    
1748
 private:
1749
  MarkCompactCollector* collector_;
1750
};
1751

    
1752

    
1753
void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1754
                                                        ThreadLocalTop* top) {
1755
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1756
    // Note: for a frame that has a pending lazy deoptimization,
1757
    // StackFrame::unchecked_code will return a non-optimized code object for
1758
    // the outermost function and StackFrame::LookupCode will return the
1759
    // actual optimized code object.
1760
    StackFrame* frame = it.frame();
1761
    Code* code = frame->unchecked_code();
1762
    MarkBit code_mark = Marking::MarkBitFrom(code);
1763
    MarkObject(code, code_mark);
1764
    if (frame->is_optimized()) {
1765
      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1766
                                                          frame->LookupCode());
1767
    }
1768
  }
1769
}
1770

    
1771

    
1772
void MarkCompactCollector::PrepareForCodeFlushing() {
1773
  // Enable code flushing for non-incremental cycles.
1774
  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1775
    EnableCodeFlushing(!was_marked_incrementally_);
1776
  }
1777

    
1778
  // If code flushing is disabled, there is no need to prepare for it.
1779
  if (!is_code_flushing_enabled()) return;
1780

    
1781
  // Ensure that the empty descriptor array is marked.  MarkDescriptorArray
1782
  // relies on it being marked before any other descriptor array.
1783
  HeapObject* descriptor_array = heap()->empty_descriptor_array();
1784
  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1785
  MarkObject(descriptor_array, descriptor_array_mark);
1786

    
1787
  // Make sure we are not referencing the code from the stack.
1788
  ASSERT(this == heap()->mark_compact_collector());
1789
  PrepareThreadForCodeFlushing(heap()->isolate(),
1790
                               heap()->isolate()->thread_local_top());
1791

    
1792
  // Iterate the archived stacks in all threads to check if
1793
  // the code is referenced.
1794
  CodeMarkingVisitor code_marking_visitor(this);
1795
  heap()->isolate()->thread_manager()->IterateArchivedThreads(
1796
      &code_marking_visitor);
1797

    
1798
  SharedFunctionInfoMarkingVisitor visitor(this);
1799
  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1800
  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1801

    
1802
  ProcessMarkingDeque();
1803
}
1804

    
1805

    
1806
// Visitor class for marking heap roots.
1807
class RootMarkingVisitor : public ObjectVisitor {
1808
 public:
1809
  explicit RootMarkingVisitor(Heap* heap)
1810
    : collector_(heap->mark_compact_collector()) { }
1811

    
1812
  void VisitPointer(Object** p) {
1813
    MarkObjectByPointer(p);
1814
  }
1815

    
1816
  void VisitPointers(Object** start, Object** end) {
1817
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1818
  }
1819

    
1820
 private:
1821
  void MarkObjectByPointer(Object** p) {
1822
    if (!(*p)->IsHeapObject()) return;
1823

    
1824
    // Replace flat cons strings in place.
1825
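    // (Roughly: a cons string whose second component is the empty string is
    // updated in place to its first component, so the dead wrapper does not
    // need to be retained.)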
    HeapObject* object = ShortCircuitConsString(p);
1826
    MarkBit mark_bit = Marking::MarkBitFrom(object);
1827
    if (mark_bit.Get()) return;
1828

    
1829
    Map* map = object->map();
1830
    // Mark the object.
1831
    collector_->SetMark(object, mark_bit);
1832

    
1833
    // Mark the map pointer and body, and push them on the marking stack.
1834
    MarkBit map_mark = Marking::MarkBitFrom(map);
1835
    collector_->MarkObject(map, map_mark);
1836
    MarkCompactMarkingVisitor::IterateBody(map, object);
1837

    
1838
    // Mark all the objects reachable from the map and body.  May leave
1839
    // overflowed objects in the heap.
1840
    collector_->EmptyMarkingDeque();
1841
  }
1842

    
1843
  MarkCompactCollector* collector_;
1844
};
1845

    
1846

    
1847
// Helper class for pruning the string table.
1848
class StringTableCleaner : public ObjectVisitor {
1849
 public:
1850
  explicit StringTableCleaner(Heap* heap)
1851
    : heap_(heap), pointers_removed_(0) { }
1852

    
1853
  virtual void VisitPointers(Object** start, Object** end) {
1854
    // Visit all HeapObject pointers in [start, end).
1855
    for (Object** p = start; p < end; p++) {
1856
      Object* o = *p;
1857
      if (o->IsHeapObject() &&
1858
          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1859
        // Check if the internalized string being pruned is external. We need to
1860
        // delete the associated external data as this string is going away.
1861

    
1862
        // Since no objects have yet been moved we can safely access the map of
1863
        // the object.
1864
        if (o->IsExternalString()) {
1865
          heap_->FinalizeExternalString(String::cast(*p));
1866
        }
1867
        // Set the entry to the_hole_value (as deleted).
1868
        *p = heap_->the_hole_value();
1869
        pointers_removed_++;
1870
      }
1871
    }
1872
  }
1873

    
1874
  int PointersRemoved() {
1875
    return pointers_removed_;
1876
  }
1877

    
1878
 private:
1879
  Heap* heap_;
1880
  int pointers_removed_;
1881
};
1882

    
1883

    
1884
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1885
// are retained.
1886
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1887
 public:
1888
  virtual Object* RetainAs(Object* object) {
1889
    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1890
      return object;
1891
    } else {
1892
      return NULL;
1893
    }
1894
  }
1895
};
1896

    
1897

    
1898
// Fill the marking stack with overflowed objects returned by the given
1899
// iterator.  Stop when the marking stack is filled or the end of the space
1900
// is reached, whichever comes first.
1901
template<class T>
1902
static void DiscoverGreyObjectsWithIterator(Heap* heap,
1903
                                            MarkingDeque* marking_deque,
1904
                                            T* it) {
1905
  // The caller should ensure that the marking stack is initially not full,
1906
  // so that we don't waste effort pointlessly scanning for objects.
1907
  ASSERT(!marking_deque->IsFull());
1908

    
1909
  Map* filler_map = heap->one_pointer_filler_map();
1910
  for (HeapObject* object = it->Next();
1911
       object != NULL;
1912
       object = it->Next()) {
1913
    MarkBit markbit = Marking::MarkBitFrom(object);
1914
    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1915
      Marking::GreyToBlack(markbit);
1916
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1917
      marking_deque->PushBlack(object);
1918
      if (marking_deque->IsFull()) return;
1919
    }
1920
  }
1921
}
1922

    
1923

    
1924
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1925

    
1926

    
1927
static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1928
                                      MemoryChunk* p) {
1929
  ASSERT(!marking_deque->IsFull());
1930
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1931
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1932
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1933
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1934

    
1935
  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1936
    Address cell_base = it.CurrentCellBase();
1937
    MarkBit::CellType* cell = it.CurrentCell();
1938

    
1939
    const MarkBit::CellType current_cell = *cell;
1940
    if (current_cell == 0) continue;
1941

    
1942
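    // Grey objects have the mark-bit pattern "11", so a grey object is
    // detected by ANDing each bit of the cell with the bit that follows it
    // (borrowing the low bit of the next cell at the cell boundary).  The
    // result has one bit set at the start position of every grey object.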
    MarkBit::CellType grey_objects;
1943
    if (it.HasNext()) {
1944
      const MarkBit::CellType next_cell = *(cell+1);
1945
      grey_objects = current_cell &
1946
          ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
1947
    } else {
1948
      grey_objects = current_cell & (current_cell >> 1);
1949
    }
1950

    
1951
    int offset = 0;
1952
    while (grey_objects != 0) {
1953
      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1954
      grey_objects >>= trailing_zeros;
1955
      offset += trailing_zeros;
1956
      MarkBit markbit(cell, 1 << offset, false);
1957
      ASSERT(Marking::IsGrey(markbit));
1958
      Marking::GreyToBlack(markbit);
1959
      Address addr = cell_base + offset * kPointerSize;
1960
      HeapObject* object = HeapObject::FromAddress(addr);
1961
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1962
      marking_deque->PushBlack(object);
1963
      if (marking_deque->IsFull()) return;
1964
      offset += 2;
1965
      grey_objects >>= 2;
1966
    }
1967

    
1968
    grey_objects >>= (Bitmap::kBitsPerCell - 1);
1969
  }
1970
}
1971

    
1972

    
1973
int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
1974
    NewSpace* new_space,
1975
    NewSpacePage* p) {
1976
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1977
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1978
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1979
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1980

    
1981
  MarkBit::CellType* cells = p->markbits()->cells();
1982
  int survivors_size = 0;
1983

    
1984
  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1985
    Address cell_base = it.CurrentCellBase();
1986
    MarkBit::CellType* cell = it.CurrentCell();
1987

    
1988
    MarkBit::CellType current_cell = *cell;
1989
    if (current_cell == 0) continue;
1990

    
1991
    int offset = 0;
1992
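    // At this point marking has finished, so every live object is black
    // ("10"): each set bit marks an object start and the bit that follows it
    // is clear, which is why advancing by a single bit per object below is
    // sufficient.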
    while (current_cell != 0) {
1993
      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
1994
      current_cell >>= trailing_zeros;
1995
      offset += trailing_zeros;
1996
      Address address = cell_base + offset * kPointerSize;
1997
      HeapObject* object = HeapObject::FromAddress(address);
1998

    
1999
      int size = object->Size();
2000
      survivors_size += size;
2001

    
2002
      if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
2003
        if (AllocationMemento::FindForJSObject(JSObject::cast(object), true)
2004
            != NULL) {
2005
          heap()->allocation_mementos_found_++;
2006
        }
2007
      }
2008

    
2009
      offset++;
2010
      current_cell >>= 1;
2011
      // Aggressively promote young survivors to the old space.
2012
      if (TryPromoteObject(object, size)) {
2013
        continue;
2014
      }
2015

    
2016
      // Promotion failed. Just migrate object to another semispace.
2017
      MaybeObject* allocation = new_space->AllocateRaw(size);
2018
      if (allocation->IsFailure()) {
2019
        if (!new_space->AddFreshPage()) {
2020
          // Shouldn't happen. We are sweeping linearly, and to-space
2021
          // has the same number of pages as from-space, so there is
2022
          // always room.
2023
          UNREACHABLE();
2024
        }
2025
        allocation = new_space->AllocateRaw(size);
2026
        ASSERT(!allocation->IsFailure());
2027
      }
2028
      Object* target = allocation->ToObjectUnchecked();
2029

    
2030
      MigrateObject(HeapObject::cast(target)->address(),
2031
                    object->address(),
2032
                    size,
2033
                    NEW_SPACE);
2034
    }
2035
    *cells = 0;
2036
  }
2037
  return survivors_size;
2038
}
2039

    
2040

    
2041
static void DiscoverGreyObjectsInSpace(Heap* heap,
2042
                                       MarkingDeque* marking_deque,
2043
                                       PagedSpace* space) {
2044
  if (!space->was_swept_conservatively()) {
2045
    HeapObjectIterator it(space);
2046
    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2047
  } else {
2048
    PageIterator it(space);
2049
    while (it.has_next()) {
2050
      Page* p = it.next();
2051
      DiscoverGreyObjectsOnPage(marking_deque, p);
2052
      if (marking_deque->IsFull()) return;
2053
    }
2054
  }
2055
}
2056

    
2057

    
2058
static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2059
                                          MarkingDeque* marking_deque) {
2060
  NewSpace* space = heap->new_space();
2061
  NewSpacePageIterator it(space->bottom(), space->top());
2062
  while (it.has_next()) {
2063
    NewSpacePage* page = it.next();
2064
    DiscoverGreyObjectsOnPage(marking_deque, page);
2065
    if (marking_deque->IsFull()) return;
2066
  }
2067
}
2068

    
2069

    
2070
bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2071
  Object* o = *p;
2072
  if (!o->IsHeapObject()) return false;
2073
  HeapObject* heap_object = HeapObject::cast(o);
2074
  MarkBit mark = Marking::MarkBitFrom(heap_object);
2075
  return !mark.Get();
2076
}
2077

    
2078

    
2079
bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2080
                                                        Object** p) {
2081
  Object* o = *p;
2082
  ASSERT(o->IsHeapObject());
2083
  HeapObject* heap_object = HeapObject::cast(o);
2084
  MarkBit mark = Marking::MarkBitFrom(heap_object);
2085
  return !mark.Get();
2086
}
2087

    
2088

    
2089
void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2090
  StringTable* string_table = heap()->string_table();
2091
  // Mark the string table itself.
2092
  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2093
  SetMark(string_table, string_table_mark);
2094
  // Explicitly mark the prefix.
2095
  string_table->IteratePrefix(visitor);
2096
  ProcessMarkingDeque();
2097
}
2098

    
2099

    
2100
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2101
  // Mark the heap roots including global variables, stack variables,
2102
  // etc., and all objects reachable from them.
2103
  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2104

    
2105
  // Handle the string table specially.
2106
  MarkStringTable(visitor);
2107

    
2108
  MarkWeakObjectToCodeTable();
2109

    
2110
  // There may be overflowed objects in the heap.  Visit them now.
2111
  while (marking_deque_.overflowed()) {
2112
    RefillMarkingDeque();
2113
    EmptyMarkingDeque();
2114
  }
2115
}
2116

    
2117

    
2118
void MarkCompactCollector::MarkImplicitRefGroups() {
2119
  List<ImplicitRefGroup*>* ref_groups =
2120
      isolate()->global_handles()->implicit_ref_groups();
2121

    
2122
  int last = 0;
2123
  for (int i = 0; i < ref_groups->length(); i++) {
2124
    ImplicitRefGroup* entry = ref_groups->at(i);
2125
    ASSERT(entry != NULL);
2126

    
2127
    if (!IsMarked(*entry->parent)) {
2128
      (*ref_groups)[last++] = entry;
2129
      continue;
2130
    }
2131

    
2132
    Object*** children = entry->children;
2133
    // A parent object is marked, so mark all child heap objects.
2134
    for (size_t j = 0; j < entry->length; ++j) {
2135
      if ((*children[j])->IsHeapObject()) {
2136
        HeapObject* child = HeapObject::cast(*children[j]);
2137
        MarkBit mark = Marking::MarkBitFrom(child);
2138
        MarkObject(child, mark);
2139
      }
2140
    }
2141

    
2142
    // Once the entire group has been marked, dispose it because it's
2143
    // not needed anymore.
2144
    delete entry;
2145
  }
2146
  ref_groups->Rewind(last);
2147
}
2148

    
2149

    
2150
void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2151
  HeapObject* weak_object_to_code_table =
2152
      HeapObject::cast(heap()->weak_object_to_code_table());
2153
  if (!IsMarked(weak_object_to_code_table)) {
2154
    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2155
    SetMark(weak_object_to_code_table, mark);
2156
  }
2157
}
2158

    
2159

    
2160
// Mark all objects reachable from the objects on the marking stack.
2161
// Before: the marking stack contains zero or more heap object pointers.
2162
// After: the marking stack is empty, and all objects reachable from the
2163
// marking stack have been marked, or are overflowed in the heap.
2164
void MarkCompactCollector::EmptyMarkingDeque() {
2165
  while (!marking_deque_.IsEmpty()) {
2166
    HeapObject* object = marking_deque_.Pop();
2167
    ASSERT(object->IsHeapObject());
2168
    ASSERT(heap()->Contains(object));
2169
    ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2170

    
2171
    Map* map = object->map();
2172
    MarkBit map_mark = Marking::MarkBitFrom(map);
2173
    MarkObject(map, map_mark);
2174

    
2175
    MarkCompactMarkingVisitor::IterateBody(map, object);
2176
  }
2177
}
2178

    
2179

    
2180
// Sweep the heap for overflowed objects, clear their overflow bits, and
2181
// push them on the marking stack.  Stop early if the marking stack fills
2182
// before sweeping completes.  If sweeping completes, there are no remaining
2183
// overflowed objects in the heap so the overflow flag on the marking stack
2184
// is cleared.
2185
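// Overflowed objects were marked grey (both mark bits set), so they can be
// rediscovered here purely from the mark bitmaps of each space, without any
// per-object bookkeeping.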
void MarkCompactCollector::RefillMarkingDeque() {
2186
  ASSERT(marking_deque_.overflowed());
2187

    
2188
  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2189
  if (marking_deque_.IsFull()) return;
2190

    
2191
  DiscoverGreyObjectsInSpace(heap(),
2192
                             &marking_deque_,
2193
                             heap()->old_pointer_space());
2194
  if (marking_deque_.IsFull()) return;
2195

    
2196
  DiscoverGreyObjectsInSpace(heap(),
2197
                             &marking_deque_,
2198
                             heap()->old_data_space());
2199
  if (marking_deque_.IsFull()) return;
2200

    
2201
  DiscoverGreyObjectsInSpace(heap(),
2202
                             &marking_deque_,
2203
                             heap()->code_space());
2204
  if (marking_deque_.IsFull()) return;
2205

    
2206
  DiscoverGreyObjectsInSpace(heap(),
2207
                             &marking_deque_,
2208
                             heap()->map_space());
2209
  if (marking_deque_.IsFull()) return;
2210

    
2211
  DiscoverGreyObjectsInSpace(heap(),
2212
                             &marking_deque_,
2213
                             heap()->cell_space());
2214
  if (marking_deque_.IsFull()) return;
2215

    
2216
  DiscoverGreyObjectsInSpace(heap(),
2217
                             &marking_deque_,
2218
                             heap()->property_cell_space());
2219
  if (marking_deque_.IsFull()) return;
2220

    
2221
  LargeObjectIterator lo_it(heap()->lo_space());
2222
  DiscoverGreyObjectsWithIterator(heap(),
2223
                                  &marking_deque_,
2224
                                  &lo_it);
2225
  if (marking_deque_.IsFull()) return;
2226

    
2227
  marking_deque_.ClearOverflowed();
2228
}
2229

    
2230

    
2231
// Mark all objects reachable (transitively) from objects on the marking
2232
// stack.  Before: the marking stack contains zero or more heap object
2233
// pointers.  After: the marking stack is empty and there are no overflowed
2234
// objects in the heap.
2235
void MarkCompactCollector::ProcessMarkingDeque() {
2236
  EmptyMarkingDeque();
2237
  while (marking_deque_.overflowed()) {
2238
    RefillMarkingDeque();
2239
    EmptyMarkingDeque();
2240
  }
2241
}
2242

    
2243

    
2244
// Mark all objects reachable (transitively) from objects on the marking
2245
// stack including references only considered in the atomic marking pause.
2246
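// This iterates to a fixed point: marking the values of weak collections or
// the children of object groups can make further keys and group parents
// reachable, so the loop repeats until the marking deque stays empty.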
void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2247
  bool work_to_do = true;
2248
  ASSERT(marking_deque_.IsEmpty());
2249
  while (work_to_do) {
2250
    isolate()->global_handles()->IterateObjectGroups(
2251
        visitor, &IsUnmarkedHeapObjectWithHeap);
2252
    MarkImplicitRefGroups();
2253
    ProcessWeakCollections();
2254
    work_to_do = !marking_deque_.IsEmpty();
2255
    ProcessMarkingDeque();
2256
  }
2257
}
2258

    
2259

    
2260
void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2261
  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2262
       !it.done(); it.Advance()) {
2263
    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2264
      return;
2265
    }
2266
    if (it.frame()->type() == StackFrame::OPTIMIZED) {
2267
      Code* code = it.frame()->LookupCode();
2268
      if (!code->CanDeoptAt(it.frame()->pc())) {
2269
        code->CodeIterateBody(visitor);
2270
      }
2271
      ProcessMarkingDeque();
2272
      return;
2273
    }
2274
  }
2275
}
2276

    
2277

    
2278
void MarkCompactCollector::MarkLiveObjects() {
2279
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2280
  // The recursive GC marker detects when it is nearing stack overflow,
2281
  // and switches to a different marking system.  JS interrupts interfere
2282
  // with the C stack limit check.
2283
  PostponeInterruptsScope postpone(isolate());
2284

    
2285
  bool incremental_marking_overflowed = false;
2286
  IncrementalMarking* incremental_marking = heap_->incremental_marking();
2287
  if (was_marked_incrementally_) {
2288
    // Finalize the incremental marking and check whether we had an overflow.
2289
    // Both markers use grey color to mark overflowed objects so
2290
    // non-incremental marker can deal with them as if overflow
2291
    // occurred during normal marking.
2292
    // But incremental marker uses a separate marking deque
2293
    // so we have to explicitly copy its overflow state.
2294
    incremental_marking->Finalize();
2295
    incremental_marking_overflowed =
2296
        incremental_marking->marking_deque()->overflowed();
2297
    incremental_marking->marking_deque()->ClearOverflowed();
2298
  } else {
2299
    // Abort any pending incremental activities e.g. incremental sweeping.
2300
    incremental_marking->Abort();
2301
  }
2302

    
2303
#ifdef DEBUG
2304
  ASSERT(state_ == PREPARE_GC);
2305
  state_ = MARK_LIVE_OBJECTS;
2306
#endif
2307
  // The to space contains live objects, a page in from space is used as a
2308
  // marking stack.
2309
  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2310
  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2311
  if (FLAG_force_marking_deque_overflows) {
2312
    marking_deque_end = marking_deque_start + 64 * kPointerSize;
2313
  }
2314
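  // (When the flag above is set, the deque is restricted to 64 words, forcing
  // frequent overflow so that the grey-object rediscovery path in
  // RefillMarkingDeque is exercised.)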
  marking_deque_.Initialize(marking_deque_start,
2315
                            marking_deque_end);
2316
  ASSERT(!marking_deque_.overflowed());
2317

    
2318
  if (incremental_marking_overflowed) {
2319
    // There are overflowed objects left in the heap after incremental marking.
2320
    marking_deque_.SetOverflowed();
2321
  }
2322

    
2323
  PrepareForCodeFlushing();
2324

    
2325
  if (was_marked_incrementally_) {
2326
    // There is no write barrier on cells so we have to scan them now at the end
2327
    // of the incremental marking.
2328
    {
2329
      HeapObjectIterator cell_iterator(heap()->cell_space());
2330
      HeapObject* cell;
2331
      while ((cell = cell_iterator.Next()) != NULL) {
2332
        ASSERT(cell->IsCell());
2333
        if (IsMarked(cell)) {
2334
          int offset = Cell::kValueOffset;
2335
          MarkCompactMarkingVisitor::VisitPointer(
2336
              heap(),
2337
              reinterpret_cast<Object**>(cell->address() + offset));
2338
        }
2339
      }
2340
    }
2341
    {
2342
      HeapObjectIterator js_global_property_cell_iterator(
2343
          heap()->property_cell_space());
2344
      HeapObject* cell;
2345
      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2346
        ASSERT(cell->IsPropertyCell());
2347
        if (IsMarked(cell)) {
2348
          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2349
        }
2350
      }
2351
    }
2352
  }
2353

    
2354
  RootMarkingVisitor root_visitor(heap());
2355
  MarkRoots(&root_visitor);
2356

    
2357
  ProcessTopOptimizedFrame(&root_visitor);
2358

    
2359
  // The objects reachable from the roots are marked, yet unreachable
2360
  // objects are unmarked.  Mark objects reachable due to host
2361
  // application specific logic or through Harmony weak maps.
2362
  ProcessEphemeralMarking(&root_visitor);
2363

    
2364
  // The objects reachable from the roots, weak maps or object groups
2365
  // are marked, yet unreachable objects are unmarked.  Mark objects
2366
  // reachable only from weak global handles.
2367
  //
2368
  // First we identify nonlive weak handles and mark them as pending
2369
  // destruction.
2370
  heap()->isolate()->global_handles()->IdentifyWeakHandles(
2371
      &IsUnmarkedHeapObject);
2372
  // Then we mark the objects and process the transitive closure.
2373
  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2374
  while (marking_deque_.overflowed()) {
2375
    RefillMarkingDeque();
2376
    EmptyMarkingDeque();
2377
  }
2378

    
2379
  // Repeat host application specific and Harmony weak maps marking to
2380
  // mark unmarked objects reachable from the weak roots.
2381
  ProcessEphemeralMarking(&root_visitor);
2382

    
2383
  AfterMarking();
2384
}
2385

    
2386

    
2387
void MarkCompactCollector::AfterMarking() {
2388
  // Object literal map caches reference strings (cache keys) and maps
2389
  // (cache values). At this point still useful maps have already been
2390
  // marked. Mark the keys for the alive values before we process the
2391
  // string table.
2392
  ProcessMapCaches();
2393

    
2394
  // Prune the string table removing all strings only pointed to by the
2395
  // string table.  Cannot use string_table() here because the string
2396
  // table is marked.
2397
  StringTable* string_table = heap()->string_table();
2398
  StringTableCleaner v(heap());
2399
  string_table->IterateElements(&v);
2400
  string_table->ElementsRemoved(v.PointersRemoved());
2401
  heap()->external_string_table_.Iterate(&v);
2402
  heap()->external_string_table_.CleanUp();
2403

    
2404
  // Process the weak references.
2405
  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2406
  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2407

    
2408
  // Remove object groups after marking phase.
2409
  heap()->isolate()->global_handles()->RemoveObjectGroups();
2410
  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2411

    
2412
  // Flush code from collected candidates.
2413
  if (is_code_flushing_enabled()) {
2414
    code_flusher_->ProcessCandidates();
2415
    // If incremental marker does not support code flushing, we need to
2416
    // disable it before incremental marking steps for next cycle.
2417
    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2418
      EnableCodeFlushing(false);
2419
    }
2420
  }
2421

    
2422
  if (!FLAG_watch_ic_patching) {
2423
    // Clean up dead objects from the runtime profiler.
2424
    heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
2425
  }
2426

    
2427
  if (FLAG_track_gc_object_stats) {
2428
    heap()->CheckpointObjectStats();
2429
  }
2430
}
2431

    
2432

    
2433
void MarkCompactCollector::ProcessMapCaches() {
2434
  Object* raw_context = heap()->native_contexts_list_;
2435
  while (raw_context != heap()->undefined_value()) {
2436
    Context* context = reinterpret_cast<Context*>(raw_context);
2437
    if (IsMarked(context)) {
2438
      HeapObject* raw_map_cache =
2439
          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2440
      // A map cache may be reachable from the stack. In this case
2441
      // it's already transitively marked and it's too late to clean
2442
      // up its parts.
2443
      if (!IsMarked(raw_map_cache) &&
2444
          raw_map_cache != heap()->undefined_value()) {
2445
        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2446
        int existing_elements = map_cache->NumberOfElements();
2447
        int used_elements = 0;
2448
        for (int i = MapCache::kElementsStartIndex;
2449
             i < map_cache->length();
2450
             i += MapCache::kEntrySize) {
2451
          Object* raw_key = map_cache->get(i);
2452
          if (raw_key == heap()->undefined_value() ||
2453
              raw_key == heap()->the_hole_value()) continue;
2454
          STATIC_ASSERT(MapCache::kEntrySize == 2);
2455
          Object* raw_map = map_cache->get(i + 1);
2456
          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2457
            ++used_elements;
2458
          } else {
2459
            // Delete useless entries with unmarked maps.
2460
            ASSERT(raw_map->IsMap());
2461
            map_cache->set_the_hole(i);
2462
            map_cache->set_the_hole(i + 1);
2463
          }
2464
        }
2465
        if (used_elements == 0) {
2466
          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2467
        } else {
2468
          // Note: we don't actually shrink the cache here to avoid
2469
          // extra complexity during GC. We rely on subsequent cache
2470
          // usages (EnsureCapacity) to do this.
2471
          map_cache->ElementsRemoved(existing_elements - used_elements);
2472
          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2473
          MarkObject(map_cache, map_cache_markbit);
2474
        }
2475
      }
2476
    }
2477
    // Move to next element in the list.
2478
    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2479
  }
2480
  ProcessMarkingDeque();
2481
}
2482

    
2483

    
2484
void MarkCompactCollector::ReattachInitialMaps() {
2485
  HeapObjectIterator map_iterator(heap()->map_space());
2486
  for (HeapObject* obj = map_iterator.Next();
2487
       obj != NULL;
2488
       obj = map_iterator.Next()) {
2489
    Map* map = Map::cast(obj);
2490

    
2491
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2492
    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2493

    
2494
    if (map->attached_to_shared_function_info()) {
2495
      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2496
    }
2497
  }
2498
}
2499

    
2500

    
2501
void MarkCompactCollector::ClearNonLiveReferences() {
2502
  // Iterate over the map space, setting map transitions that go from
2503
  // a marked map to an unmarked map to null transitions.  This action
2504
  // is carried out only on maps of JSObjects and related subtypes.
2505
  HeapObjectIterator map_iterator(heap()->map_space());
2506
  for (HeapObject* obj = map_iterator.Next();
2507
       obj != NULL;
2508
       obj = map_iterator.Next()) {
2509
    Map* map = Map::cast(obj);
2510

    
2511
    if (!map->CanTransition()) continue;
2512

    
2513
    MarkBit map_mark = Marking::MarkBitFrom(map);
2514
    if (map_mark.Get() && map->attached_to_shared_function_info()) {
2515
      // This map is used for inobject slack tracking and has been detached
2516
      // from SharedFunctionInfo during the mark phase.
2517
      // Since it survived the GC, reattach it now.
2518
      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2519
    }
2520

    
2521
    ClearNonLivePrototypeTransitions(map);
2522
    ClearNonLiveMapTransitions(map, map_mark);
2523

    
2524
    if (map_mark.Get()) {
2525
      ClearNonLiveDependentCode(map->dependent_code());
2526
    } else {
2527
      ClearAndDeoptimizeDependentCode(map->dependent_code());
2528
      map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2529
    }
2530
  }
2531

    
2532
  // Iterate over property cell space, removing dependent code that is not
2533
  // otherwise kept alive by strong references.
2534
  HeapObjectIterator cell_iterator(heap_->property_cell_space());
2535
  for (HeapObject* cell = cell_iterator.Next();
2536
       cell != NULL;
2537
       cell = cell_iterator.Next()) {
2538
    if (IsMarked(cell)) {
2539
      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2540
    }
2541
  }
2542

    
2543
  if (heap_->weak_object_to_code_table()->IsHashTable()) {
2544
    WeakHashTable* table =
2545
        WeakHashTable::cast(heap_->weak_object_to_code_table());
2546
    uint32_t capacity = table->Capacity();
2547
    for (uint32_t i = 0; i < capacity; i++) {
2548
      uint32_t key_index = table->EntryToIndex(i);
2549
      Object* key = table->get(key_index);
2550
      if (!table->IsKey(key)) continue;
2551
      uint32_t value_index = table->EntryToValueIndex(i);
2552
      Object* value = table->get(value_index);
2553
      if (IsMarked(key)) {
2554
        if (!IsMarked(value)) {
2555
          HeapObject* obj = HeapObject::cast(value);
2556
          MarkBit mark = Marking::MarkBitFrom(obj);
2557
          SetMark(obj, mark);
2558
        }
2559
        ClearNonLiveDependentCode(DependentCode::cast(value));
2560
      } else {
2561
        ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
2562
        table->set(key_index, heap_->the_hole_value());
2563
        table->set(value_index, heap_->the_hole_value());
2564
      }
2565
    }
2566
  }
2567
}
2568

    
2569

    
2570
void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2571
  int number_of_transitions = map->NumberOfProtoTransitions();
2572
  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2573

    
2574
  int new_number_of_transitions = 0;
2575
  const int header = Map::kProtoTransitionHeaderSize;
2576
  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2577
  const int map_offset = header + Map::kProtoTransitionMapOffset;
2578
  const int step = Map::kProtoTransitionElementsPerEntry;
2579
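  // Compact surviving (prototype, cached_map) pairs towards the front of the
  // array: a pair is kept only if both the prototype and the cached map are
  // marked.  Slots vacated at the tail are cleared to undefined below.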
  for (int i = 0; i < number_of_transitions; i++) {
2580
    Object* prototype = prototype_transitions->get(proto_offset + i * step);
2581
    Object* cached_map = prototype_transitions->get(map_offset + i * step);
2582
    if (IsMarked(prototype) && IsMarked(cached_map)) {
2583
      int proto_index = proto_offset + new_number_of_transitions * step;
2584
      int map_index = map_offset + new_number_of_transitions * step;
2585
      if (new_number_of_transitions != i) {
2586
        prototype_transitions->set(
2587
            proto_index,
2588
            prototype,
2589
            UPDATE_WRITE_BARRIER);
2590
        prototype_transitions->set(
2591
            map_index,
2592
            cached_map,
2593
            SKIP_WRITE_BARRIER);
2594
      }
2595
      Object** slot =
2596
          HeapObject::RawField(prototype_transitions,
2597
                               FixedArray::OffsetOfElementAt(proto_index));
2598
      RecordSlot(slot, slot, prototype);
2599
      new_number_of_transitions++;
2600
    }
2601
  }
2602

    
2603
  if (new_number_of_transitions != number_of_transitions) {
2604
    map->SetNumberOfProtoTransitions(new_number_of_transitions);
2605
  }
2606

    
2607
  // Fill slots that became free with undefined value.
2608
  for (int i = new_number_of_transitions * step;
2609
       i < number_of_transitions * step;
2610
       i++) {
2611
    prototype_transitions->set_undefined(header + i);
2612
  }
2613
}
2614

    
2615

    
2616
void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2617
                                                      MarkBit map_mark) {
2618
  Object* potential_parent = map->GetBackPointer();
2619
  if (!potential_parent->IsMap()) return;
2620
  Map* parent = Map::cast(potential_parent);
2621

    
2622
  // Follow the back pointer to check whether this is a map transition from a
2623
  // live map to a dead path; if so, clear the parent map's transitions.
2624
  bool current_is_alive = map_mark.Get();
2625
  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2626
  if (!current_is_alive && parent_is_alive) {
2627
    parent->ClearNonLiveTransitions(heap());
2628
  }
2629
}
2630

    
2631

    
2632
void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
2633
    DependentCode* entries) {
2634
  DisallowHeapAllocation no_allocation;
2635
  DependentCode::GroupStartIndexes starts(entries);
2636
  int number_of_entries = starts.number_of_entries();
2637
  if (number_of_entries == 0) return;
2638
  for (int i = 0; i < number_of_entries; i++) {
2639
    // If the entry is compilation info then the map must be alive,
2640
    // and ClearAndDeoptimizeDependentCode shouldn't be called.
2641
    ASSERT(entries->is_code_at(i));
2642
    Code* code = entries->code_at(i);
2643

    
2644
    if (IsMarked(code) && !code->marked_for_deoptimization()) {
2645
      code->set_marked_for_deoptimization(true);
2646
      have_code_to_deoptimize_ = true;
2647
    }
2648
    entries->clear_at(i);
2649
  }
2650
}
2651

    
2652

    
2653
void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2654
  DisallowHeapAllocation no_allocation;
2655
  DependentCode::GroupStartIndexes starts(entries);
2656
  int number_of_entries = starts.number_of_entries();
2657
  if (number_of_entries == 0) return;
2658
  int new_number_of_entries = 0;
2659
  // Go through all groups, remove dead code entries and compact.
2660
  for (int g = 0; g < DependentCode::kGroupCount; g++) {
2661
    int group_number_of_entries = 0;
2662
    for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2663
      Object* obj = entries->object_at(i);
2664
      ASSERT(obj->IsCode() || IsMarked(obj));
2665
      if (IsMarked(obj) &&
2666
          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2667
        if (new_number_of_entries + group_number_of_entries != i) {
2668
          entries->set_object_at(
2669
              new_number_of_entries + group_number_of_entries, obj);
2670
        }
2671
        Object** slot = entries->slot_at(new_number_of_entries +
2672
                                         group_number_of_entries);
2673
        RecordSlot(slot, slot, obj);
2674
        group_number_of_entries++;
2675
      }
2676
    }
2677
    entries->set_number_of_entries(
2678
        static_cast<DependentCode::DependencyGroup>(g),
2679
        group_number_of_entries);
2680
    new_number_of_entries += group_number_of_entries;
2681
  }
2682
  for (int i = new_number_of_entries; i < number_of_entries; i++) {
2683
    entries->clear_at(i);
2684
  }
2685
}
2686

    
2687

    
2688
void MarkCompactCollector::ProcessWeakCollections() {
2689
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2690
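  // Ephemeron semantics: for every weak collection encountered during marking,
  // mark the value of each entry whose key is already marked and record the
  // key/value slots for pointer updating.  Entries with unmarked keys are left
  // untouched here and removed later in ClearWeakCollections.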
  Object* weak_collection_obj = encountered_weak_collections();
2691
  while (weak_collection_obj != Smi::FromInt(0)) {
2692
    ASSERT(MarkCompactCollector::IsMarked(
2693
        HeapObject::cast(weak_collection_obj)));
2694
    JSWeakCollection* weak_collection =
2695
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2696
    ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2697
    Object** anchor = reinterpret_cast<Object**>(table->address());
2698
    for (int i = 0; i < table->Capacity(); i++) {
2699
      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2700
        Object** key_slot =
2701
            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2702
                ObjectHashTable::EntryToIndex(i)));
2703
        RecordSlot(anchor, key_slot, *key_slot);
2704
        Object** value_slot =
2705
            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2706
                ObjectHashTable::EntryToValueIndex(i)));
2707
        MarkCompactMarkingVisitor::MarkObjectByPointer(
2708
            this, anchor, value_slot);
2709
      }
2710
    }
2711
    weak_collection_obj = weak_collection->next();
2712
  }
2713
}
2714

    
2715

    
2716
void MarkCompactCollector::ClearWeakCollections() {
2717
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2718
  Object* weak_collection_obj = encountered_weak_collections();
2719
  while (weak_collection_obj != Smi::FromInt(0)) {
2720
    ASSERT(MarkCompactCollector::IsMarked(
2721
        HeapObject::cast(weak_collection_obj)));
2722
    JSWeakCollection* weak_collection =
2723
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2724
    ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2725
    for (int i = 0; i < table->Capacity(); i++) {
2726
      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2727
        table->RemoveEntry(i);
2728
      }
2729
    }
2730
    weak_collection_obj = weak_collection->next();
2731
    weak_collection->set_next(Smi::FromInt(0));
2732
  }
2733
  set_encountered_weak_collections(Smi::FromInt(0));
2734
}
2735

    
2736

    
2737
// We scavenge new space simultaneously with sweeping. This is done in two
2738
// passes.
2739
//
2740
// The first pass migrates all alive objects from one semispace to another or
2741
// promotes them to old space.  The forwarding address is written directly
2742
// into the first word of the object without any encoding.  If the object is
2743
// dead we write NULL as the forwarding address.
2744
//
2745
// The second pass updates pointers to new space in all spaces.  It is possible
2746
// to encounter pointers to dead new space objects during traversal of pointers
2747
// to new space.  We should clear them to avoid encountering them during next
2748
// pointer iteration.  This is an issue if the store buffer overflows and we
2749
// have to scan the entire old space, including dead objects, looking for
2750
// pointers to new space.
2751
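// Only moves into OLD_POINTER_SPACE need the word-by-word copy below, so that
// each copied slot can be re-registered in the store buffer (new-space
// targets) or in the migration slots buffer (evacuation-candidate targets).
// Code-space moves record a relocation entry; old-data-space and new-space
// moves use a plain block copy.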
void MarkCompactCollector::MigrateObject(Address dst,
2752
                                         Address src,
2753
                                         int size,
2754
                                         AllocationSpace dest) {
2755
  HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
2756
  if (heap_profiler->is_profiling()) {
2757
    heap_profiler->ObjectMoveEvent(src, dst, size);
2758
  }
2759
  ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
2760
  ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
2761
  if (dest == OLD_POINTER_SPACE) {
2762
    Address src_slot = src;
2763
    Address dst_slot = dst;
2764
    ASSERT(IsAligned(size, kPointerSize));
2765

    
2766
    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2767
      Object* value = Memory::Object_at(src_slot);
2768

    
2769
      Memory::Object_at(dst_slot) = value;
2770

    
2771
      if (heap_->InNewSpace(value)) {
2772
        heap_->store_buffer()->Mark(dst_slot);
2773
      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2774
        SlotsBuffer::AddTo(&slots_buffer_allocator_,
2775
                           &migration_slots_buffer_,
2776
                           reinterpret_cast<Object**>(dst_slot),
2777
                           SlotsBuffer::IGNORE_OVERFLOW);
2778
      }
2779

    
2780
      src_slot += kPointerSize;
2781
      dst_slot += kPointerSize;
2782
    }
2783

    
2784
    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2785
      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2786
      Address code_entry = Memory::Address_at(code_entry_slot);
2787

    
2788
      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2789
        SlotsBuffer::AddTo(&slots_buffer_allocator_,
2790
                           &migration_slots_buffer_,
2791
                           SlotsBuffer::CODE_ENTRY_SLOT,
2792
                           code_entry_slot,
2793
                           SlotsBuffer::IGNORE_OVERFLOW);
2794
      }
2795
    }
2796
  } else if (dest == CODE_SPACE) {
2797
    PROFILE(isolate(), CodeMoveEvent(src, dst));
2798
    heap()->MoveBlock(dst, src, size);
2799
    SlotsBuffer::AddTo(&slots_buffer_allocator_,
2800
                       &migration_slots_buffer_,
2801
                       SlotsBuffer::RELOCATED_CODE_OBJECT,
2802
                       dst,
2803
                       SlotsBuffer::IGNORE_OVERFLOW);
2804
    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2805
  } else {
2806
    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2807
    heap()->MoveBlock(dst, src, size);
2808
  }
2809
  Memory::Address_at(src) = dst;
2810
}
2811

    
2812

    
2813
// Visitor for updating pointers from live objects in old spaces to new space.
2814
// It does not expect to encounter pointers to dead objects.
2815
class PointersUpdatingVisitor: public ObjectVisitor {
2816
 public:
2817
  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2818

    
2819
  void VisitPointer(Object** p) {
2820
    UpdatePointer(p);
2821
  }
2822

    
2823
  void VisitPointers(Object** start, Object** end) {
2824
    for (Object** p = start; p < end; p++) UpdatePointer(p);
2825
  }
2826

    
2827
  void VisitEmbeddedPointer(RelocInfo* rinfo) {
2828
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2829
    Object* target = rinfo->target_object();
2830
    Object* old_target = target;
2831
    VisitPointer(&target);
2832
    // Avoid unnecessary changes that might unnecessarily flush the instruction
2833
    // cache.
2834
    if (target != old_target) {
2835
      rinfo->set_target_object(target);
2836
    }
2837
  }
2838

    
2839
  void VisitCodeTarget(RelocInfo* rinfo) {
2840
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2841
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2842
    Object* old_target = target;
2843
    VisitPointer(&target);
2844
    if (target != old_target) {
2845
      rinfo->set_target_address(Code::cast(target)->instruction_start());
2846
    }
2847
  }
2848

    
2849
  void VisitCodeAgeSequence(RelocInfo* rinfo) {
2850
    ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2851
    Object* stub = rinfo->code_age_stub();
2852
    ASSERT(stub != NULL);
2853
    VisitPointer(&stub);
2854
    if (stub != rinfo->code_age_stub()) {
2855
      rinfo->set_code_age_stub(Code::cast(stub));
2856
    }
2857
  }
2858

    
2859
  void VisitDebugTarget(RelocInfo* rinfo) {
2860
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2861
            rinfo->IsPatchedReturnSequence()) ||
2862
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2863
            rinfo->IsPatchedDebugBreakSlotSequence()));
2864
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2865
    VisitPointer(&target);
2866
    rinfo->set_call_address(Code::cast(target)->instruction_start());
2867
  }
2868

    
2869
  static inline void UpdateSlot(Heap* heap, Object** slot) {
2870
    Object* obj = *slot;
2871

    
2872
    if (!obj->IsHeapObject()) return;
2873

    
2874
    HeapObject* heap_obj = HeapObject::cast(obj);
2875

    
2876
    MapWord map_word = heap_obj->map_word();
2877
    if (map_word.IsForwardingAddress()) {
2878
      ASSERT(heap->InFromSpace(heap_obj) ||
2879
             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2880
      HeapObject* target = map_word.ToForwardingAddress();
2881
      *slot = target;
2882
      ASSERT(!heap->InFromSpace(target) &&
2883
             !MarkCompactCollector::IsOnEvacuationCandidate(target));
2884
    }
2885
  }
2886

    
2887
 private:
2888
  inline void UpdatePointer(Object** p) {
2889
    UpdateSlot(heap_, p);
2890
  }
2891

    
2892
  Heap* heap_;
2893
};
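
// Usage sketch: one visitor instance drives a whole update pass; it is handed
// to the standard iteration entry points, as in
// EvacuateNewSpaceAndCandidates() below:
//
//   PointersUpdatingVisitor updating_visitor(heap());
//   heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
//   heap_->string_table()->Iterate(&updating_visitor);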


static void UpdatePointer(HeapObject** p, HeapObject* object) {
  ASSERT(*p == object);

  Address old_addr = object->address();

  Address new_addr = Memory::Address_at(old_addr);

  // The new space sweep will overwrite the map word of dead objects
  // with NULL. In this case we do not need to transfer this entry to
  // the store buffer which we are rebuilding.
  if (new_addr != NULL) {
    *p = HeapObject::FromAddress(new_addr);
  } else {
    // We have to zap this pointer, because the store buffer may overflow later,
    // and then we have to scan the entire heap and we don't want to find
    // spurious newspace pointers in the old space.
    // TODO(mstarzinger): This was changed to a sentinel value to track down
    // rare crashes, change it back to Smi::FromInt(0) later.
    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1));  // flood
  }
}


static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                         Object** p) {
  MapWord map_word = HeapObject::cast(*p)->map_word();

  if (map_word.IsForwardingAddress()) {
    return String::cast(map_word.ToForwardingAddress());
  }

  return String::cast(*p);
}


bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
                                            int object_size) {
  // TODO(hpayer): Replace that check with an assert.
  CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);

  OldSpace* target_space = heap()->TargetSpace(object);

  ASSERT(target_space == heap()->old_pointer_space() ||
         target_space == heap()->old_data_space());
  Object* result;
  MaybeObject* maybe_result = target_space->AllocateRaw(
      object_size,
      PagedSpace::MOVE_OBJECT);
  if (maybe_result->ToObject(&result)) {
    HeapObject* target = HeapObject::cast(result);
    MigrateObject(target->address(),
                  object->address(),
                  object_size,
                  target_space->identity());
    heap()->mark_compact_collector()->tracer()->
        increment_promoted_objects_size(object_size);
    return true;
  }

  return false;
}


void MarkCompactCollector::EvacuateNewSpace() {
  // There are soft limits in the allocation code, designed to trigger a mark
  // sweep collection by failing allocations.  But since we are already in
  // a mark-sweep allocation, there is no sense in trying to trigger one.
  AlwaysAllocateScope scope;
  heap()->CheckNewSpaceExpansionCriteria();

  NewSpace* new_space = heap()->new_space();

  // Store allocation range before flipping semispaces.
  Address from_bottom = new_space->bottom();
  Address from_top = new_space->top();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space->Flip();
  new_space->ResetAllocationInfo();

  int survivors_size = 0;

  // First pass: traverse all objects in inactive semispace, remove marks,
  // migrate live objects and write forwarding addresses.  This stage puts
  // new entries in the store buffer and may cause some pages to be marked
  // scan-on-scavenge.
  NewSpacePageIterator it(from_bottom, from_top);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
  }

  heap_->IncrementYoungSurvivorsCounter(survivors_size);
  new_space->set_age_mark(new_space->top());
}


void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
  AlwaysAllocateScope always_allocate;
  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
  p->MarkSweptPrecisely();

  int offsets[16];

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();

    if (*cell == 0) continue;

    int live_objects = MarkWordToObjectStarts(*cell, offsets);
    for (int i = 0; i < live_objects; i++) {
      Address object_addr = cell_base + offsets[i] * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(object_addr);
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

      int size = object->Size();

      MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
      if (target->IsFailure()) {
        // OS refused to give us memory.
        V8::FatalProcessOutOfMemory("Evacuation");
        return;
      }

      Object* target_object = target->ToObjectUnchecked();

      MigrateObject(HeapObject::cast(target_object)->address(),
                    object_addr,
                    size,
                    space->identity());
      ASSERT(object->map_word().IsForwardingAddress());
    }

    // Clear marking bits for current cell.
    *cell = 0;
  }
  p->ResetLiveBytes();
}


void MarkCompactCollector::EvacuatePages() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    ASSERT(p->IsEvacuationCandidate() ||
           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
    if (p->IsEvacuationCandidate()) {
      // During compaction we might have to request a new page.
      // Check that the space still has room for that.
      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
        EvacuateLiveObjectsFromPage(p);
      } else {
        // Without room for expansion evacuation is not guaranteed to succeed.
        // Pessimistically abandon unevacuated pages.
        for (int j = i; j < npages; j++) {
          Page* page = evacuation_candidates_[j];
          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
          page->ClearEvacuationCandidate();
          page->SetFlag(Page::RESCAN_ON_EVACUATION);
          page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
        }
        return;
      }
    }
  }
}


class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (object->IsHeapObject()) {
      HeapObject* heap_object = HeapObject::cast(object);
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        return map_word.ToForwardingAddress();
      }
    }
    return object;
  }
};


static inline void UpdateSlot(Isolate* isolate,
                              ObjectVisitor* v,
                              SlotsBuffer::SlotType slot_type,
                              Address addr) {
  switch (slot_type) {
    case SlotsBuffer::CODE_TARGET_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
    case SlotsBuffer::CODE_ENTRY_SLOT: {
      v->VisitCodeEntry(addr);
      break;
    }
    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
      HeapObject* obj = HeapObject::FromAddress(addr);
      Code::cast(obj)->CodeIterateBody(v);
      break;
    }
    case SlotsBuffer::DEBUG_TARGET_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
      break;
    }
    case SlotsBuffer::JS_RETURN_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
      break;
    }
    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


enum SweepingMode {
  SWEEP_ONLY,
  SWEEP_AND_VISIT_LIVE_OBJECTS
};


enum SkipListRebuildingMode {
  REBUILD_SKIP_LIST,
  IGNORE_SKIP_LIST
};


// Sweep a space precisely.  After this has been done the space can
// be iterated precisely, hitting only the live objects.  Code space
// is always swept precisely because we want to be able to iterate
// over it.  Map space is swept precisely, because it is not compacted.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
static void SweepPrecisely(PagedSpace* space,
                           Page* p,
                           ObjectVisitor* v) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
            space->identity() == CODE_SPACE);
  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));

  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {
    start_time = OS::TimeCurrentMillis();
  }

  p->MarkSweptPrecisely();

  Address free_start = p->area_start();
  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
  int offsets[16];

  SkipList* skip_list = p->skip_list();
  int curr_region = -1;
  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
    skip_list->Clear();
  }

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();
    int live_objects = MarkWordToObjectStarts(*cell, offsets);
    int live_index = 0;
    for ( ; live_objects != 0; live_objects--) {
      Address free_end = cell_base + offsets[live_index++] * kPointerSize;
      if (free_end != free_start) {
        space->Free(free_start, static_cast<int>(free_end - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
          GDBJITInterface::RemoveCodeRange(free_start, free_end);
        }
#endif
      }
      HeapObject* live_object = HeapObject::FromAddress(free_end);
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
      Map* map = live_object->map();
      int size = live_object->SizeFromMap(map);
      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
        live_object->IterateBody(map->instance_type(), size, v);
      }
      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
        int new_region_start =
            SkipList::RegionNumber(free_end);
        int new_region_end =
            SkipList::RegionNumber(free_end + size - kPointerSize);
        if (new_region_start != curr_region ||
            new_region_end != curr_region) {
          skip_list->AddObject(free_end, size);
          curr_region = new_region_end;
        }
      }
      free_start = free_end + size;
    }
    // Clear marking bits for current cell.
    *cell = 0;
  }
  if (free_start != p->area_end()) {
    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
    }
#endif
  }
  p->ResetLiveBytes();
  if (FLAG_print_cumulative_gc_stat) {
    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
  }
}


static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
  Page* p = Page::FromAddress(code->address());

  if (p->IsEvacuationCandidate() ||
      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
    return false;
  }

  Address code_start = code->address();
  Address code_end = code_start + code->Size();

  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
  uint32_t end_index =
      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);

  Bitmap* b = p->markbits();

  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);

  MarkBit::CellType* start_cell = start_mark_bit.cell();
  MarkBit::CellType* end_cell = end_mark_bit.cell();

  if (value) {
    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;

    if (start_cell == end_cell) {
      *start_cell |= start_mask & end_mask;
    } else {
      *start_cell |= start_mask;
      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
        *cell = ~0;
      }
      *end_cell |= end_mask;
    }
  } else {
    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
      *cell = 0;
    }
  }

  return true;
}
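
// Worked example for the mask arithmetic above: if the start bit is bit 3 of
// its cell, start_mark_bit.mask() == 0x8 and start_mask == ~0x7, i.e. bits
// 3..31 set.  If the end bit is bit 5, end_mask == (0x20 << 1) - 1 == 0x3f,
// i.e. bits 0..5 set.  When both bits fall into the same cell, the
// intersection start_mask & end_mask sets exactly bits 3..5.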


static bool IsOnInvalidatedCodeObject(Address addr) {
  // We did not record any slots in large objects, thus
  // we can safely go to the page from the slot address.
  Page* p = Page::FromAddress(addr);

  // First check owner's identity because old pointer and old data spaces
  // are swept lazily and might still have non-zero mark-bits on some
  // pages.
  if (p->owner()->identity() != CODE_SPACE) return false;

  // In code space, only mark-bits on evacuation candidates (though we do not
  // record any slots on them) and under invalidated code objects are non-zero.
  MarkBit mark_bit =
      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));

  return mark_bit.Get();
}


void MarkCompactCollector::InvalidateCode(Code* code) {
  if (heap_->incremental_marking()->IsCompacting() &&
      !ShouldSkipEvacuationSlotRecording(code)) {
    ASSERT(compacting_);

    // If the object is white then no slots were recorded on it yet.
    MarkBit mark_bit = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(mark_bit)) return;

    invalidated_code_.Add(code);
  }
}


// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
  return code->marked_for_deoptimization();
}


bool MarkCompactCollector::MarkInvalidatedCode() {
  bool code_marked = false;

  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];

    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
      code_marked = true;
    }
  }

  return code_marked;
}


void MarkCompactCollector::RemoveDeadInvalidatedCode() {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
  }
}


void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];
    if (code != NULL) {
      code->Iterate(visitor);
      SetMarkBitsUnderInvalidatedCode(code, false);
    }
  }
  invalidated_code_.Rewind(0);
}


void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
  Heap::RelocationLock relocation_lock(heap());

  bool code_slots_filtering_required;
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    code_slots_filtering_required = MarkInvalidatedCode();
    EvacuateNewSpace();
  }

  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
    EvacuatePages();
  }

  // Second pass: find pointers to new space and update them.
  PointersUpdatingVisitor updating_visitor(heap());

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
    // Update pointers in to space.
    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
                            heap()->new_space()->top());
    for (HeapObject* object = to_it.Next();
         object != NULL;
         object = to_it.Next()) {
      Map* map = object->map();
      object->IterateBody(map->instance_type(),
                          object->SizeFromMap(map),
                          &updating_visitor);
    }
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
    // Update roots.
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
    StoreBufferRebuildScope scope(heap_,
                                  heap_->store_buffer(),
                                  &Heap::ScavengeStoreBufferCallback);
    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
        &UpdatePointer);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                       migration_slots_buffer_,
                                       code_slots_filtering_required);
    if (FLAG_trace_fragmentation) {
      PrintF("  migration slots buffer: %d\n",
             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
    }

    if (compacting_ && was_marked_incrementally_) {
      // It's difficult to filter out slots recorded for large objects.
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        // LargeObjectSpace is not swept yet thus we have to skip
        // dead objects explicitly.
        if (!IsMarked(obj)) continue;

        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          obj->Iterate(&updating_visitor);
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }

  int npages = evacuation_candidates_.length();
  { GCTracer::Scope gc_scope(
      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      ASSERT(p->IsEvacuationCandidate() ||
             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));

      if (p->IsEvacuationCandidate()) {
        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                           p->slots_buffer(),
                                           code_slots_filtering_required);
        if (FLAG_trace_fragmentation) {
          PrintF("  page %p slots buffer: %d\n",
                 reinterpret_cast<void*>(p),
                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
        }

        // Important: skip list should be cleared only after roots were updated
        // because root iteration traverses the stack and might have to find
        // code objects from non-updated pc pointing into evacuation candidate.
        SkipList* list = p->skip_list();
        if (list != NULL) list->Clear();
      } else {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

        switch (space->identity()) {
          case OLD_DATA_SPACE:
            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
            break;
          case OLD_POINTER_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
                space, p, &updating_visitor);
            break;
          case CODE_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
                space, p, &updating_visitor);
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
    }
  }

  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(heap_->cell_space());
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL;
       cell = cell_iterator.Next()) {
    if (cell->IsCell()) {
      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
    }
  }

  HeapObjectIterator js_global_property_cell_iterator(
      heap_->property_cell_space());
  for (HeapObject* cell = js_global_property_cell_iterator.Next();
       cell != NULL;
       cell = js_global_property_cell_iterator.Next()) {
    if (cell->IsPropertyCell()) {
      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
    }
  }

  // Update the head of the native contexts list in the heap.
  updating_visitor.VisitPointer(heap_->native_contexts_list_address());

  heap_->string_table()->Iterate(&updating_visitor);
  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
  if (heap_->weak_object_to_code_table()->IsHashTable()) {
    WeakHashTable* table =
        WeakHashTable::cast(heap_->weak_object_to_code_table());
    table->Iterate(&updating_visitor);
    table->Rehash(heap_->undefined_value());
  }

  // Update pointers from external string table.
  heap_->UpdateReferencesInExternalStringTable(
      &UpdateReferenceInExternalStringTableEntry);

  if (!FLAG_watch_ic_patching) {
    // Update JSFunction pointers from the runtime profiler.
    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
        &updating_visitor);
  }

  EvacuationWeakObjectRetainer evacuation_object_retainer;
  heap()->ProcessWeakReferences(&evacuation_object_retainer);

  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
  // under it.
  ProcessInvalidatedCode(&updating_visitor);

  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyEvacuation(heap_);
  }
#endif

  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
  ASSERT(migration_slots_buffer_ == NULL);
}


void MarkCompactCollector::UnlinkEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    p->Unlink();
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();
  }
}


void MarkCompactCollector::ReleaseEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    space->Free(p->area_start(), p->area_size());
    p->set_scan_on_scavenge(false);
    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
    p->ResetLiveBytes();
    space->ReleasePage(p, false);
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
  heap()->FreeQueuedChunks();
}


static const int kStartTableEntriesPerLine = 5;
static const int kStartTableLines = 171;
static const int kStartTableInvalidLine = 127;
static const int kStartTableUnusedEntry = 126;

#define _ kStartTableUnusedEntry
#define X kStartTableInvalidLine
// Mark-bit to object start offset table.
//
// The line is indexed by the mark bits in a byte.  The first number on
// the line describes the number of live object starts for the line and the
// other numbers on the line describe the offsets (in words) of the object
// starts.
//
// Since objects are at least 2 words large, we don't have entries for two
// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
  0, _, _, _, _,  // 0
  1, 0, _, _, _,  // 1
  1, 1, _, _, _,  // 2
  X, _, _, _, _,  // 3
  1, 2, _, _, _,  // 4
  2, 0, 2, _, _,  // 5
  X, _, _, _, _,  // 6
  X, _, _, _, _,  // 7
  1, 3, _, _, _,  // 8
  2, 0, 3, _, _,  // 9
  2, 1, 3, _, _,  // 10
  X, _, _, _, _,  // 11
  X, _, _, _, _,  // 12
  X, _, _, _, _,  // 13
  X, _, _, _, _,  // 14
  X, _, _, _, _,  // 15
  1, 4, _, _, _,  // 16
  2, 0, 4, _, _,  // 17
  2, 1, 4, _, _,  // 18
  X, _, _, _, _,  // 19
  2, 2, 4, _, _,  // 20
  3, 0, 2, 4, _,  // 21
  X, _, _, _, _,  // 22
  X, _, _, _, _,  // 23
  X, _, _, _, _,  // 24
  X, _, _, _, _,  // 25
  X, _, _, _, _,  // 26
  X, _, _, _, _,  // 27
  X, _, _, _, _,  // 28
  X, _, _, _, _,  // 29
  X, _, _, _, _,  // 30
  X, _, _, _, _,  // 31
  1, 5, _, _, _,  // 32
  2, 0, 5, _, _,  // 33
  2, 1, 5, _, _,  // 34
  X, _, _, _, _,  // 35
  2, 2, 5, _, _,  // 36
  3, 0, 2, 5, _,  // 37
  X, _, _, _, _,  // 38
  X, _, _, _, _,  // 39
  2, 3, 5, _, _,  // 40
  3, 0, 3, 5, _,  // 41
  3, 1, 3, 5, _,  // 42
  X, _, _, _, _,  // 43
  X, _, _, _, _,  // 44
  X, _, _, _, _,  // 45
  X, _, _, _, _,  // 46
  X, _, _, _, _,  // 47
  X, _, _, _, _,  // 48
  X, _, _, _, _,  // 49
  X, _, _, _, _,  // 50
  X, _, _, _, _,  // 51
  X, _, _, _, _,  // 52
  X, _, _, _, _,  // 53
  X, _, _, _, _,  // 54
  X, _, _, _, _,  // 55
  X, _, _, _, _,  // 56
  X, _, _, _, _,  // 57
  X, _, _, _, _,  // 58
  X, _, _, _, _,  // 59
  X, _, _, _, _,  // 60
  X, _, _, _, _,  // 61
  X, _, _, _, _,  // 62
  X, _, _, _, _,  // 63
  1, 6, _, _, _,  // 64
  2, 0, 6, _, _,  // 65
  2, 1, 6, _, _,  // 66
  X, _, _, _, _,  // 67
  2, 2, 6, _, _,  // 68
  3, 0, 2, 6, _,  // 69
  X, _, _, _, _,  // 70
  X, _, _, _, _,  // 71
  2, 3, 6, _, _,  // 72
  3, 0, 3, 6, _,  // 73
  3, 1, 3, 6, _,  // 74
  X, _, _, _, _,  // 75
  X, _, _, _, _,  // 76
  X, _, _, _, _,  // 77
  X, _, _, _, _,  // 78
  X, _, _, _, _,  // 79
  2, 4, 6, _, _,  // 80
  3, 0, 4, 6, _,  // 81
  3, 1, 4, 6, _,  // 82
  X, _, _, _, _,  // 83
  3, 2, 4, 6, _,  // 84
  4, 0, 2, 4, 6,  // 85
  X, _, _, _, _,  // 86
  X, _, _, _, _,  // 87
  X, _, _, _, _,  // 88
  X, _, _, _, _,  // 89
  X, _, _, _, _,  // 90
  X, _, _, _, _,  // 91
  X, _, _, _, _,  // 92
  X, _, _, _, _,  // 93
  X, _, _, _, _,  // 94
  X, _, _, _, _,  // 95
  X, _, _, _, _,  // 96
  X, _, _, _, _,  // 97
  X, _, _, _, _,  // 98
  X, _, _, _, _,  // 99
  X, _, _, _, _,  // 100
  X, _, _, _, _,  // 101
  X, _, _, _, _,  // 102
  X, _, _, _, _,  // 103
  X, _, _, _, _,  // 104
  X, _, _, _, _,  // 105
  X, _, _, _, _,  // 106
  X, _, _, _, _,  // 107
  X, _, _, _, _,  // 108
  X, _, _, _, _,  // 109
  X, _, _, _, _,  // 110
  X, _, _, _, _,  // 111
  X, _, _, _, _,  // 112
  X, _, _, _, _,  // 113
  X, _, _, _, _,  // 114
  X, _, _, _, _,  // 115
  X, _, _, _, _,  // 116
  X, _, _, _, _,  // 117
  X, _, _, _, _,  // 118
  X, _, _, _, _,  // 119
  X, _, _, _, _,  // 120
  X, _, _, _, _,  // 121
  X, _, _, _, _,  // 122
  X, _, _, _, _,  // 123
  X, _, _, _, _,  // 124
  X, _, _, _, _,  // 125
  X, _, _, _, _,  // 126
  X, _, _, _, _,  // 127
  1, 7, _, _, _,  // 128
  2, 0, 7, _, _,  // 129
  2, 1, 7, _, _,  // 130
  X, _, _, _, _,  // 131
  2, 2, 7, _, _,  // 132
  3, 0, 2, 7, _,  // 133
  X, _, _, _, _,  // 134
  X, _, _, _, _,  // 135
  2, 3, 7, _, _,  // 136
  3, 0, 3, 7, _,  // 137
  3, 1, 3, 7, _,  // 138
  X, _, _, _, _,  // 139
  X, _, _, _, _,  // 140
  X, _, _, _, _,  // 141
  X, _, _, _, _,  // 142
  X, _, _, _, _,  // 143
  2, 4, 7, _, _,  // 144
  3, 0, 4, 7, _,  // 145
  3, 1, 4, 7, _,  // 146
  X, _, _, _, _,  // 147
  3, 2, 4, 7, _,  // 148
  4, 0, 2, 4, 7,  // 149
  X, _, _, _, _,  // 150
  X, _, _, _, _,  // 151
  X, _, _, _, _,  // 152
  X, _, _, _, _,  // 153
  X, _, _, _, _,  // 154
  X, _, _, _, _,  // 155
  X, _, _, _, _,  // 156
  X, _, _, _, _,  // 157
  X, _, _, _, _,  // 158
  X, _, _, _, _,  // 159
  2, 5, 7, _, _,  // 160
  3, 0, 5, 7, _,  // 161
  3, 1, 5, 7, _,  // 162
  X, _, _, _, _,  // 163
  3, 2, 5, 7, _,  // 164
  4, 0, 2, 5, 7,  // 165
  X, _, _, _, _,  // 166
  X, _, _, _, _,  // 167
  3, 3, 5, 7, _,  // 168
  4, 0, 3, 5, 7,  // 169
  4, 1, 3, 5, 7   // 170
};
#undef _
#undef X


// Takes a word of mark bits.  Returns the number of objects that start in the
// range.  Puts the word offsets of the object starts in the supplied array.
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
  int objects = 0;
  int offset = 0;

  // No consecutive 1 bits.
  ASSERT((mark_bits & 0x180) != 0x180);
  ASSERT((mark_bits & 0x18000) != 0x18000);
  ASSERT((mark_bits & 0x1800000) != 0x1800000);

  while (mark_bits != 0) {
    int byte = (mark_bits & 0xff);
    mark_bits >>= 8;
    if (byte != 0) {
      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
      char* table = kStartTable + byte * kStartTableEntriesPerLine;
      int objects_in_these_8_words = table[0];
      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
      for (int i = 0; i < objects_in_these_8_words; i++) {
        starts[objects++] = offset + table[1 + i];
      }
    }
    offset += 8;
  }
  return objects;
}
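
// Worked example: for mark_bits == 0x15 the low byte is 21 (binary 10101,
// i.e. bits 0, 2 and 4 set).  Line 21 of kStartTable reads {3, 0, 2, 4}, so
// MarkWordToObjectStarts() reports three object starts, at word offsets 0, 2
// and 4 from the cell base.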


static inline Address DigestFreeStart(Address approximate_free_start,
                                      uint32_t free_start_cell) {
  ASSERT(free_start_cell != 0);

  // No consecutive 1 bits.
  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);

  int offsets[16];
  uint32_t cell = free_start_cell;
  int offset_of_last_live;
  if ((cell & 0x80000000u) != 0) {
    // This case would overflow below.
    offset_of_last_live = 31;
  } else {
    // Remove all but one bit, the most significant.  This is an optimization
    // that may or may not be worthwhile.
    cell |= cell >> 16;
    cell |= cell >> 8;
    cell |= cell >> 4;
    cell |= cell >> 2;
    cell |= cell >> 1;
    cell = (cell + 1) >> 1;
    int live_objects = MarkWordToObjectStarts(cell, offsets);
    ASSERT(live_objects == 1);
    offset_of_last_live = offsets[live_objects - 1];
  }
  Address last_live_start =
      approximate_free_start + offset_of_last_live * kPointerSize;
  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
  Address free_start = last_live_start + last_live->Size();
  return free_start;
}
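
// Worked example for the bit trick above: for free_start_cell == 0x12
// (binary 10010) the shift-or cascade smears the word to 0x1f, and
// (0x1f + 1) >> 1 == 0x10 keeps only the most significant set bit, i.e. the
// start of the last live object in that cell.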


static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
  ASSERT(cell != 0);

  // No consecutive 1 bits.
  ASSERT((cell & (cell << 1)) == 0);

  int offsets[16];
  if (cell == 0x80000000u) {  // Avoid overflow below.
    return block_address + 31 * kPointerSize;
  }
  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
  ASSERT((first_set_bit & cell) == first_set_bit);
  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
  ASSERT(live_objects == 1);
  USE(live_objects);
  return block_address + offsets[0] * kPointerSize;
}
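
// Worked example: for cell == 0x28 (binary 101000), cell - 1 == 0x27 and
// cell ^ (cell - 1) == 0x0f; adding one and shifting right yields 0x08, the
// lowest set bit, so the first live object in the cell starts at word
// offset 3.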


template<MarkCompactCollector::SweepingParallelism mode>
static intptr_t Free(PagedSpace* space,
                     FreeList* free_list,
                     Address start,
                     int size) {
  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
    return space->Free(start, size);
  } else {
    return size - free_list->Free(start, size);
  }
}


// Force instantiation of templatized SweepConservatively method for
// SWEEP_SEQUENTIALLY mode.
template intptr_t MarkCompactCollector::
    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
        PagedSpace*, FreeList*, Page*);


// Force instantiation of templatized SweepConservatively method for
// SWEEP_IN_PARALLEL mode.
template intptr_t MarkCompactCollector::
    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
        PagedSpace*, FreeList*, Page*);


// Sweeps a space conservatively.  After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched.  A free space is always either ignored or put
// on the free list, never split up into two parts.  This is important
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning.  Dead objects other than free
// spaces will not contain the free space map.
template<MarkCompactCollector::SweepingParallelism mode>
intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
                                                   FreeList* free_list,
                                                   Page* p) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
         free_list != NULL) ||
         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
         free_list == NULL));

  p->MarkSweptConservatively();

  intptr_t freed_bytes = 0;
  size_t size = 0;

  // Skip over all the dead objects at the start of the page and mark them free.
  Address cell_base = 0;
  MarkBit::CellType* cell = NULL;
  MarkBitCellIterator it(p);
  for (; !it.Done(); it.Advance()) {
    cell_base = it.CurrentCellBase();
    cell = it.CurrentCell();
    if (*cell != 0) break;
  }

  if (it.Done()) {
    size = p->area_end() - p->area_start();
    freed_bytes += Free<mode>(space, free_list, p->area_start(),
                              static_cast<int>(size));
    ASSERT_EQ(0, p->LiveBytes());
    return freed_bytes;
  }

  // Grow the size of the start-of-page free space a little to get up to the
  // first live object.
  Address free_end = StartOfLiveObject(cell_base, *cell);
  // Free the first free space.
  size = free_end - p->area_start();
  freed_bytes += Free<mode>(space, free_list, p->area_start(),
                            static_cast<int>(size));

  // The start of the current free area is represented in undigested form by
  // the address of the last 32-word section that contained a live object and
  // the marking bitmap for that cell, which describes where the live object
  // started.  Unless we find a large free space in the bitmap we will not
  // digest this pair into a real address.  We start the iteration here at the
  // first word in the marking bit map that indicates a live object.
  Address free_start = cell_base;
  MarkBit::CellType free_start_cell = *cell;

  for (; !it.Done(); it.Advance()) {
    cell_base = it.CurrentCellBase();
    cell = it.CurrentCell();
    if (*cell != 0) {
      // We have a live object.  Check approximately whether it is more than 32
      // words since the last live object.
      if (cell_base - free_start > 32 * kPointerSize) {
        free_start = DigestFreeStart(free_start, free_start_cell);
        if (cell_base - free_start > 32 * kPointerSize) {
          // Now that we know the exact start of the free space it still looks
          // like we have a large enough free space to be worth bothering with,
          // so now we need to find the start of the first live object at the
          // end of the free space.
          free_end = StartOfLiveObject(cell_base, *cell);
          freed_bytes += Free<mode>(space, free_list, free_start,
                                    static_cast<int>(free_end - free_start));
        }
      }
      // Update our undigested record of where the current free area started.
      free_start = cell_base;
      free_start_cell = *cell;
      // Clear marking bits for current cell.
      *cell = 0;
    }
  }

  // Handle the free space at the end of the page.
  if (cell_base - free_start > 32 * kPointerSize) {
    free_start = DigestFreeStart(free_start, free_start_cell);
    freed_bytes += Free<mode>(space, free_list, free_start,
                              static_cast<int>(p->area_end() - free_start));
  }

  p->ResetLiveBytes();
  return freed_bytes;
}


void MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           FreeList* private_free_list,
                                           FreeList* free_list) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();

    if (p->TryParallelSweeping()) {
      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
      free_list->Concatenate(private_free_list);
    }
  }
}


void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                      sweeper == LAZY_CONSERVATIVE ||
                                      sweeper == PARALLEL_CONSERVATIVE ||
                                      sweeper == CONCURRENT_CONSERVATIVE);
  space->ClearStats();

  PageIterator it(space);

  int pages_swept = 0;
  bool lazy_sweeping_active = false;
  bool unused_page_present = false;
  bool parallel_sweeping_active = false;

  while (it.has_next()) {
    Page* p = it.next();

    ASSERT(p->parallel_sweeping() == 0);
    ASSERT(!p->IsEvacuationCandidate());

    // Clear sweeping flags indicating that marking bits are still intact.
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();

    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      ASSERT(evacuation_candidates_.length() > 0);
      continue;
    }

    // One unused page is kept, all further are released before sweeping them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        // Adjust unswept free bytes because releasing a page expects said
        // counter to be accurate for unswept pages.
        space->IncreaseUnsweptFreeBytes(p);
        space->ReleasePage(p, true);
        continue;
      }
      unused_page_present = true;
    }

    switch (sweeper) {
      case CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
        pages_swept++;
        break;
      }
      case LAZY_CONSERVATIVE: {
        if (lazy_sweeping_active) {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          space->IncreaseUnsweptFreeBytes(p);
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
          pages_swept++;
          space->SetPagesToSweep(p->next_page());
          lazy_sweeping_active = true;
        }
        break;
      }
      case CONCURRENT_CONSERVATIVE:
      case PARALLEL_CONSERVATIVE: {
        if (!parallel_sweeping_active) {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
          pages_swept++;
          parallel_sweeping_active = true;
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          p->set_parallel_sweeping(1);
          space->IncreaseUnsweptFreeBytes(p);
        }
        break;
      }
      case PRECISE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        if (space->identity() == CODE_SPACE) {
          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
        } else {
          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
        }
        pages_swept++;
        break;
      }
      default: {
        UNREACHABLE();
      }
    }
  }

  if (FLAG_gc_verbose) {
    PrintF("SweepSpace: %s (%d pages swept)\n",
           AllocationSpaceName(space->identity()),
           pages_swept);
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->FreeQueuedChunks();
}


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif
  SweeperType how_to_sweep =
      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
  if (sweep_precisely_) how_to_sweep = PRECISE;

  // Unlink evacuation candidates before sweeper threads access the list of
  // pages to avoid race condition.
  UnlinkEvacuationCandidates();

  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SequentialSweepingScope scope(this);
  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
  SweepSpace(heap()->old_data_space(), how_to_sweep);

  if (how_to_sweep == PARALLEL_CONSERVATIVE ||
      how_to_sweep == CONCURRENT_CONSERVATIVE) {
    // TODO(hpayer): fix race with concurrent sweeper
    StartSweeperThreads();
  }

  if (how_to_sweep == PARALLEL_CONSERVATIVE) {
    WaitUntilSweepingCompleted();
  }

  RemoveDeadInvalidatedCode();
  SweepSpace(heap()->code_space(), PRECISE);

  SweepSpace(heap()->cell_space(), PRECISE);
  SweepSpace(heap()->property_cell_space(), PRECISE);

  EvacuateNewSpaceAndCandidates();

  // ClearNonLiveTransitions depends on precise sweeping of map space to
  // detect whether unmarked map became dead in this collection or in one
  // of the previous ones.
  SweepSpace(heap()->map_space(), PRECISE);

  // Deallocate unmarked objects and clear marked bits for marked objects.
  heap_->lo_space()->FreeUnmarkedObjects();

  // Deallocate evacuated candidate pages.
  ReleaseEvacuationCandidates();
}


void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
  if (isolate()->debug()->IsLoaded() ||
      isolate()->debug()->has_break_points()) {
    enable = false;
  }
#endif

  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(isolate());
  } else {
    if (code_flusher_ == NULL) return;
    code_flusher_->EvictAllCandidates();
    delete code_flusher_;
    code_flusher_ = NULL;
  }

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
  }
}


// TODO(1466) ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects. We should either reenable it or change our tools.
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
  if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
}


Isolate* MarkCompactCollector::isolate() const {
  return heap_->isolate();
}


void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}


bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}


bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
                        SlotsBuffer** buffer_address,
                        SlotType type,
                        Address addr,
                        AdditionMode mode) {
  SlotsBuffer* buffer = *buffer_address;
  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
      allocator->DeallocateChain(buffer_address);
      return false;
    }
    buffer = allocator->AllocateBuffer(buffer);
    *buffer_address = buffer;
  }
  ASSERT(buffer->HasSpaceForTypedSlot());
  buffer->Add(reinterpret_cast<ObjectSlot>(type));
  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
  return true;
}
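
// Note on the encoding: a typed slot occupies two consecutive entries in the
// buffer, the type tag followed by the address, e.g.
//
//   ... | CODE_ENTRY_SLOT | <code entry slot address> | ...
//
// IsTypedSlot() above distinguishes a tag from a real slot pointer because
// tag values are smaller than NUMBER_OF_SLOT_TYPES, and UpdateSlots() below
// consumes the pair via DecodeSlotType().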


static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return SlotsBuffer::CODE_TARGET_SLOT;
  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
    return SlotsBuffer::DEBUG_TARGET_SLOT;
  } else if (RelocInfo::IsJSReturn(rmode)) {
    return SlotsBuffer::JS_RETURN_SLOT;
  }
  UNREACHABLE();
  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}


void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotTypeForRMode(rinfo->rmode()),
                            rinfo->pc(),
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotsBuffer::CODE_ENTRY_SLOT,
                            slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host = isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}


static inline SlotsBuffer::SlotType DecodeSlotType(
    SlotsBuffer::ObjectSlot slot) {
  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
}


void SlotsBuffer::UpdateSlots(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      PointersUpdatingVisitor::UpdateSlot(heap, slot);
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      UpdateSlot(heap->isolate(),
                 &v,
                 DecodeSlotType(slot),
                 reinterpret_cast<Address>(slots_[slot_idx]));
    }
  }
}


void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
        PointersUpdatingVisitor::UpdateSlot(heap, slot);
      }
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
      if (!IsOnInvalidatedCodeObject(pc)) {
        UpdateSlot(heap->isolate(),
                   &v,
                   DecodeSlotType(slot),
                   reinterpret_cast<Address>(slots_[slot_idx]));
      }
    }
  }
}


SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  return new SlotsBuffer(next_buffer);
}


void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  delete buffer;
}


void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != NULL) {
    SlotsBuffer* next_buffer = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next_buffer;
  }
  *buffer_address = NULL;
}


} }  // namespace v8::internal