// deps/v8/src/heap.cc @ f230a1cf
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "accessors.h"
#include "api.h"
#include "bootstrapper.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "cpu-profiler.h"
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
#include "isolate-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "once.h"
#include "runtime-profiler.h"
#include "scopeinfo.h"
#include "snapshot.h"
#include "store-buffer.h"
#include "utils/random-number-generator.h"
#include "v8threads.h"
#include "v8utils.h"
#include "vm-state-inl.h"
#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
#include "regexp-macro-assembler.h"
#include "mips/regexp-macro-assembler-mips.h"
#endif

namespace v8 {
namespace internal {


Heap::Heap()
    : isolate_(NULL),
      code_range_size_(kIs64BitArch ? 512 * MB : 0),
// semispace_size_ should be a power of 2 and old_generation_size_ should be
// a multiple of Page::kPageSize.
      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
      max_semispace_size_(8 * (kPointerSize / 4)  * MB),
      initial_semispace_size_(Page::kPageSize),
      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
      max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
// Will be 4 * reserved_semispace_size_ to ensure that young
// generation can be aligned to its size.
      survived_since_last_expansion_(0),
      sweep_generation_(0),
      always_allocate_scope_depth_(0),
      linear_allocation_scope_depth_(0),
      contexts_disposed_(0),
      global_ic_age_(0),
      flush_monomorphic_ics_(false),
      allocation_mementos_found_(0),
      scan_on_scavenge_pages_(0),
      new_space_(this),
      old_pointer_space_(NULL),
      old_data_space_(NULL),
      code_space_(NULL),
      map_space_(NULL),
      cell_space_(NULL),
      property_cell_space_(NULL),
      lo_space_(NULL),
      gc_state_(NOT_IN_GC),
      gc_post_processing_depth_(0),
      ms_count_(0),
      gc_count_(0),
      remembered_unmapped_pages_index_(0),
      unflattened_strings_length_(0),
#ifdef DEBUG
      allocation_timeout_(0),
      disallow_allocation_failure_(false),
#endif  // DEBUG
      new_space_high_promotion_mode_active_(false),
      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
      size_of_old_gen_at_last_old_space_gc_(0),
      external_allocation_limit_(0),
      amount_of_external_allocated_memory_(0),
      amount_of_external_allocated_memory_at_last_global_gc_(0),
      old_gen_exhausted_(false),
      store_buffer_rebuilder_(store_buffer()),
      hidden_string_(NULL),
      gc_safe_size_of_old_object_(NULL),
      total_regexp_code_generated_(0),
      tracer_(NULL),
      young_survivors_after_last_gc_(0),
      high_survival_rate_period_length_(0),
      low_survival_rate_period_length_(0),
      survival_rate_(0),
      previous_survival_rate_trend_(Heap::STABLE),
      survival_rate_trend_(Heap::STABLE),
      max_gc_pause_(0.0),
      total_gc_time_ms_(0.0),
      max_alive_after_gc_(0),
      min_in_mutator_(kMaxInt),
      alive_after_last_gc_(0),
      last_gc_end_timestamp_(0.0),
      marking_time_(0.0),
      sweeping_time_(0.0),
      store_buffer_(this),
      marking_(this),
      incremental_marking_(this),
      number_idle_notifications_(0),
      last_idle_notification_gc_count_(0),
      last_idle_notification_gc_count_init_(false),
      mark_sweeps_since_idle_round_started_(0),
      gc_count_at_last_idle_gc_(0),
      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
      full_codegen_bytes_generated_(0),
      crankshaft_codegen_bytes_generated_(0),
      gcs_since_last_deopt_(0),
#ifdef VERIFY_HEAP
      no_weak_object_verification_scope_depth_(0),
#endif
      promotion_queue_(this),
      configured_(false),
      chunks_queued_for_free_(NULL),
      relocation_mutex_(NULL) {
  // Allow build-time customization of the max semispace size. Building
  // V8 with snapshots and a non-default max semispace size is much
  // easier if you can define it as part of the build environment.
#if defined(V8_MAX_SEMISPACE_SIZE)
  max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
#endif

  // Ensure old_generation_size_ is a multiple of kPageSize.
  ASSERT(MB >= Page::kPageSize);

  intptr_t max_virtual = OS::MaxVirtualMemory();

  if (max_virtual > 0) {
    if (code_range_size_ > 0) {
      // Reserve no more than 1/8 of the memory for the code range.
      code_range_size_ = Min(code_range_size_, max_virtual >> 3);
    }
  }

  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
  native_contexts_list_ = NULL;
  array_buffers_list_ = Smi::FromInt(0);
  allocation_sites_list_ = Smi::FromInt(0);
  mark_compact_collector_.heap_ = this;
  external_string_table_.heap_ = this;
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
  RememberUnmappedPage(NULL, false);

  ClearObjectStats(true);
}


intptr_t Heap::Capacity() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Capacity() +
      old_pointer_space_->Capacity() +
      old_data_space_->Capacity() +
      code_space_->Capacity() +
      map_space_->Capacity() +
      cell_space_->Capacity() +
      property_cell_space_->Capacity();
}


intptr_t Heap::CommittedMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedMemory() +
      old_pointer_space_->CommittedMemory() +
      old_data_space_->CommittedMemory() +
      code_space_->CommittedMemory() +
      map_space_->CommittedMemory() +
      cell_space_->CommittedMemory() +
      property_cell_space_->CommittedMemory() +
      lo_space_->Size();
}


size_t Heap::CommittedPhysicalMemory() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.CommittedPhysicalMemory() +
      old_pointer_space_->CommittedPhysicalMemory() +
      old_data_space_->CommittedPhysicalMemory() +
      code_space_->CommittedPhysicalMemory() +
      map_space_->CommittedPhysicalMemory() +
      cell_space_->CommittedPhysicalMemory() +
      property_cell_space_->CommittedPhysicalMemory() +
      lo_space_->CommittedPhysicalMemory();
}


intptr_t Heap::CommittedMemoryExecutable() {
  if (!HasBeenSetUp()) return 0;

  return isolate()->memory_allocator()->SizeExecutable();
}


intptr_t Heap::Available() {
  if (!HasBeenSetUp()) return 0;

  return new_space_.Available() +
      old_pointer_space_->Available() +
      old_data_space_->Available() +
      code_space_->Available() +
      map_space_->Available() +
      cell_space_->Available() +
      property_cell_space_->Available();
}


bool Heap::HasBeenSetUp() {
  return old_pointer_space_ != NULL &&
         old_data_space_ != NULL &&
         code_space_ != NULL &&
         map_space_ != NULL &&
         cell_space_ != NULL &&
         property_cell_space_ != NULL &&
         lo_space_ != NULL;
}


int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
  if (IntrusiveMarking::IsMarked(object)) {
    return IntrusiveMarking::SizeOfMarkedObject(object);
  }
  return object->SizeFromMap(object->map());
}


GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
                                              const char** reason) {
  // Is global GC requested?
  if (space != NEW_SPACE) {
    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
    *reason = "GC in old space requested";
    return MARK_COMPACTOR;
  }

  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
    *reason = "GC in old space forced by flags";
    return MARK_COMPACTOR;
  }

  // Is enough data promoted to justify a global GC?
  if (OldGenerationAllocationLimitReached()) {
    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
    *reason = "promotion limit reached";
    return MARK_COMPACTOR;
  }

  // Have allocation in OLD and LO failed?
  if (old_gen_exhausted_) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "old generations exhausted";
    return MARK_COMPACTOR;
  }

  // Is there enough space left in OLD to guarantee that a scavenge can
  // succeed?
  //
  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
  // for object promotion. It counts only the bytes that the memory
  // allocator has not yet allocated from the OS and assigned to any space,
  // and does not count available bytes already in the old space or code
  // space.  Undercounting is safe---we may get an unrequested full GC when
  // a scavenge would have succeeded.
  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
    isolate_->counters()->
        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
    *reason = "scavenge might not succeed";
    return MARK_COMPACTOR;
  }

  // Default
  *reason = NULL;
  return SCAVENGER;
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsBeforeGC() {
  // Heap::ReportHeapStatistics will also log NewSpace statistics when
  // compiled --log-gc is set.  The following logic is used to avoid
  // double logging.
#ifdef DEBUG
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
  if (FLAG_heap_stats) {
    ReportHeapStatistics("Before GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
#else
  if (FLAG_log_gc) {
    new_space_.CollectStatistics();
    new_space_.ReportStatistics();
    new_space_.ClearHistograms();
  }
#endif  // DEBUG
}


void Heap::PrintShortHeapStatistics() {
  if (!FLAG_trace_gc_verbose) return;
  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB\n",
           isolate_->memory_allocator()->Size() / KB,
           isolate_->memory_allocator()->Available() / KB);
  PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           new_space_.Size() / KB,
           new_space_.Available() / KB,
           new_space_.CommittedMemory() / KB);
  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_pointer_space_->SizeOfObjects() / KB,
           old_pointer_space_->Available() / KB,
           old_pointer_space_->CommittedMemory() / KB);
  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           old_data_space_->SizeOfObjects() / KB,
           old_data_space_->Available() / KB,
           old_data_space_->CommittedMemory() / KB);
  PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           code_space_->SizeOfObjects() / KB,
           code_space_->Available() / KB,
           code_space_->CommittedMemory() / KB);
  PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           map_space_->SizeOfObjects() / KB,
           map_space_->Available() / KB,
           map_space_->CommittedMemory() / KB);
  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           cell_space_->SizeOfObjects() / KB,
           cell_space_->Available() / KB,
           cell_space_->CommittedMemory() / KB);
  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           property_cell_space_->SizeOfObjects() / KB,
           property_cell_space_->Available() / KB,
           property_cell_space_->CommittedMemory() / KB);
  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           lo_space_->SizeOfObjects() / KB,
           lo_space_->Available() / KB,
           lo_space_->CommittedMemory() / KB);
  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
               ", available: %6" V8_PTR_PREFIX "d KB"
               ", committed: %6" V8_PTR_PREFIX "d KB\n",
           this->SizeOfObjects() / KB,
           this->Available() / KB,
           this->CommittedMemory() / KB);
  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
           amount_of_external_allocated_memory_ / KB);
  PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
}


// TODO(1238405): Combine the infrastructure for --heap-stats and
// --log-gc to avoid the complicated preprocessor and flag testing.
void Heap::ReportStatisticsAfterGC() {
  // Similar to the before GC, we use some complicated logic to ensure that
  // NewSpace statistics are logged exactly once when --log-gc is turned on.
#if defined(DEBUG)
  if (FLAG_heap_stats) {
    new_space_.CollectStatistics();
    ReportHeapStatistics("After GC");
  } else if (FLAG_log_gc) {
    new_space_.ReportStatistics();
  }
#else
  if (FLAG_log_gc) new_space_.ReportStatistics();
#endif  // DEBUG
}


void Heap::GarbageCollectionPrologue() {
  {  AllowHeapAllocation for_the_first_part_of_prologue;
    isolate_->transcendental_cache()->Clear();
    ClearJSFunctionResultCaches();
    gc_count_++;
    unflattened_strings_length_ = 0;

    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
      mark_compact_collector()->EnableCodeFlushing(true);
    }

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap) {
      Verify();
    }
#endif
  }

#ifdef DEBUG
  ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);

  if (FLAG_gc_verbose) Print();

  ReportStatisticsBeforeGC();
#endif  // DEBUG

  store_buffer()->GCPrologue();

  if (FLAG_concurrent_osr) {
    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
  }
}


intptr_t Heap::SizeOfObjects() {
  intptr_t total = 0;
  AllSpaces spaces(this);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    total += space->SizeOfObjects();
  }
  return total;
}


void Heap::RepairFreeListsAfterBoot() {
471
  PagedSpaces spaces(this);
472
  for (PagedSpace* space = spaces.next();
473
       space != NULL;
474
       space = spaces.next()) {
475
    space->RepairFreeListsAfterBoot();
476
  }
477
}
478

    
479

    
480
void Heap::GarbageCollectionEpilogue() {
481
  store_buffer()->GCEpilogue();
482

    
483
  // In release mode, we only zap the from space under heap verification.
484
  if (Heap::ShouldZapGarbage()) {
485
    ZapFromSpace();
486
  }
487

    
488
#ifdef VERIFY_HEAP
489
  if (FLAG_verify_heap) {
490
    Verify();
491
  }
492
#endif
493

    
494
  AllowHeapAllocation for_the_rest_of_the_epilogue;
495

    
496
#ifdef DEBUG
497
  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
498
  if (FLAG_print_handles) PrintHandles();
499
  if (FLAG_gc_verbose) Print();
500
  if (FLAG_code_stats) ReportCodeStatistics("After GC");
501
#endif
502
  if (FLAG_deopt_every_n_garbage_collections > 0) {
503
    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
504
      Deoptimizer::DeoptimizeAll(isolate());
505
      gcs_since_last_deopt_ = 0;
506
    }
507
  }
508

    
509
  isolate_->counters()->alive_after_last_gc()->Set(
510
      static_cast<int>(SizeOfObjects()));
511

    
512
  isolate_->counters()->string_table_capacity()->Set(
513
      string_table()->Capacity());
514
  isolate_->counters()->number_of_symbols()->Set(
515
      string_table()->NumberOfElements());
516

    
517
  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
518
    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
519
        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
520
            (crankshaft_codegen_bytes_generated_
521
            + full_codegen_bytes_generated_)));
522
  }
523

    
524
  if (CommittedMemory() > 0) {
525
    isolate_->counters()->external_fragmentation_total()->AddSample(
526
        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
527

    
528
    isolate_->counters()->heap_fraction_new_space()->
529
        AddSample(static_cast<int>(
530
            (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
531
    isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
532
        static_cast<int>(
533
            (old_pointer_space()->CommittedMemory() * 100.0) /
534
            CommittedMemory()));
535
    isolate_->counters()->heap_fraction_old_data_space()->AddSample(
536
        static_cast<int>(
537
            (old_data_space()->CommittedMemory() * 100.0) /
538
            CommittedMemory()));
539
    isolate_->counters()->heap_fraction_code_space()->
540
        AddSample(static_cast<int>(
541
            (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
542
    isolate_->counters()->heap_fraction_map_space()->AddSample(
543
        static_cast<int>(
544
            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
545
    isolate_->counters()->heap_fraction_cell_space()->AddSample(
546
        static_cast<int>(
547
            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
548
    isolate_->counters()->heap_fraction_property_cell_space()->
549
        AddSample(static_cast<int>(
550
            (property_cell_space()->CommittedMemory() * 100.0) /
551
            CommittedMemory()));
552
    isolate_->counters()->heap_fraction_lo_space()->
553
        AddSample(static_cast<int>(
554
            (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
555

    
556
    isolate_->counters()->heap_sample_total_committed()->AddSample(
557
        static_cast<int>(CommittedMemory() / KB));
558
    isolate_->counters()->heap_sample_total_used()->AddSample(
559
        static_cast<int>(SizeOfObjects() / KB));
560
    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
561
        static_cast<int>(map_space()->CommittedMemory() / KB));
562
    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
563
        static_cast<int>(cell_space()->CommittedMemory() / KB));
564
    isolate_->counters()->
565
        heap_sample_property_cell_space_committed()->
566
            AddSample(static_cast<int>(
567
                property_cell_space()->CommittedMemory() / KB));
568
    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
569
        static_cast<int>(code_space()->CommittedMemory() / KB));
570
  }
571

    
572
#define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
573
  isolate_->counters()->space##_bytes_available()->Set(                        \
574
      static_cast<int>(space()->Available()));                                 \
575
  isolate_->counters()->space##_bytes_committed()->Set(                        \
576
      static_cast<int>(space()->CommittedMemory()));                           \
577
  isolate_->counters()->space##_bytes_used()->Set(                             \
578
      static_cast<int>(space()->SizeOfObjects()));
579
#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
580
  if (space()->CommittedMemory() > 0) {                                        \
581
    isolate_->counters()->external_fragmentation_##space()->AddSample(         \
582
        static_cast<int>(100 -                                                 \
583
            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
584
  }
585
#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
586
  UPDATE_COUNTERS_FOR_SPACE(space)                                             \
587
  UPDATE_FRAGMENTATION_FOR_SPACE(space)
588

    
589
  UPDATE_COUNTERS_FOR_SPACE(new_space)
590
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
591
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
592
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
593
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
594
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
595
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
596
  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
597
#undef UPDATE_COUNTERS_FOR_SPACE
598
#undef UPDATE_FRAGMENTATION_FOR_SPACE
599
#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
600

    
601
#if defined(DEBUG)
602
  ReportStatisticsAfterGC();
603
#endif  // DEBUG
604
#ifdef ENABLE_DEBUGGER_SUPPORT
605
  isolate_->debug()->AfterGarbageCollection();
606
#endif  // ENABLE_DEBUGGER_SUPPORT
607
}
608

    
609

    
610
void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
611
  // Since we are ignoring the return value, the exact choice of space does
612
  // not matter, so long as we do not specify NEW_SPACE, which would not
613
  // cause a full GC.
614
  mark_compact_collector_.SetFlags(flags);
615
  CollectGarbage(OLD_POINTER_SPACE, gc_reason);
616
  mark_compact_collector_.SetFlags(kNoGCFlags);
617
}
618

    
619

    
620
void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
621
  // Since we are ignoring the return value, the exact choice of space does
622
  // not matter, so long as we do not specify NEW_SPACE, which would not
623
  // cause a full GC.
624
  // Major GC would invoke weak handle callbacks on weakly reachable
625
  // handles, but won't collect weakly reachable objects until next
626
  // major GC.  Therefore if we collect aggressively and weak handle callback
627
  // has been invoked, we rerun major GC to release objects which become
628
  // garbage.
629
  // Note: as weak callbacks can execute arbitrary code, we cannot
630
  // hope that eventually there will be no weak callbacks invocations.
631
  // Therefore stop recollecting after several attempts.
632
  if (FLAG_concurrent_recompilation) {
633
    // The optimizing compiler may be unnecessarily holding on to memory.
634
    DisallowHeapAllocation no_recursive_gc;
635
    isolate()->optimizing_compiler_thread()->Flush();
636
  }
637
  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
638
                                     kReduceMemoryFootprintMask);
639
  isolate_->compilation_cache()->Clear();
640
  const int kMaxNumberOfAttempts = 7;
641
  const int kMinNumberOfAttempts = 2;
642
  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
643
    if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR, gc_reason, NULL) &&
644
        attempt + 1 >= kMinNumberOfAttempts) {
645
      break;
646
    }
647
  }
648
  mark_compact_collector()->SetFlags(kNoGCFlags);
649
  new_space_.Shrink();
650
  UncommitFromSpace();
651
  incremental_marking()->UncommitMarkingDeque();
652
}
653

    
654

    
655
bool Heap::CollectGarbage(AllocationSpace space,
656
                          GarbageCollector collector,
657
                          const char* gc_reason,
658
                          const char* collector_reason) {
659
  // The VM is in the GC state until exiting this function.
660
  VMState<GC> state(isolate_);
661

    
662
#ifdef DEBUG
663
  // Reset the allocation timeout to the GC interval, but make sure to
664
  // allow at least a few allocations after a collection. The reason
665
  // for this is that we have a lot of allocation sequences and we
666
  // assume that a garbage collection will allow the subsequent
667
  // allocation attempts to go through.
668
  allocation_timeout_ = Max(6, FLAG_gc_interval);
669
#endif
670

    
671
  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
672
    if (FLAG_trace_incremental_marking) {
673
      PrintF("[IncrementalMarking] Scavenge during marking.\n");
674
    }
675
  }
676

    
677
  if (collector == MARK_COMPACTOR &&
678
      !mark_compact_collector()->abort_incremental_marking() &&
679
      !incremental_marking()->IsStopped() &&
680
      !incremental_marking()->should_hurry() &&
681
      FLAG_incremental_marking_steps) {
682
    // Make progress in incremental marking.
683
    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
684
    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
685
                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
686
    if (!incremental_marking()->IsComplete()) {
687
      if (FLAG_trace_incremental_marking) {
688
        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
689
      }
690
      collector = SCAVENGER;
691
      collector_reason = "incremental marking delaying mark-sweep";
692
    }
693
  }
694

    
695
  bool next_gc_likely_to_collect_more = false;
696

    
697
  { GCTracer tracer(this, gc_reason, collector_reason);
698
    ASSERT(AllowHeapAllocation::IsAllowed());
699
    DisallowHeapAllocation no_allocation_during_gc;
700
    GarbageCollectionPrologue();
701
    // The GC count was incremented in the prologue.  Tell the tracer about
702
    // it.
703
    tracer.set_gc_count(gc_count_);
704

    
705
    // Tell the tracer which collector we've selected.
706
    tracer.set_collector(collector);
707

    
708
    {
709
      HistogramTimerScope histogram_timer_scope(
710
          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
711
                                   : isolate_->counters()->gc_compactor());
712
      next_gc_likely_to_collect_more =
713
          PerformGarbageCollection(collector, &tracer);
714
    }
715

    
716
    GarbageCollectionEpilogue();
717
  }
718

    
719
  // Start incremental marking for the next cycle. The heap snapshot
720
  // generator needs incremental marking to stay off after it aborted.
721
  if (!mark_compact_collector()->abort_incremental_marking() &&
722
      incremental_marking()->IsStopped() &&
723
      incremental_marking()->WorthActivating() &&
724
      NextGCIsLikelyToBeFull()) {
725
    incremental_marking()->Start();
726
  }
727

    
728
  return next_gc_likely_to_collect_more;
729
}
730

    
731

    
732
int Heap::NotifyContextDisposed() {
733
  if (FLAG_concurrent_recompilation) {
734
    // Flush the queued recompilation tasks.
735
    isolate()->optimizing_compiler_thread()->Flush();
736
  }
737
  flush_monomorphic_ics_ = true;
738
  return ++contexts_disposed_;
739
}
740

    
741

    
742
void Heap::PerformScavenge() {
743
  GCTracer tracer(this, NULL, NULL);
744
  if (incremental_marking()->IsStopped()) {
745
    PerformGarbageCollection(SCAVENGER, &tracer);
746
  } else {
747
    PerformGarbageCollection(MARK_COMPACTOR, &tracer);
748
  }
749
}
750

    
751

    
752
void Heap::MoveElements(FixedArray* array,
753
                        int dst_index,
754
                        int src_index,
755
                        int len) {
756
  if (len == 0) return;
757

    
758
  ASSERT(array->map() != fixed_cow_array_map());
759
  Object** dst_objects = array->data_start() + dst_index;
760
  OS::MemMove(dst_objects,
761
              array->data_start() + src_index,
762
              len * kPointerSize);
763
  if (!InNewSpace(array)) {
764
    for (int i = 0; i < len; i++) {
765
      // TODO(hpayer): check store buffer for entries
766
      if (InNewSpace(dst_objects[i])) {
767
        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
768
      }
769
    }
770
  }
771
  incremental_marking()->RecordWrites(array);
772
}
773

    
774

    
775
#ifdef VERIFY_HEAP
776
// Helper class for verifying the string table.
777
class StringTableVerifier : public ObjectVisitor {
778
 public:
779
  void VisitPointers(Object** start, Object** end) {
780
    // Visit all HeapObject pointers in [start, end).
781
    for (Object** p = start; p < end; p++) {
782
      if ((*p)->IsHeapObject()) {
783
        // Check that the string is actually internalized.
784
        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
785
              (*p)->IsInternalizedString());
786
      }
787
    }
788
  }
789
};
790

    
791

    
792
static void VerifyStringTable(Heap* heap) {
793
  StringTableVerifier verifier;
794
  heap->string_table()->IterateElements(&verifier);
795
}
796
#endif  // VERIFY_HEAP
797

    
798

    
799
static bool AbortIncrementalMarkingAndCollectGarbage(
800
    Heap* heap,
801
    AllocationSpace space,
802
    const char* gc_reason = NULL) {
803
  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
804
  bool result = heap->CollectGarbage(space, gc_reason);
805
  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
806
  return result;
807
}
808

    
809

    
810
void Heap::ReserveSpace(
811
    int *sizes,
812
    Address *locations_out) {
813
  bool gc_performed = true;
814
  int counter = 0;
815
  static const int kThreshold = 20;
816
  while (gc_performed && counter++ < kThreshold) {
817
    gc_performed = false;
818
    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
819
    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
820
      if (sizes[space] != 0) {
821
        MaybeObject* allocation;
822
        if (space == NEW_SPACE) {
823
          allocation = new_space()->AllocateRaw(sizes[space]);
824
        } else {
825
          allocation = paged_space(space)->AllocateRaw(sizes[space]);
826
        }
827
        FreeListNode* node;
828
        if (!allocation->To<FreeListNode>(&node)) {
829
          if (space == NEW_SPACE) {
830
            Heap::CollectGarbage(NEW_SPACE,
831
                                 "failed to reserve space in the new space");
832
          } else {
833
            AbortIncrementalMarkingAndCollectGarbage(
834
                this,
835
                static_cast<AllocationSpace>(space),
836
                "failed to reserve space in paged space");
837
          }
838
          gc_performed = true;
839
          break;
840
        } else {
841
          // Mark with a free list node, in case we have a GC before
842
          // deserializing.
843
          node->set_size(this, sizes[space]);
844
          locations_out[space] = node->address();
845
        }
846
      }
847
    }
848
  }
849

    
850
  if (gc_performed) {
851
    // Failed to reserve the space after several attempts.
852
    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
853
  }
854
}
855

    
856

    
void Heap::EnsureFromSpaceIsCommitted() {
  if (new_space_.CommitFromSpaceIfNeeded()) return;

  // Committing memory to from space failed.
  // Memory is exhausted and we will die.
  V8::FatalProcessOutOfMemory("Committing semi space failed.");
}


void Heap::ClearJSFunctionResultCaches() {
  if (isolate_->bootstrapper()->IsActive()) return;

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // Get the caches for this context. GC can happen when the context
    // is not fully initialized, so the caches can be undefined.
    Object* caches_or_undefined =
        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
    if (!caches_or_undefined->IsUndefined()) {
      FixedArray* caches = FixedArray::cast(caches_or_undefined);
      // Clear the caches:
      int length = caches->length();
      for (int i = 0; i < length; i++) {
        JSFunctionResultCache::cast(caches->get(i))->Clear();
      }
    }
    // Get the next context:
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::ClearNormalizedMapCaches() {
  if (isolate_->bootstrapper()->IsActive() &&
      !incremental_marking()->IsMarking()) {
    return;
  }

  Object* context = native_contexts_list_;
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache =
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      NormalizedMapCache::cast(cache)->Clear();
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
  double survival_rate =
      (static_cast<double>(young_survivors_after_last_gc_) * 100) /
      start_new_space_size;

  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }

  if (survival_rate < kYoungSurvivalRateLowThreshold) {
    low_survival_rate_period_length_++;
  } else {
    low_survival_rate_period_length_ = 0;
  }

  double survival_rate_diff = survival_rate_ - survival_rate;

  if (survival_rate_diff > kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(DECREASING);
  } else if (survival_rate_diff < -kYoungSurvivalRateAllowedDeviation) {
    set_survival_rate_trend(INCREASING);
  } else {
    set_survival_rate_trend(STABLE);
  }

  survival_rate_ = survival_rate;
}

bool Heap::PerformGarbageCollection(GarbageCollector collector,
940
                                    GCTracer* tracer) {
941
  bool next_gc_likely_to_collect_more = false;
942

    
943
  if (collector != SCAVENGER) {
944
    PROFILE(isolate_, CodeMovingGCEvent());
945
  }
946

    
947
#ifdef VERIFY_HEAP
948
  if (FLAG_verify_heap) {
949
    VerifyStringTable(this);
950
  }
951
#endif
952

    
953
  GCType gc_type =
954
      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
955

    
956
  {
957
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
958
    VMState<EXTERNAL> state(isolate_);
959
    HandleScope handle_scope(isolate_);
960
    CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
961
  }
962

    
963
  EnsureFromSpaceIsCommitted();
964

    
965
  int start_new_space_size = Heap::new_space()->SizeAsInt();
966

    
967
  if (IsHighSurvivalRate()) {
968
    // We speed up the incremental marker if it is running so that it
969
    // does not fall behind the rate of promotion, which would cause a
970
    // constantly growing old space.
971
    incremental_marking()->NotifyOfHighPromotionRate();
972
  }
973

    
974
  if (collector == MARK_COMPACTOR) {
975
    // Perform mark-sweep with optional compaction.
976
    MarkCompact(tracer);
977
    sweep_generation_++;
978

    
979
    UpdateSurvivalRateTrend(start_new_space_size);
980

    
981
    size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
982

    
983
    old_generation_allocation_limit_ =
984
        OldGenerationAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
985

    
986
    old_gen_exhausted_ = false;
987
  } else {
988
    tracer_ = tracer;
989
    Scavenge();
990
    tracer_ = NULL;
991

    
992
    UpdateSurvivalRateTrend(start_new_space_size);
993
  }
994

    
995
  if (!new_space_high_promotion_mode_active_ &&
996
      new_space_.Capacity() == new_space_.MaximumCapacity() &&
997
      IsStableOrIncreasingSurvivalTrend() &&
998
      IsHighSurvivalRate()) {
999
    // Stable high survival rates even though young generation is at
1000
    // maximum capacity indicates that most objects will be promoted.
1001
    // To decrease scavenger pauses and final mark-sweep pauses, we
1002
    // have to limit maximal capacity of the young generation.
1003
    SetNewSpaceHighPromotionModeActive(true);
1004
    if (FLAG_trace_gc) {
1005
      PrintPID("Limited new space size due to high promotion rate: %d MB\n",
1006
               new_space_.InitialCapacity() / MB);
1007
    }
1008
    // Support for global pre-tenuring uses the high promotion mode as a
1009
    // heuristic indicator of whether to pretenure or not, we trigger
1010
    // deoptimization here to take advantage of pre-tenuring as soon as
1011
    // possible.
1012
    if (FLAG_pretenuring) {
1013
      isolate_->stack_guard()->FullDeopt();
1014
    }
1015
  } else if (new_space_high_promotion_mode_active_ &&
1016
      IsStableOrDecreasingSurvivalTrend() &&
1017
      IsLowSurvivalRate()) {
1018
    // Decreasing low survival rates might indicate that the above high
1019
    // promotion mode is over and we should allow the young generation
1020
    // to grow again.
1021
    SetNewSpaceHighPromotionModeActive(false);
1022
    if (FLAG_trace_gc) {
1023
      PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
1024
               new_space_.MaximumCapacity() / MB);
1025
    }
1026
    // Trigger deoptimization here to turn off pre-tenuring as soon as
1027
    // possible.
1028
    if (FLAG_pretenuring) {
1029
      isolate_->stack_guard()->FullDeopt();
1030
    }
1031
  }
1032

    
1033
  if (new_space_high_promotion_mode_active_ &&
1034
      new_space_.Capacity() > new_space_.InitialCapacity()) {
1035
    new_space_.Shrink();
1036
  }
1037

    
1038
  isolate_->counters()->objs_since_last_young()->Set(0);
1039

    
1040
  // Callbacks that fire after this point might trigger nested GCs and
1041
  // restart incremental marking, the assertion can't be moved down.
1042
  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
1043

    
1044
  gc_post_processing_depth_++;
1045
  { AllowHeapAllocation allow_allocation;
1046
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1047
    next_gc_likely_to_collect_more =
1048
        isolate_->global_handles()->PostGarbageCollectionProcessing(
1049
            collector, tracer);
1050
  }
1051
  gc_post_processing_depth_--;
1052

    
1053
  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
1054

    
1055
  // Update relocatables.
1056
  Relocatable::PostGarbageCollectionProcessing(isolate_);
1057

    
1058
  if (collector == MARK_COMPACTOR) {
1059
    // Register the amount of external allocated memory.
1060
    amount_of_external_allocated_memory_at_last_global_gc_ =
1061
        amount_of_external_allocated_memory_;
1062
  }
1063

    
1064
  {
1065
    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
1066
    VMState<EXTERNAL> state(isolate_);
1067
    HandleScope handle_scope(isolate_);
1068
    CallGCEpilogueCallbacks(gc_type);
1069
  }
1070

    
1071
#ifdef VERIFY_HEAP
1072
  if (FLAG_verify_heap) {
1073
    VerifyStringTable(this);
1074
  }
1075
#endif
1076

    
1077
  return next_gc_likely_to_collect_more;
1078
}
1079

    
1080

    
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
      if (!gc_prologue_callbacks_[i].pass_isolate_) {
        v8::GCPrologueCallback callback =
            reinterpret_cast<v8::GCPrologueCallback>(
                gc_prologue_callbacks_[i].callback);
        callback(gc_type, flags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
      }
    }
  }
}


void Heap::CallGCEpilogueCallbacks(GCType gc_type) {
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
      if (!gc_epilogue_callbacks_[i].pass_isolate_) {
        v8::GCPrologueCallback callback =
            reinterpret_cast<v8::GCPrologueCallback>(
                gc_epilogue_callbacks_[i].callback);
        callback(gc_type, kNoGCCallbackFlags);
      } else {
        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
        gc_epilogue_callbacks_[i].callback(
            isolate, gc_type, kNoGCCallbackFlags);
      }
    }
  }
}


void Heap::MarkCompact(GCTracer* tracer) {
  gc_state_ = MARK_COMPACT;
  LOG(isolate_, ResourceEvent("markcompact", "begin"));

  mark_compact_collector_.Prepare(tracer);

  ms_count_++;
  tracer->set_full_gc_count(ms_count_);

  MarkCompactPrologue();

  mark_compact_collector_.CollectGarbage();

  LOG(isolate_, ResourceEvent("markcompact", "end"));

  gc_state_ = NOT_IN_GC;

  isolate_->counters()->objs_since_last_full()->Set(0);

  contexts_disposed_ = 0;

  flush_monomorphic_ics_ = false;
}


void Heap::MarkCompactPrologue() {
  // At any old GC clear the keyed lookup cache to enable collection of unused
  // maps.
  isolate_->keyed_lookup_cache()->Clear();
  isolate_->context_slot_cache()->Clear();
  isolate_->descriptor_lookup_cache()->Clear();
  RegExpResultsCache::Clear(string_split_cache());
  RegExpResultsCache::Clear(regexp_multiple_cache());

  isolate_->compilation_cache()->MarkCompactPrologue();

  CompletelyClearInstanceofCache();

  FlushNumberStringCache();
  if (FLAG_cleanup_code_caches_at_gc) {
    polymorphic_code_cache()->set_cache(undefined_value());
  }

  ClearNormalizedMapCaches();
}


// Helper class for copying HeapObjects
class ScavengeVisitor: public ObjectVisitor {
 public:
  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) { ScavengePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) ScavengePointer(p);
  }

 private:
  void ScavengePointer(Object** p) {
    Object* object = *p;
    if (!heap_->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }

  Heap* heap_;
};


#ifdef VERIFY_HEAP
// Visitor class to verify pointers in code or data space do not point into
// new space.
class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
 public:
  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
  void VisitPointers(Object** start, Object**end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
      }
    }
  }

 private:
  Heap* heap_;
};


static void VerifyNonPointerSpacePointers(Heap* heap) {
  // Verify that there are no pointers to new space in spaces where we
  // do not expect them.
  VerifyNonPointerSpacePointersVisitor v(heap);
  HeapObjectIterator code_it(heap->code_space());
  for (HeapObject* object = code_it.Next();
       object != NULL; object = code_it.Next())
    object->Iterate(&v);

  // The old data space is normally swept conservatively, so the iterator
  // does not work there and we normally skip the next bit.
  if (!heap->old_data_space()->was_swept_conservatively()) {
    HeapObjectIterator data_it(heap->old_data_space());
    for (HeapObject* object = data_it.Next();
         object != NULL; object = data_it.Next())
      object->Iterate(&v);
  }
}
#endif  // VERIFY_HEAP


void Heap::CheckNewSpaceExpansionCriteria() {
  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
      survived_since_last_expansion_ > new_space_.Capacity() &&
      !new_space_high_promotion_mode_active_) {
    // Grow the size of new space if there is room to grow, enough data
    // has survived scavenge since the last expansion and we are not in
    // high promotion mode.
    new_space_.Grow();
    survived_since_last_expansion_ = 0;
  }
}


static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
  return heap->InNewSpace(*p) &&
      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}


void Heap::ScavengeStoreBufferCallback(
    Heap* heap,
    MemoryChunk* page,
    StoreBufferEvent event) {
  heap->store_buffer_rebuilder_.Callback(page, event);
}


void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
  if (event == kStoreBufferStartScanningPagesEvent) {
    start_of_current_page_ = NULL;
    current_page_ = NULL;
  } else if (event == kStoreBufferScanningPageEvent) {
    if (current_page_ != NULL) {
      // If this page already overflowed the store buffer during this iteration.
      if (current_page_->scan_on_scavenge()) {
        // Then we should wipe out the entries that have been added for it.
        store_buffer_->SetTop(start_of_current_page_);
      } else if (store_buffer_->Top() - start_of_current_page_ >=
                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
        // Did we find too many pointers in the previous page?  The heuristic is
        // that no page can take more than 1/5 of the remaining slots in the
        // store buffer.
        current_page_->set_scan_on_scavenge(true);
        store_buffer_->SetTop(start_of_current_page_);
      } else {
        // In this case the page we scanned took a reasonable number of slots in
        // the store buffer.  It has now been rehabilitated and is no longer
        // marked scan_on_scavenge.
        ASSERT(!current_page_->scan_on_scavenge());
      }
    }
    start_of_current_page_ = store_buffer_->Top();
    current_page_ = page;
  } else if (event == kStoreBufferFullEvent) {
    // The current page overflowed the store buffer again.  Wipe out its entries
    // in the store buffer and mark it scan-on-scavenge again.  This may happen
    // several times while scanning.
    if (current_page_ == NULL) {
      // Store Buffer overflowed while scanning promoted objects.  These are not
      // in any particular page, though they are likely to be clustered by the
      // allocation routines.
      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
    } else {
      // Store Buffer overflowed while scanning a particular old space page for
      // pointers to new space.
      ASSERT(current_page_ == page);
      ASSERT(page != NULL);
      current_page_->set_scan_on_scavenge(true);
      ASSERT(start_of_current_page_ != store_buffer_->Top());
      store_buffer_->SetTop(start_of_current_page_);
    }
  } else {
    UNREACHABLE();
  }
}


void PromotionQueue::Initialize() {
  // Assumes that a NewSpacePage exactly fits a number of promotion queue
  // entries (where each is a pair of intptr_t). This allows us to simplify
  // the test for when to switch pages.
  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
         == 0);
  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
  front_ = rear_ =
      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
  emergency_stack_ = NULL;
  guard_ = false;
}


void PromotionQueue::RelocateQueueHead() {
  ASSERT(emergency_stack_ == NULL);

  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
  intptr_t* head_start = rear_;
  intptr_t* head_end =
      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));

  int entries_count =
      static_cast<int>(head_end - head_start) / kEntrySizeInWords;

  emergency_stack_ = new List<Entry>(2 * entries_count);

  while (head_start != head_end) {
    int size = static_cast<int>(*(head_start++));
    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
    emergency_stack_->Add(Entry(obj, size));
  }
  rear_ = head_end;
}


class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }

  virtual Object* RetainAs(Object* object) {
    if (!heap_->InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return NULL;
  }

 private:
  Heap* heap_;
};


void Heap::Scavenge() {
1362
  RelocationLock relocation_lock(this);
1363

    
1364
  allocation_mementos_found_ = 0;
1365

    
1366
#ifdef VERIFY_HEAP
1367
  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
1368
#endif
1369

    
1370
  gc_state_ = SCAVENGE;
1371

    
1372
  // Implements Cheney's copying algorithm
1373
  LOG(isolate_, ResourceEvent("scavenge", "begin"));
1374

    
1375
  // Clear descriptor cache.
1376
  isolate_->descriptor_lookup_cache()->Clear();
1377

    
1378
  // Used for updating survived_since_last_expansion_ at function end.
1379
  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
1380

    
1381
  CheckNewSpaceExpansionCriteria();
1382

    
1383
  SelectScavengingVisitorsTable();
1384

    
1385
  incremental_marking()->PrepareForScavenge();
1386

    
1387
  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
1388
  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
1389

    
1390
  // Flip the semispaces.  After flipping, to space is empty, from space has
1391
  // live objects.
1392
  new_space_.Flip();
1393
  new_space_.ResetAllocationInfo();
1394

    
1395
  // We need to sweep newly copied objects which can be either in the
1396
  // to space or promoted to the old generation.  For to-space
1397
  // objects, we treat the bottom of the to space as a queue.  Newly
1398
  // copied and unswept objects lie between a 'front' mark and the
1399
  // allocation pointer.
1400
  //
1401
  // Promoted objects can go into various old-generation spaces, and
1402
  // can be allocated internally in the spaces (from the free list).
1403
  // We treat the top of the to space as a queue of addresses of
1404
  // promoted objects.  The addresses of newly promoted and unswept
1405
  // objects lie between a 'front' mark and a 'rear' mark that is
1406
  // updated as a side effect of promoting an object.
1407
  //
1408
  // There is guaranteed to be enough room at the top of the to space
1409
  // for the addresses of promoted objects: every object promoted
1410
  // frees up its size in bytes from the top of the new space, and
1411
  // objects are at least one pointer in size.
1412
  Address new_space_front = new_space_.ToSpaceStart();
1413
  promotion_queue_.Initialize();
1414

    
1415
#ifdef DEBUG
1416
  store_buffer()->Clean();
1417
#endif
1418

    
1419
  ScavengeVisitor scavenge_visitor(this);
1420
  // Copy roots.
1421
  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1422

    
1423
  // Copy objects reachable from the old generation.
1424
  {
1425
    StoreBufferRebuildScope scope(this,
1426
                                  store_buffer(),
1427
                                  &ScavengeStoreBufferCallback);
1428
    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1429
  }
1430

    
1431
  // Copy objects reachable from simple cells by scavenging cell values
1432
  // directly.
1433
  HeapObjectIterator cell_iterator(cell_space_);
1434
  for (HeapObject* heap_object = cell_iterator.Next();
1435
       heap_object != NULL;
1436
       heap_object = cell_iterator.Next()) {
1437
    if (heap_object->IsCell()) {
1438
      Cell* cell = Cell::cast(heap_object);
1439
      Address value_address = cell->ValueAddress();
1440
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1441
    }
1442
  }
1443

    
1444
  // Copy objects reachable from global property cells by scavenging global
1445
  // property cell values directly.
1446
  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
1447
  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
1448
       heap_object != NULL;
1449
       heap_object = js_global_property_cell_iterator.Next()) {
1450
    if (heap_object->IsPropertyCell()) {
1451
      PropertyCell* cell = PropertyCell::cast(heap_object);
1452
      Address value_address = cell->ValueAddress();
1453
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1454
      Address type_address = cell->TypeAddress();
1455
      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
1456
    }
1457
  }
1458

    
1459
  // Copy objects reachable from the code flushing candidates list.
1460
  MarkCompactCollector* collector = mark_compact_collector();
1461
  if (collector->is_code_flushing_enabled()) {
1462
    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
1463
  }
1464

    
1465
  // Scavenge object reachable from the native contexts list directly.
1466
  scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
1467

    
1468
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1469

    
1470
  while (isolate()->global_handles()->IterateObjectGroups(
1471
      &scavenge_visitor, &IsUnscavengedHeapObject)) {
1472
    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1473
  }
1474
  isolate()->global_handles()->RemoveObjectGroups();
1475
  isolate()->global_handles()->RemoveImplicitRefGroups();
1476

    
1477
  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1478
      &IsUnscavengedHeapObject);
1479
  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1480
      &scavenge_visitor);
1481
  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1482

    
1483
  UpdateNewSpaceReferencesInExternalStringTable(
1484
      &UpdateNewSpaceReferenceInExternalStringTableEntry);
1485

    
1486
  promotion_queue_.Destroy();
1487

    
1488
  if (!FLAG_watch_ic_patching) {
1489
    isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1490
  }
1491
  incremental_marking()->UpdateMarkingDequeAfterScavenge();
1492

    
1493
  ScavengeWeakObjectRetainer weak_object_retainer(this);
1494
  ProcessWeakReferences(&weak_object_retainer);
1495

    
1496
  ASSERT(new_space_front == new_space_.top());
1497

    
1498
  // Set age mark.
1499
  new_space_.set_age_mark(new_space_.top());
1500

    
1501
  new_space_.LowerInlineAllocationLimit(
1502
      new_space_.inline_allocation_limit_step());
1503

    
1504
  // Update how much has survived scavenge.
1505
  IncrementYoungSurvivorsCounter(static_cast<int>(
1506
      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
1507

    
1508
  LOG(isolate_, ResourceEvent("scavenge", "end"));
1509

    
1510
  gc_state_ = NOT_IN_GC;
1511

    
1512
  scavenges_since_last_idle_round_++;
1513

    
1514
  if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0) {
1515
    PrintF("AllocationMementos found during scavenge = %d\n",
1516
           allocation_mementos_found_);
1517
  }
1518
}
1519

    
1520

    
1521
String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1522
                                                                Object** p) {
1523
  MapWord first_word = HeapObject::cast(*p)->map_word();
1524

    
1525
  if (!first_word.IsForwardingAddress()) {
1526
    // Unreachable external string can be finalized.
1527
    heap->FinalizeExternalString(String::cast(*p));
1528
    return NULL;
1529
  }
1530

    
1531
  // String is still reachable.
1532
  return String::cast(first_word.ToForwardingAddress());
1533
}
1534

    
1535

    
1536
void Heap::UpdateNewSpaceReferencesInExternalStringTable(
1537
    ExternalStringTableUpdaterCallback updater_func) {
1538
#ifdef VERIFY_HEAP
1539
  if (FLAG_verify_heap) {
1540
    external_string_table_.Verify();
1541
  }
1542
#endif
1543

    
1544
  if (external_string_table_.new_space_strings_.is_empty()) return;
1545

    
1546
  Object** start = &external_string_table_.new_space_strings_[0];
1547
  Object** end = start + external_string_table_.new_space_strings_.length();
1548
  Object** last = start;
1549

    
1550
  for (Object** p = start; p < end; ++p) {
1551
    ASSERT(InFromSpace(*p));
1552
    String* target = updater_func(this, p);
1553

    
1554
    if (target == NULL) continue;
1555

    
1556
    ASSERT(target->IsExternalString());
1557

    
1558
    if (InNewSpace(target)) {
1559
      // String is still in new space.  Update the table entry.
1560
      *last = target;
1561
      ++last;
1562
    } else {
1563
      // String got promoted.  Move it to the old string list.
1564
      external_string_table_.AddOldString(target);
1565
    }
1566
  }
1567

    
1568
  ASSERT(last <= end);
1569
  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1570
}
1571

    
1572

    
1573
void Heap::UpdateReferencesInExternalStringTable(
1574
    ExternalStringTableUpdaterCallback updater_func) {
1575

    
1576
  // Update old space string references.
1577
  if (external_string_table_.old_space_strings_.length() > 0) {
1578
    Object** start = &external_string_table_.old_space_strings_[0];
1579
    Object** end = start + external_string_table_.old_space_strings_.length();
1580
    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1581
  }
1582

    
1583
  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1584
}
1585

    
1586

    
1587
template <class T>
1588
struct WeakListVisitor;
1589

    
1590

    
1591
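// Walks the weak list threaded through T's "weak next" field, dropping
// elements that the retainer does not keep alive and, when record_slots is
// set, recording the updated link slots for the compacting collector.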
template <class T>
static Object* VisitWeakList(Heap* heap,
                             Object* list,
                             WeakObjectRetainer* retainer,
                             bool record_slots) {
  Object* undefined = heap->undefined_value();
  Object* head = undefined;
  T* tail = NULL;
  MarkCompactCollector* collector = heap->mark_compact_collector();
  while (list != undefined) {
    // Check whether to keep the candidate in the list.
    T* candidate = reinterpret_cast<T*>(list);
    Object* retained = retainer->RetainAs(list);
    if (retained != NULL) {
      if (head == undefined) {
        // First element in the list.
        head = retained;
      } else {
        // Subsequent elements in the list.
        ASSERT(tail != NULL);
        WeakListVisitor<T>::SetWeakNext(tail, retained);
        if (record_slots) {
          Object** next_slot =
            HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
          collector->RecordSlot(next_slot, next_slot, retained);
        }
      }
      // Retained object is new tail.
      ASSERT(!retained->IsUndefined());
      candidate = reinterpret_cast<T*>(retained);
      tail = candidate;


      // tail is a live object, visit it.
      WeakListVisitor<T>::VisitLiveObject(
          heap, tail, retainer, record_slots);
    } else {
      WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
    }

    // Move to next element in the list.
    list = WeakListVisitor<T>::WeakNext(candidate);
  }

  // Terminate the list if there is one or more elements.
  if (tail != NULL) {
    WeakListVisitor<T>::SetWeakNext(tail, undefined);
  }
  return head;
}


template<>
struct WeakListVisitor<JSFunction> {
  static void SetWeakNext(JSFunction* function, Object* next) {
    function->set_next_function_link(next);
  }

  static Object* WeakNext(JSFunction* function) {
    return function->next_function_link();
  }

  static int WeakNextOffset() {
    return JSFunction::kNextFunctionLinkOffset;
  }

  static void VisitLiveObject(Heap*, JSFunction*,
                              WeakObjectRetainer*, bool) {
  }

  static void VisitPhantomObject(Heap*, JSFunction*) {
  }
};


template<>
struct WeakListVisitor<Code> {
  static void SetWeakNext(Code* code, Object* next) {
    code->set_next_code_link(next);
  }

  static Object* WeakNext(Code* code) {
    return code->next_code_link();
  }

  static int WeakNextOffset() {
    return Code::kNextCodeLinkOffset;
  }

  static void VisitLiveObject(Heap*, Code*,
                              WeakObjectRetainer*, bool) {
  }

  static void VisitPhantomObject(Heap*, Code*) {
  }
};


template<>
struct WeakListVisitor<Context> {
  static void SetWeakNext(Context* context, Object* next) {
    context->set(Context::NEXT_CONTEXT_LINK,
                 next,
                 UPDATE_WRITE_BARRIER);
  }

  static Object* WeakNext(Context* context) {
    return context->get(Context::NEXT_CONTEXT_LINK);
  }

  static void VisitLiveObject(Heap* heap,
                              Context* context,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {
    // Process the three weak lists linked off the context.
    DoWeakList<JSFunction>(heap, context, retainer, record_slots,
        Context::OPTIMIZED_FUNCTIONS_LIST);
    DoWeakList<Code>(heap, context, retainer, record_slots,
        Context::OPTIMIZED_CODE_LIST);
    DoWeakList<Code>(heap, context, retainer, record_slots,
        Context::DEOPTIMIZED_CODE_LIST);
  }

  template<class T>
  static void DoWeakList(Heap* heap,
                         Context* context,
                         WeakObjectRetainer* retainer,
                         bool record_slots,
                         int index) {
    // Visit the weak list, removing dead intermediate elements.
    Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer,
        record_slots);

    // Update the list head.
    context->set(index, list_head, UPDATE_WRITE_BARRIER);

    if (record_slots) {
      // Record the updated slot if necessary.
      Object** head_slot = HeapObject::RawField(
          context, FixedArray::SizeFor(index));
      heap->mark_compact_collector()->RecordSlot(
          head_slot, head_slot, list_head);
    }
  }

  static void VisitPhantomObject(Heap*, Context*) {
  }

  static int WeakNextOffset() {
    return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
  }
};


void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
  // We don't record weak slots during marking or scavenges.
  // Instead we do it once when we complete mark-compact cycle.
  // Note that write barrier has no effect if we are already in the middle of
  // compacting mark-sweep cycle and we have to record slots manually.
  bool record_slots =
      gc_state() == MARK_COMPACT &&
      mark_compact_collector()->is_compacting();
  ProcessArrayBuffers(retainer, record_slots);
  ProcessNativeContexts(retainer, record_slots);
  ProcessAllocationSites(retainer, record_slots);
}

void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer,
                                 bool record_slots) {
  Object* head =
      VisitWeakList<Context>(
          this, native_contexts_list(), retainer, record_slots);
  // Update the head of the list of contexts.
  native_contexts_list_ = head;
}


template<>
struct WeakListVisitor<JSArrayBufferView> {
  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(JSArrayBufferView* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap*,
                              JSArrayBufferView* obj,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {}

  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}

  static int WeakNextOffset() {
    return JSArrayBufferView::kWeakNextOffset;
  }
};


template<>
struct WeakListVisitor<JSArrayBuffer> {
  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(JSArrayBuffer* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap* heap,
                              JSArrayBuffer* array_buffer,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {
    Object* typed_array_obj =
        VisitWeakList<JSArrayBufferView>(
            heap,
            array_buffer->weak_first_view(),
            retainer, record_slots);
    array_buffer->set_weak_first_view(typed_array_obj);
    if (typed_array_obj != heap->undefined_value() && record_slots) {
      Object** slot = HeapObject::RawField(
          array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
      heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
    }
  }

  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
  }

  static int WeakNextOffset() {
    return JSArrayBuffer::kWeakNextOffset;
  }
};


void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
                               bool record_slots) {
  Object* array_buffer_obj =
      VisitWeakList<JSArrayBuffer>(this,
                                   array_buffers_list(),
                                   retainer, record_slots);
  set_array_buffers_list(array_buffer_obj);
}


void Heap::TearDownArrayBuffers() {
  Object* undefined = undefined_value();
  for (Object* o = array_buffers_list(); o != undefined;) {
    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
    Runtime::FreeArrayBuffer(isolate(), buffer);
    o = buffer->weak_next();
  }
  array_buffers_list_ = undefined;
}


template<>
struct WeakListVisitor<AllocationSite> {
  static void SetWeakNext(AllocationSite* obj, Object* next) {
    obj->set_weak_next(next);
  }

  static Object* WeakNext(AllocationSite* obj) {
    return obj->weak_next();
  }

  static void VisitLiveObject(Heap* heap,
                              AllocationSite* array_buffer,
                              WeakObjectRetainer* retainer,
                              bool record_slots) {}

  static void VisitPhantomObject(Heap* heap, AllocationSite* phantom) {}

  static int WeakNextOffset() {
    return AllocationSite::kWeakNextOffset;
  }
};


void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer,
                                  bool record_slots) {
  Object* allocation_site_obj =
      VisitWeakList<AllocationSite>(this,
                                    allocation_sites_list(),
                                    retainer, record_slots);
  set_allocation_sites_list(allocation_site_obj);
}


void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
  DisallowHeapAllocation no_allocation;

  // Both the external string table and the string table may contain
  // external strings, but neither lists them exhaustively, nor is the
  // intersection set empty.  Therefore we iterate over the external string
  // table first, ignoring internalized strings, and then over the
  // internalized string table.

  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit ExternalStringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        // Visit non-internalized external strings,
        // since internalized strings are listed in the string table.
        if (!(*p)->IsInternalizedString()) {
          ASSERT((*p)->IsExternalString());
          visitor_->VisitExternalString(Utils::ToLocal(
              Handle<String>(String::cast(*p))));
        }
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } external_string_table_visitor(visitor);

  external_string_table_.Iterate(&external_string_table_visitor);

  class StringTableVisitorAdapter : public ObjectVisitor {
   public:
    explicit StringTableVisitorAdapter(
        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
    virtual void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if ((*p)->IsExternalString()) {
          ASSERT((*p)->IsInternalizedString());
          visitor_->VisitExternalString(Utils::ToLocal(
              Handle<String>(String::cast(*p))));
        }
      }
    }
   private:
    v8::ExternalResourceVisitor* visitor_;
  } string_table_visitor(visitor);

  string_table()->IterateElements(&string_table_visitor);
}


class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
 public:
  static inline void VisitPointer(Heap* heap, Object** p) {
    Object* object = *p;
    if (!heap->InNewSpace(object)) return;
    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
                         reinterpret_cast<HeapObject*>(object));
  }
};


Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
                         Address new_space_front) {
  do {
    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
    // The addresses new_space_front and new_space_.top() define a
    // queue of unprocessed copied objects.  Process them until the
    // queue is empty.
    while (new_space_front != new_space_.top()) {
      if (!NewSpacePage::IsAtEnd(new_space_front)) {
        HeapObject* object = HeapObject::FromAddress(new_space_front);
        new_space_front +=
          NewSpaceScavenger::IterateBody(object->map(), object);
      } else {
        new_space_front =
            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
      }
    }

    // Promote and process all the to-be-promoted objects.
    {
      StoreBufferRebuildScope scope(this,
                                    store_buffer(),
                                    &ScavengeStoreBufferCallback);
      while (!promotion_queue()->is_empty()) {
        HeapObject* target;
        int size;
        promotion_queue()->remove(&target, &size);

        // Promoted object might be already partially visited
        // during old space pointer iteration. Thus we search specifically
        // for pointers to from semispace instead of looking for pointers
        // to new space.
        ASSERT(!target->IsMap());
        IterateAndMarkPointersToFromSpace(target->address(),
                                          target->address() + size,
                                          &ScavengeObject);
      }
    }

    // Take another spin if there are now unswept objects in new space
    // (there are currently no more unswept promoted objects).
  } while (new_space_front != new_space_.top());

  return new_space_front;
}


STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == 0);
STATIC_ASSERT((ConstantPoolArray::kHeaderSize & kDoubleAlignmentMask) == 0);


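// The allocation for a double-aligned object includes one extra pointer-sized
// word (see EvacuateObject below).  If the object is misaligned, a one-word
// filler is placed in front and the object is shifted up by one word;
// otherwise the spare word at the end becomes the filler.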
INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
                                              HeapObject* object,
                                              int size));

static HeapObject* EnsureDoubleAligned(Heap* heap,
                                       HeapObject* object,
                                       int size) {
  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
    heap->CreateFillerObjectAt(object->address(), kPointerSize);
    return HeapObject::FromAddress(object->address() + kPointerSize);
  } else {
    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
                               kPointerSize);
    return object;
  }
}


enum LoggingAndProfiling {
  LOGGING_AND_PROFILING_ENABLED,
  LOGGING_AND_PROFILING_DISABLED
};


enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };


template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
class ScavengingVisitor : public StaticVisitorBase {
 public:
  static void Initialize() {
    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
    table_.Register(kVisitByteArray, &EvacuateByteArray);
    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);

    table_.Register(kVisitNativeContext,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Context::kSize>);

    table_.Register(kVisitConsString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<ConsString::kSize>);

    table_.Register(kVisitSlicedString,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SlicedString::kSize>);

    table_.Register(kVisitSymbol,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<Symbol::kSize>);

    table_.Register(kVisitSharedFunctionInfo,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                        template VisitSpecialized<SharedFunctionInfo::kSize>);

    table_.Register(kVisitJSWeakMap,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSWeakSet,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSArrayBuffer,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSTypedArray,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSDataView,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    table_.Register(kVisitJSRegExp,
                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
                    Visit);

    if (marks_handling == IGNORE_MARKS) {
      table_.Register(kVisitJSFunction,
                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
                          template VisitSpecialized<JSFunction::kSize>);
    } else {
      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
    }

    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
    return &table_;
  }

 private:
  enum ObjectContents  { DATA_OBJECT, POINTER_OBJECT };

  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
    bool should_record = false;
#ifdef DEBUG
    should_record = FLAG_heap_stats;
#endif
    should_record = should_record || FLAG_log_gc;
    if (should_record) {
      if (heap->new_space()->Contains(obj)) {
        heap->new_space()->RecordAllocation(obj);
      } else {
        heap->new_space()->RecordPromotion(obj);
      }
    }
  }

  // Helper function used by CopyObject to copy a source object to an
  // allocated target object and update the forwarding pointer in the source
  // object.  Returns the target object.
  INLINE(static void MigrateObject(Heap* heap,
                                   HeapObject* source,
                                   HeapObject* target,
                                   int size)) {
    // Copy the content of source to target.
    heap->CopyBlock(target->address(), source->address(), size);

    // Set the forwarding address.
    source->set_map_word(MapWord::FromForwardingAddress(target));

    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
      // Update NewSpace stats if necessary.
      RecordCopiedObject(heap, target);
      Isolate* isolate = heap->isolate();
      HeapProfiler* heap_profiler = isolate->heap_profiler();
      if (heap_profiler->is_profiling()) {
        heap_profiler->ObjectMoveEvent(source->address(), target->address(),
                                       size);
      }
      if (isolate->logger()->is_logging_code_events() ||
          isolate->cpu_profiler()->is_profiling()) {
        if (target->IsSharedFunctionInfo()) {
          PROFILE(isolate, SharedFunctionInfoMoveEvent(
              source->address(), target->address()));
        }
      }
    }

    if (marks_handling == TRANSFER_MARKS) {
      if (Marking::TransferColor(source, target)) {
        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
      }
    }
  }


  template<ObjectContents object_contents, int alignment>
  static inline void EvacuateObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* object,
                                    int object_size) {
    SLOW_ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
    SLOW_ASSERT(object->Size() == object_size);

    int allocation_size = object_size;
    if (alignment != kObjectAlignment) {
      ASSERT(alignment == kDoubleAlignment);
      allocation_size += kPointerSize;
    }

    Heap* heap = map->GetHeap();
    if (heap->ShouldBePromoted(object->address(), object_size)) {
      MaybeObject* maybe_result;

      if (object_contents == DATA_OBJECT) {
        ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
        maybe_result = heap->old_data_space()->AllocateRaw(allocation_size);
      } else {
        ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
        maybe_result = heap->old_pointer_space()->AllocateRaw(allocation_size);
      }

      Object* result = NULL;  // Initialization to please compiler.
      if (maybe_result->ToObject(&result)) {
        HeapObject* target = HeapObject::cast(result);

        if (alignment != kObjectAlignment) {
          target = EnsureDoubleAligned(heap, target, allocation_size);
        }

        // Order is important: slot might be inside of the target if target
        // was allocated over a dead object and slot comes from the store
        // buffer.
        *slot = target;
        MigrateObject(heap, object, target, object_size);

        if (object_contents == POINTER_OBJECT) {
          if (map->instance_type() == JS_FUNCTION_TYPE) {
            heap->promotion_queue()->insert(
                target, JSFunction::kNonWeakFieldsEndOffset);
          } else {
            heap->promotion_queue()->insert(target, object_size);
          }
        }

        heap->tracer()->increment_promoted_objects_size(object_size);
        return;
      }
    }
    ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
    MaybeObject* allocation = heap->new_space()->AllocateRaw(allocation_size);
    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
    Object* result = allocation->ToObjectUnchecked();
    HeapObject* target = HeapObject::cast(result);

    if (alignment != kObjectAlignment) {
      target = EnsureDoubleAligned(heap, target, allocation_size);
    }

    // Order is important: slot might be inside of the target if target
    // was allocated over a dead object and slot comes from the store
    // buffer.
    *slot = target;
    MigrateObject(heap, object, target, object_size);
    return;
  }


  static inline void EvacuateJSFunction(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    ObjectEvacuationStrategy<POINTER_OBJECT>::
        template VisitSpecialized<JSFunction::kSize>(map, slot, object);

    HeapObject* target = *slot;
    MarkBit mark_bit = Marking::MarkBitFrom(target);
    if (Marking::IsBlack(mark_bit)) {
      // This object is black and it might not be rescanned by marker.
      // We should explicitly record code entry slot for compaction because
      // promotion queue processing (IterateAndMarkPointersToFromSpace) will
      // miss it as it is not HeapObject-tagged.
      Address code_entry_slot =
          target->address() + JSFunction::kCodeEntryOffset;
      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
      map->GetHeap()->mark_compact_collector()->
          RecordCodeEntrySlot(code_entry_slot, code);
    }
  }


  static inline void EvacuateFixedArray(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateFixedDoubleArray(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
    int object_size = FixedDoubleArray::SizeFor(length);
    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateByteArray(Map* map,
                                       HeapObject** slot,
                                       HeapObject* object) {
    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateSeqOneByteString(Map* map,
                                            HeapObject** slot,
                                            HeapObject* object) {
    int object_size = SeqOneByteString::cast(object)->
        SeqOneByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline void EvacuateSeqTwoByteString(Map* map,
                                              HeapObject** slot,
                                              HeapObject* object) {
    int object_size = SeqTwoByteString::cast(object)->
        SeqTwoByteStringSize(map->instance_type());
    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }


  static inline bool IsShortcutCandidate(int type) {
    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
  }

  static inline void EvacuateShortcutCandidate(Map* map,
                                               HeapObject** slot,
                                               HeapObject* object) {
    ASSERT(IsShortcutCandidate(map->instance_type()));

    Heap* heap = map->GetHeap();

    if (marks_handling == IGNORE_MARKS &&
        ConsString::cast(object)->unchecked_second() ==
        heap->empty_string()) {
      HeapObject* first =
          HeapObject::cast(ConsString::cast(object)->unchecked_first());

      *slot = first;

      if (!heap->InNewSpace(first)) {
        object->set_map_word(MapWord::FromForwardingAddress(first));
        return;
      }

      MapWord first_word = first->map_word();
      if (first_word.IsForwardingAddress()) {
        HeapObject* target = first_word.ToForwardingAddress();

        *slot = target;
        object->set_map_word(MapWord::FromForwardingAddress(target));
        return;
      }

      heap->DoScavengeObject(first->map(), slot, first);
      object->set_map_word(MapWord::FromForwardingAddress(*slot));
      return;
    }

    int object_size = ConsString::kSize;
    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
        map, slot, object, object_size);
  }

  template<ObjectContents object_contents>
  class ObjectEvacuationStrategy {
   public:
    template<int object_size>
    static inline void VisitSpecialized(Map* map,
                                        HeapObject** slot,
                                        HeapObject* object) {
      EvacuateObject<object_contents, kObjectAlignment>(
          map, slot, object, object_size);
    }

    static inline void Visit(Map* map,
                             HeapObject** slot,
                             HeapObject* object) {
      int object_size = map->instance_size();
      EvacuateObject<object_contents, kObjectAlignment>(
          map, slot, object, object_size);
    }
  };

  static VisitorDispatchTable<ScavengingCallback> table_;
};


template<MarksHandling marks_handling,
         LoggingAndProfiling logging_and_profiling_mode>
VisitorDispatchTable<ScavengingCallback>
    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;


static void InitializeScavengingVisitorsTables() {
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
  ScavengingVisitor<TRANSFER_MARKS,
                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
}


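// Picks one of the four statically initialized visitor tables above, based on
// whether incremental marking is active (marks must be transferred) and
// whether logging or profiling is enabled.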
void Heap::SelectScavengingVisitorsTable() {
  bool logging_and_profiling =
      isolate()->logger()->is_logging() ||
      isolate()->cpu_profiler()->is_profiling() ||
      (isolate()->heap_profiler() != NULL &&
       isolate()->heap_profiler()->is_profiling());

  if (!incremental_marking()->IsMarking()) {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<IGNORE_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }
  } else {
    if (!logging_and_profiling) {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
    } else {
      scavenging_visitors_table_.CopyFrom(
          ScavengingVisitor<TRANSFER_MARKS,
                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
    }

    if (incremental_marking()->IsCompacting()) {
      // When compacting forbid short-circuiting of cons-strings.
      // Scavenging code relies on the fact that new space object
      // can't be evacuated into evacuation candidate but
      // short-circuiting violates this assumption.
      scavenging_visitors_table_.Register(
          StaticVisitorBase::kVisitShortcutCandidate,
          scavenging_visitors_table_.GetVisitorById(
              StaticVisitorBase::kVisitConsString));
    }
  }
}


void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
  SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
  MapWord first_word = object->map_word();
  SLOW_ASSERT(!first_word.IsForwardingAddress());
  Map* map = first_word.ToMap();
  map->GetHeap()->DoScavengeObject(map, p, object);
}


MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
                                      int instance_size) {
  Object* result;
  MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
  if (!maybe_result->ToObject(&result)) return maybe_result;

  // Map::cast cannot be used due to uninitialized map field.
  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
  reinterpret_cast<Map*>(result)->set_visitor_id(
        StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
  reinterpret_cast<Map*>(result)->set_bit_field(0);
  reinterpret_cast<Map*>(result)->set_bit_field2(0);
  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
                   Map::OwnsDescriptors::encode(true);
  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
  return result;
}


MaybeObject* Heap::AllocateMap(InstanceType instance_type,
                               int instance_size,
                               ElementsKind elements_kind) {
  Object* result;
  MaybeObject* maybe_result = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
  if (!maybe_result->To(&result)) return maybe_result;

  Map* map = reinterpret_cast<Map*>(result);
  map->set_map_no_write_barrier(meta_map());
  map->set_instance_type(instance_type);
  map->set_visitor_id(
      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
  map->set_instance_size(instance_size);
  map->set_inobject_properties(0);
  map->set_pre_allocated_property_fields(0);
  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
                          SKIP_WRITE_BARRIER);
  map->init_back_pointer(undefined_value());
  map->set_unused_property_fields(0);
  map->set_instance_descriptors(empty_descriptor_array());
  map->set_bit_field(0);
  map->set_bit_field2(1 << Map::kIsExtensible);
  int bit_field3 = Map::EnumLengthBits::encode(Map::kInvalidEnumCache) |
                   Map::OwnsDescriptors::encode(true);
  map->set_bit_field3(bit_field3);
  map->set_elements_kind(elements_kind);

  return map;
}


MaybeObject* Heap::AllocateCodeCache() {
  CodeCache* code_cache;
  { MaybeObject* maybe_code_cache = AllocateStruct(CODE_CACHE_TYPE);
    if (!maybe_code_cache->To(&code_cache)) return maybe_code_cache;
  }
  code_cache->set_default_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
  code_cache->set_normal_type_cache(undefined_value(), SKIP_WRITE_BARRIER);
  return code_cache;
}


MaybeObject* Heap::AllocatePolymorphicCodeCache() {
  return AllocateStruct(POLYMORPHIC_CODE_CACHE_TYPE);
}


MaybeObject* Heap::AllocateAccessorPair() {
  AccessorPair* accessors;
  { MaybeObject* maybe_accessors = AllocateStruct(ACCESSOR_PAIR_TYPE);
    if (!maybe_accessors->To(&accessors)) return maybe_accessors;
  }
  accessors->set_getter(the_hole_value(), SKIP_WRITE_BARRIER);
  accessors->set_setter(the_hole_value(), SKIP_WRITE_BARRIER);
  accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
  return accessors;
}


MaybeObject* Heap::AllocateTypeFeedbackInfo() {
  TypeFeedbackInfo* info;
  { MaybeObject* maybe_info = AllocateStruct(TYPE_FEEDBACK_INFO_TYPE);
    if (!maybe_info->To(&info)) return maybe_info;
  }
  info->initialize_storage();
  info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
                                SKIP_WRITE_BARRIER);
  return info;
}


MaybeObject* Heap::AllocateAliasedArgumentsEntry(int aliased_context_slot) {
  AliasedArgumentsEntry* entry;
  { MaybeObject* maybe_entry = AllocateStruct(ALIASED_ARGUMENTS_ENTRY_TYPE);
    if (!maybe_entry->To(&entry)) return maybe_entry;
  }
  entry->set_aliased_context_slot(aliased_context_slot);
  return entry;
}


const Heap::StringTypeTable Heap::string_type_table[] = {
#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
  {type, size, k##camel_name##MapRootIndex},
  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
#undef STRING_TYPE_ELEMENT
};


const Heap::ConstantStringTable Heap::constant_string_table[] = {
#define CONSTANT_STRING_ELEMENT(name, contents)                                \
  {contents, k##name##RootIndex},
  INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};


const Heap::StructTable Heap::struct_table[] = {
#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
#undef STRUCT_TABLE_ELEMENT
};


bool Heap::CreateInitialMaps() {
  Object* obj;
  { MaybeObject* maybe_obj = AllocatePartialMap(MAP_TYPE, Map::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  // Map::cast cannot be used due to uninitialized map field.
  Map* new_meta_map = reinterpret_cast<Map*>(obj);
  set_meta_map(new_meta_map);
  new_meta_map->set_map(new_meta_map);

  { MaybeObject* maybe_obj =
        AllocatePartialMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocatePartialMap(ODDBALL_TYPE, Oddball::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_oddball_map(Map::cast(obj));

  // Allocate the empty array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_fixed_array(FixedArray::cast(obj));

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_null_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kNull);

  { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undefined_value(Oddball::cast(obj));
  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
  ASSERT(!InNewSpace(undefined_value()));

  // Allocate the empty descriptor array.
  { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_descriptor_array(DescriptorArray::cast(obj));

  // Fix the instance_descriptors for the existing maps.
  meta_map()->set_code_cache(empty_fixed_array());
  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  meta_map()->init_back_pointer(undefined_value());
  meta_map()->set_instance_descriptors(empty_descriptor_array());

  fixed_array_map()->set_code_cache(empty_fixed_array());
  fixed_array_map()->set_dependent_code(
      DependentCode::cast(empty_fixed_array()));
  fixed_array_map()->init_back_pointer(undefined_value());
  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());

  oddball_map()->set_code_cache(empty_fixed_array());
  oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
  oddball_map()->init_back_pointer(undefined_value());
  oddball_map()->set_instance_descriptors(empty_descriptor_array());

  // Fix prototype object for existing maps.
  meta_map()->set_prototype(null_value());
  meta_map()->set_constructor(null_value());

  fixed_array_map()->set_prototype(null_value());
  fixed_array_map()->set_constructor(null_value());

  oddball_map()->set_prototype(null_value());
  oddball_map()->set_constructor(null_value());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_cow_array_map(Map::cast(obj));
  ASSERT(fixed_array_map() != fixed_cow_array_map());

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_scope_info_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(HEAP_NUMBER_TYPE, HeapNumber::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_heap_number_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_symbol_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_foreign_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
    const StringTypeTable& entry = string_type_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj = AllocateMap(STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(ASCII_STRING_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_undetectable_ascii_string_map(Map::cast(obj));
  Map::cast(obj)->set_is_undetectable();

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_DOUBLE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_fixed_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_constant_pool_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_free_space_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_byte_array(ByteArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_pixel_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_byte_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_short_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_unsigned_int_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_FLOAT_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_float_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_non_strict_arguments_elements_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(EXTERNAL_DOUBLE_ARRAY_TYPE,
                                         ExternalArray::kAlignedSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_external_double_array_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalByteArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_byte_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateEmptyExternalArray(kExternalUnsignedByteArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_unsigned_byte_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalShortArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_short_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(
      kExternalUnsignedShortArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_unsigned_short_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalIntArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_int_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateEmptyExternalArray(kExternalUnsignedIntArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_unsigned_int_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalFloatArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_float_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalDoubleArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_double_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateEmptyExternalArray(kExternalPixelArray);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_empty_external_pixel_array(ExternalArray::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CODE_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_code_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(CELL_TYPE, Cell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(PROPERTY_CELL_TYPE,
                                         PropertyCell::kSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_property_cell_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_one_pointer_filler_map(Map::cast(obj));

  { MaybeObject* maybe_obj = AllocateMap(FILLER_TYPE, 2 * kPointerSize);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_two_pointer_filler_map(Map::cast(obj));

  for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
    const StructTable& entry = struct_table[i];
    { MaybeObject* maybe_obj = AllocateMap(entry.type, entry.size);
      if (!maybe_obj->ToObject(&obj)) return false;
    }
    roots_[entry.index] = Map::cast(obj);
  }

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_hash_table_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_function_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_catch_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_with_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_block_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_module_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
    if (!maybe_obj->ToObject(&obj)) return false;
  }
  set_global_context_map(Map::cast(obj));

  { MaybeObject* maybe_obj =
        AllocateMap(FIXED_ARRAY_TYPE, kVariableSizeSentinel);
2905
    if (!maybe_obj->ToObject(&obj)) return false;
2906
  }
2907
  Map* native_context_map = Map::cast(obj);
2908
  native_context_map->set_dictionary_map(true);
2909
  native_context_map->set_visitor_id(StaticVisitorBase::kVisitNativeContext);
2910
  set_native_context_map(native_context_map);
2911

    
2912
  { MaybeObject* maybe_obj = AllocateMap(SHARED_FUNCTION_INFO_TYPE,
2913
                                         SharedFunctionInfo::kAlignedSize);
2914
    if (!maybe_obj->ToObject(&obj)) return false;
2915
  }
2916
  set_shared_function_info_map(Map::cast(obj));
2917

    
2918
  { MaybeObject* maybe_obj = AllocateMap(JS_MESSAGE_OBJECT_TYPE,
2919
                                         JSMessageObject::kSize);
2920
    if (!maybe_obj->ToObject(&obj)) return false;
2921
  }
2922
  set_message_object_map(Map::cast(obj));
2923

    
2924
  Map* external_map;
2925
  { MaybeObject* maybe_obj =
2926
        AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
2927
    if (!maybe_obj->To(&external_map)) return false;
2928
  }
2929
  external_map->set_is_extensible(false);
2930
  set_external_map(external_map);
2931

    
2932
  ASSERT(!InNewSpace(empty_fixed_array()));
2933
  return true;
2934
}
2935

    
2936

    
2937
MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
2938
  // Statically ensure that it is safe to allocate heap numbers in paged
2939
  // spaces.
2940
  int size = HeapNumber::kSize;
2941
  STATIC_ASSERT(HeapNumber::kSize <= Page::kNonCodeObjectAreaSize);
2942
  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
2943

    
2944
  Object* result;
2945
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
2946
    if (!maybe_result->ToObject(&result)) return maybe_result;
2947
  }
2948

    
2949
  HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
2950
  HeapNumber::cast(result)->set_value(value);
2951
  return result;
2952
}
2953

    
2954

    
2955
MaybeObject* Heap::AllocateCell(Object* value) {
2956
  int size = Cell::kSize;
2957
  STATIC_ASSERT(Cell::kSize <= Page::kNonCodeObjectAreaSize);
2958

    
2959
  Object* result;
2960
  { MaybeObject* maybe_result = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
2961
    if (!maybe_result->ToObject(&result)) return maybe_result;
2962
  }
2963
  HeapObject::cast(result)->set_map_no_write_barrier(cell_map());
2964
  Cell::cast(result)->set_value(value);
2965
  return result;
2966
}
2967

    
2968

    
2969
MaybeObject* Heap::AllocatePropertyCell() {
2970
  int size = PropertyCell::kSize;
2971
  STATIC_ASSERT(PropertyCell::kSize <= Page::kNonCodeObjectAreaSize);
2972

    
2973
  Object* result;
2974
  MaybeObject* maybe_result =
2975
      AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
2976
  if (!maybe_result->ToObject(&result)) return maybe_result;
2977

    
2978
  HeapObject::cast(result)->set_map_no_write_barrier(
2979
      global_property_cell_map());
2980
  PropertyCell* cell = PropertyCell::cast(result);
2981
  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
2982
                           SKIP_WRITE_BARRIER);
2983
  cell->set_value(the_hole_value());
2984
  cell->set_type(Type::None());
2985
  return result;
2986
}
2987

    
2988

    
2989
MaybeObject* Heap::AllocateBox(Object* value, PretenureFlag pretenure) {
2990
  Box* result;
2991
  MaybeObject* maybe_result = AllocateStruct(BOX_TYPE);
2992
  if (!maybe_result->To(&result)) return maybe_result;
2993
  result->set_value(value);
2994
  return result;
2995
}
2996

    
2997

    
2998
MaybeObject* Heap::AllocateAllocationSite() {
2999
  AllocationSite* site;
3000
  MaybeObject* maybe_result = Allocate(allocation_site_map(),
3001
                                       OLD_POINTER_SPACE);
3002
  if (!maybe_result->To(&site)) return maybe_result;
3003
  site->Initialize();
3004

    
3005
  // Link the site
3006
  site->set_weak_next(allocation_sites_list());
3007
  set_allocation_sites_list(site);
3008
  return site;
3009
}
3010

    
3011

    
3012
MaybeObject* Heap::CreateOddball(const char* to_string,
3013
                                 Object* to_number,
3014
                                 byte kind) {
3015
  Object* result;
3016
  { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
3017
    if (!maybe_result->ToObject(&result)) return maybe_result;
3018
  }
3019
  return Oddball::cast(result)->Initialize(this, to_string, to_number, kind);
3020
}
3021

    
3022

    
3023
bool Heap::CreateApiObjects() {
3024
  Object* obj;
3025

    
3026
  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3027
    if (!maybe_obj->ToObject(&obj)) return false;
3028
  }
3029
  // Don't use Smi-only elements optimizations for objects with the neander
3030
  // map. There are too many cases where element values are set directly with a
3031
  // bottleneck to trap the Smi-only -> fast elements transition, and there
3032
  // appears to be no benefit for optimize this case.
3033
  Map* new_neander_map = Map::cast(obj);
3034
  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
3035
  set_neander_map(new_neander_map);
3036

    
3037
  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
3038
    if (!maybe_obj->ToObject(&obj)) return false;
3039
  }
3040
  Object* elements;
3041
  { MaybeObject* maybe_elements = AllocateFixedArray(2);
3042
    if (!maybe_elements->ToObject(&elements)) return false;
3043
  }
3044
  FixedArray::cast(elements)->set(0, Smi::FromInt(0));
3045
  JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
3046
  set_message_listeners(JSObject::cast(obj));
3047

    
3048
  return true;
3049
}
3050

    
3051

    
3052
void Heap::CreateJSEntryStub() {
3053
  JSEntryStub stub;
3054
  set_js_entry_code(*stub.GetCode(isolate()));
3055
}
3056

    
3057

    
3058
void Heap::CreateJSConstructEntryStub() {
3059
  JSConstructEntryStub stub;
3060
  set_js_construct_entry_code(*stub.GetCode(isolate()));
3061
}
3062

    
3063

    
3064
void Heap::CreateFixedStubs() {
3065
  // Here we create roots for fixed stubs. They are needed at GC
3066
  // for cooking and uncooking (check out frames.cc).
3067
  // The eliminates the need for doing dictionary lookup in the
3068
  // stub cache for these stubs.
3069
  HandleScope scope(isolate());
3070
  // gcc-4.4 has problem generating correct code of following snippet:
3071
  // {  JSEntryStub stub;
3072
  //    js_entry_code_ = *stub.GetCode();
3073
  // }
3074
  // {  JSConstructEntryStub stub;
3075
  //    js_construct_entry_code_ = *stub.GetCode();
3076
  // }
3077
  // To workaround the problem, make separate functions without inlining.
3078
  Heap::CreateJSEntryStub();
3079
  Heap::CreateJSConstructEntryStub();
3080

    
3081
  // Create stubs that should be there, so we don't unexpectedly have to
3082
  // create them if we need them during the creation of another stub.
3083
  // Stub creation mixes raw pointers and handles in an unsafe manner so
3084
  // we cannot create stubs while we are creating stubs.
3085
  CodeStub::GenerateStubsAheadOfTime(isolate());
3086
}
3087

    
3088

    
3089
bool Heap::CreateInitialObjects() {
3090
  Object* obj;
3091

    
3092
  // The -0 value must be set before NumberFromDouble works.
3093
  { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
3094
    if (!maybe_obj->ToObject(&obj)) return false;
3095
  }
3096
  set_minus_zero_value(HeapNumber::cast(obj));
3097
  ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
3098

    
3099
  { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
3100
    if (!maybe_obj->ToObject(&obj)) return false;
3101
  }
3102
  set_nan_value(HeapNumber::cast(obj));
3103

    
3104
  { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
3105
    if (!maybe_obj->ToObject(&obj)) return false;
3106
  }
3107
  set_infinity_value(HeapNumber::cast(obj));
3108

    
3109
  // The hole has not been created yet, but we want to put something
3110
  // predictable in the gaps in the string table, so lets make that Smi zero.
3111
  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
3112

    
3113
  // Allocate initial string table.
3114
  { MaybeObject* maybe_obj =
3115
        StringTable::Allocate(this, kInitialStringTableSize);
3116
    if (!maybe_obj->ToObject(&obj)) return false;
3117
  }
3118
  // Don't use set_string_table() due to asserts.
3119
  roots_[kStringTableRootIndex] = obj;
3120

    
3121
  // Finish initializing oddballs after creating the string table.
3122
  { MaybeObject* maybe_obj =
3123
        undefined_value()->Initialize(this,
3124
                                      "undefined",
3125
                                      nan_value(),
3126
                                      Oddball::kUndefined);
3127
    if (!maybe_obj->ToObject(&obj)) return false;
3128
  }
3129

    
3130
  // Initialize the null_value.
3131
  { MaybeObject* maybe_obj = null_value()->Initialize(
3132
      this, "null", Smi::FromInt(0), Oddball::kNull);
3133
    if (!maybe_obj->ToObject(&obj)) return false;
3134
  }
3135

    
3136
  { MaybeObject* maybe_obj = CreateOddball("true",
3137
                                           Smi::FromInt(1),
3138
                                           Oddball::kTrue);
3139
    if (!maybe_obj->ToObject(&obj)) return false;
3140
  }
3141
  set_true_value(Oddball::cast(obj));
3142

    
3143
  { MaybeObject* maybe_obj = CreateOddball("false",
3144
                                           Smi::FromInt(0),
3145
                                           Oddball::kFalse);
3146
    if (!maybe_obj->ToObject(&obj)) return false;
3147
  }
3148
  set_false_value(Oddball::cast(obj));
3149

    
3150
  { MaybeObject* maybe_obj = CreateOddball("hole",
3151
                                           Smi::FromInt(-1),
3152
                                           Oddball::kTheHole);
3153
    if (!maybe_obj->ToObject(&obj)) return false;
3154
  }
3155
  set_the_hole_value(Oddball::cast(obj));
3156

    
3157
  { MaybeObject* maybe_obj = CreateOddball("uninitialized",
3158
                                           Smi::FromInt(-1),
3159
                                           Oddball::kUninitialized);
3160
    if (!maybe_obj->ToObject(&obj)) return false;
3161
  }
3162
  set_uninitialized_value(Oddball::cast(obj));
3163

    
3164
  { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
3165
                                           Smi::FromInt(-4),
3166
                                           Oddball::kArgumentMarker);
3167
    if (!maybe_obj->ToObject(&obj)) return false;
3168
  }
3169
  set_arguments_marker(Oddball::cast(obj));
3170

    
3171
  { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
3172
                                           Smi::FromInt(-2),
3173
                                           Oddball::kOther);
3174
    if (!maybe_obj->ToObject(&obj)) return false;
3175
  }
3176
  set_no_interceptor_result_sentinel(obj);
3177

    
3178
  { MaybeObject* maybe_obj = CreateOddball("termination_exception",
3179
                                           Smi::FromInt(-3),
3180
                                           Oddball::kOther);
3181
    if (!maybe_obj->ToObject(&obj)) return false;
3182
  }
3183
  set_termination_exception(obj);
3184

    
3185
  for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
3186
    { MaybeObject* maybe_obj =
3187
          InternalizeUtf8String(constant_string_table[i].contents);
3188
      if (!maybe_obj->ToObject(&obj)) return false;
3189
    }
3190
    roots_[constant_string_table[i].index] = String::cast(obj);
3191
  }
3192

    
3193
  // Allocate the hidden string which is used to identify the hidden properties
3194
  // in JSObjects. The hash code has a special value so that it will not match
3195
  // the empty string when searching for the property. It cannot be part of the
3196
  // loop above because it needs to be allocated manually with the special
3197
  // hash code in place. The hash code for the hidden_string is zero to ensure
3198
  // that it will always be at the first entry in property descriptors.
3199
  { MaybeObject* maybe_obj = AllocateOneByteInternalizedString(
3200
      OneByteVector("", 0), String::kEmptyStringHash);
3201
    if (!maybe_obj->ToObject(&obj)) return false;
3202
  }
3203
  hidden_string_ = String::cast(obj);
3204

    
3205
  // Allocate the code_stubs dictionary. The initial size is set to avoid
3206
  // expanding the dictionary during bootstrapping.
3207
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
3208
    if (!maybe_obj->ToObject(&obj)) return false;
3209
  }
3210
  set_code_stubs(UnseededNumberDictionary::cast(obj));
3211

    
3212

    
3213
  // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
3214
  // is set to avoid expanding the dictionary during bootstrapping.
3215
  { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64);
3216
    if (!maybe_obj->ToObject(&obj)) return false;
3217
  }
3218
  set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj));
3219

    
3220
  { MaybeObject* maybe_obj = AllocatePolymorphicCodeCache();
3221
    if (!maybe_obj->ToObject(&obj)) return false;
3222
  }
3223
  set_polymorphic_code_cache(PolymorphicCodeCache::cast(obj));
3224

    
3225
  set_instanceof_cache_function(Smi::FromInt(0));
3226
  set_instanceof_cache_map(Smi::FromInt(0));
3227
  set_instanceof_cache_answer(Smi::FromInt(0));
3228

    
3229
  CreateFixedStubs();
3230

    
3231
  // Allocate the dictionary of intrinsic function names.
3232
  { MaybeObject* maybe_obj =
3233
        NameDictionary::Allocate(this, Runtime::kNumFunctions);
3234
    if (!maybe_obj->ToObject(&obj)) return false;
3235
  }
3236
  { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this,
3237
                                                                       obj);
3238
    if (!maybe_obj->ToObject(&obj)) return false;
3239
  }
3240
  set_intrinsic_function_names(NameDictionary::cast(obj));
3241

    
3242
  { MaybeObject* maybe_obj = AllocateInitialNumberStringCache();
3243
    if (!maybe_obj->ToObject(&obj)) return false;
3244
  }
3245
  set_number_string_cache(FixedArray::cast(obj));
3246

    
3247
  // Allocate cache for single character one byte strings.
3248
  { MaybeObject* maybe_obj =
3249
        AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED);
3250
    if (!maybe_obj->ToObject(&obj)) return false;
3251
  }
3252
  set_single_character_string_cache(FixedArray::cast(obj));
3253

    
3254
  // Allocate cache for string split.
3255
  { MaybeObject* maybe_obj = AllocateFixedArray(
3256
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3257
    if (!maybe_obj->ToObject(&obj)) return false;
3258
  }
3259
  set_string_split_cache(FixedArray::cast(obj));
3260

    
3261
  { MaybeObject* maybe_obj = AllocateFixedArray(
3262
      RegExpResultsCache::kRegExpResultsCacheSize, TENURED);
3263
    if (!maybe_obj->ToObject(&obj)) return false;
3264
  }
3265
  set_regexp_multiple_cache(FixedArray::cast(obj));
3266

    
3267
  // Allocate cache for external strings pointing to native source code.
3268
  { MaybeObject* maybe_obj = AllocateFixedArray(Natives::GetBuiltinsCount());
3269
    if (!maybe_obj->ToObject(&obj)) return false;
3270
  }
3271
  set_natives_source_cache(FixedArray::cast(obj));
3272

    
3273
  // Allocate object to hold object observation state.
3274
  { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
3275
    if (!maybe_obj->ToObject(&obj)) return false;
3276
  }
3277
  { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
3278
    if (!maybe_obj->ToObject(&obj)) return false;
3279
  }
3280
  set_observation_state(JSObject::cast(obj));
3281

    
3282
  { MaybeObject* maybe_obj = AllocateSymbol();
3283
    if (!maybe_obj->ToObject(&obj)) return false;
3284
  }
3285
  set_frozen_symbol(Symbol::cast(obj));
3286

    
3287
  { MaybeObject* maybe_obj = AllocateSymbol();
3288
    if (!maybe_obj->ToObject(&obj)) return false;
3289
  }
3290
  set_elements_transition_symbol(Symbol::cast(obj));
3291

    
3292
  { MaybeObject* maybe_obj = SeededNumberDictionary::Allocate(this, 0, TENURED);
3293
    if (!maybe_obj->ToObject(&obj)) return false;
3294
  }
3295
  SeededNumberDictionary::cast(obj)->set_requires_slow_elements();
3296
  set_empty_slow_element_dictionary(SeededNumberDictionary::cast(obj));
3297

    
3298
  { MaybeObject* maybe_obj = AllocateSymbol();
3299
    if (!maybe_obj->ToObject(&obj)) return false;
3300
  }
3301
  set_observed_symbol(Symbol::cast(obj));
3302

    
3303
  // Handling of script id generation is in Factory::NewScript.
3304
  set_last_script_id(Smi::FromInt(v8::Script::kNoScriptId));
3305

    
3306
  // Initialize keyed lookup cache.
3307
  isolate_->keyed_lookup_cache()->Clear();
3308

    
3309
  // Initialize context slot cache.
3310
  isolate_->context_slot_cache()->Clear();
3311

    
3312
  // Initialize descriptor cache.
3313
  isolate_->descriptor_lookup_cache()->Clear();
3314

    
3315
  // Initialize compilation cache.
3316
  isolate_->compilation_cache()->Clear();
3317

    
3318
  return true;
3319
}
3320

    
3321

    
3322
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
3323
  RootListIndex writable_roots[] = {
3324
    kStoreBufferTopRootIndex,
3325
    kStackLimitRootIndex,
3326
    kNumberStringCacheRootIndex,
3327
    kInstanceofCacheFunctionRootIndex,
3328
    kInstanceofCacheMapRootIndex,
3329
    kInstanceofCacheAnswerRootIndex,
3330
    kCodeStubsRootIndex,
3331
    kNonMonomorphicCacheRootIndex,
3332
    kPolymorphicCodeCacheRootIndex,
3333
    kLastScriptIdRootIndex,
3334
    kEmptyScriptRootIndex,
3335
    kRealStackLimitRootIndex,
3336
    kArgumentsAdaptorDeoptPCOffsetRootIndex,
3337
    kConstructStubDeoptPCOffsetRootIndex,
3338
    kGetterStubDeoptPCOffsetRootIndex,
3339
    kSetterStubDeoptPCOffsetRootIndex,
3340
    kStringTableRootIndex,
3341
  };
3342

    
3343
  for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
3344
    if (root_index == writable_roots[i])
3345
      return true;
3346
  }
3347
  return false;
3348
}
3349

    
3350

    
3351
bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
3352
  return !RootCanBeWrittenAfterInitialization(root_index) &&
3353
      !InNewSpace(roots_array_start()[root_index]);
3354
}
3355

    
3356

    
3357
Object* RegExpResultsCache::Lookup(Heap* heap,
3358
                                   String* key_string,
3359
                                   Object* key_pattern,
3360
                                   ResultsCacheType type) {
3361
  FixedArray* cache;
3362
  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
3363
  if (type == STRING_SPLIT_SUBSTRINGS) {
3364
    ASSERT(key_pattern->IsString());
3365
    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
3366
    cache = heap->string_split_cache();
3367
  } else {
3368
    ASSERT(type == REGEXP_MULTIPLE_INDICES);
3369
    ASSERT(key_pattern->IsFixedArray());
3370
    cache = heap->regexp_multiple_cache();
3371
  }
3372

    
3373
  uint32_t hash = key_string->Hash();
3374
  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3375
      ~(kArrayEntriesPerCacheEntry - 1));
3376
  if (cache->get(index + kStringOffset) == key_string &&
3377
      cache->get(index + kPatternOffset) == key_pattern) {
3378
    return cache->get(index + kArrayOffset);
3379
  }
3380
  index =
3381
      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3382
  if (cache->get(index + kStringOffset) == key_string &&
3383
      cache->get(index + kPatternOffset) == key_pattern) {
3384
    return cache->get(index + kArrayOffset);
3385
  }
3386
  return Smi::FromInt(0);
3387
}
3388

    
3389

    
3390
void RegExpResultsCache::Enter(Heap* heap,
3391
                               String* key_string,
3392
                               Object* key_pattern,
3393
                               FixedArray* value_array,
3394
                               ResultsCacheType type) {
3395
  FixedArray* cache;
3396
  if (!key_string->IsInternalizedString()) return;
3397
  if (type == STRING_SPLIT_SUBSTRINGS) {
3398
    ASSERT(key_pattern->IsString());
3399
    if (!key_pattern->IsInternalizedString()) return;
3400
    cache = heap->string_split_cache();
3401
  } else {
3402
    ASSERT(type == REGEXP_MULTIPLE_INDICES);
3403
    ASSERT(key_pattern->IsFixedArray());
3404
    cache = heap->regexp_multiple_cache();
3405
  }
3406

    
3407
  uint32_t hash = key_string->Hash();
3408
  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
3409
      ~(kArrayEntriesPerCacheEntry - 1));
3410
  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
3411
    cache->set(index + kStringOffset, key_string);
3412
    cache->set(index + kPatternOffset, key_pattern);
3413
    cache->set(index + kArrayOffset, value_array);
3414
  } else {
3415
    uint32_t index2 =
3416
        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
3417
    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
3418
      cache->set(index2 + kStringOffset, key_string);
3419
      cache->set(index2 + kPatternOffset, key_pattern);
3420
      cache->set(index2 + kArrayOffset, value_array);
3421
    } else {
3422
      cache->set(index2 + kStringOffset, Smi::FromInt(0));
3423
      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
3424
      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
3425
      cache->set(index + kStringOffset, key_string);
3426
      cache->set(index + kPatternOffset, key_pattern);
3427
      cache->set(index + kArrayOffset, value_array);
3428
    }
3429
  }
3430
  // If the array is a reasonably short list of substrings, convert it into a
3431
  // list of internalized strings.
3432
  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
3433
    for (int i = 0; i < value_array->length(); i++) {
3434
      String* str = String::cast(value_array->get(i));
3435
      Object* internalized_str;
3436
      MaybeObject* maybe_string = heap->InternalizeString(str);
3437
      if (maybe_string->ToObject(&internalized_str)) {
3438
        value_array->set(i, internalized_str);
3439
      }
3440
    }
3441
  }
3442
  // Convert backing store to a copy-on-write array.
3443
  value_array->set_map_no_write_barrier(heap->fixed_cow_array_map());
3444
}
3445

    
3446

    
3447
void RegExpResultsCache::Clear(FixedArray* cache) {
3448
  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
3449
    cache->set(i, Smi::FromInt(0));
3450
  }
3451
}
3452

    
3453

    
3454
MaybeObject* Heap::AllocateInitialNumberStringCache() {
3455
  MaybeObject* maybe_obj =
3456
      AllocateFixedArray(kInitialNumberStringCacheSize * 2, TENURED);
3457
  return maybe_obj;
3458
}
3459

    
3460

    
3461
int Heap::FullSizeNumberStringCacheLength() {
3462
  // Compute the size of the number string cache based on the max newspace size.
3463
  // The number string cache has a minimum size based on twice the initial cache
3464
  // size to ensure that it is bigger after being made 'full size'.
3465
  int number_string_cache_size = max_semispace_size_ / 512;
3466
  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
3467
                                 Min(0x4000, number_string_cache_size));
3468
  // There is a string and a number per entry so the length is twice the number
3469
  // of entries.
3470
  return number_string_cache_size * 2;
3471
}
3472

    
3473

    
3474
void Heap::AllocateFullSizeNumberStringCache() {
3475
  // The idea is to have a small number string cache in the snapshot to keep
3476
  // boot-time memory usage down.  If we expand the number string cache already
3477
  // while creating the snapshot then that didn't work out.
3478
  ASSERT(!Serializer::enabled() || FLAG_extra_code != NULL);
3479
  MaybeObject* maybe_obj =
3480
      AllocateFixedArray(FullSizeNumberStringCacheLength(), TENURED);
3481
  Object* new_cache;
3482
  if (maybe_obj->ToObject(&new_cache)) {
3483
    // We don't bother to repopulate the cache with entries from the old cache.
3484
    // It will be repopulated soon enough with new strings.
3485
    set_number_string_cache(FixedArray::cast(new_cache));
3486
  }
3487
  // If allocation fails then we just return without doing anything.  It is only
3488
  // a cache, so best effort is OK here.
3489
}
3490

    
3491

    
3492
void Heap::FlushNumberStringCache() {
3493
  // Flush the number to string cache.
3494
  int len = number_string_cache()->length();
3495
  for (int i = 0; i < len; i++) {
3496
    number_string_cache()->set_undefined(i);
3497
  }
3498
}
3499

    
3500

    
3501
static inline int double_get_hash(double d) {
3502
  DoubleRepresentation rep(d);
3503
  return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
3504
}
3505

    
3506

    
3507
static inline int smi_get_hash(Smi* smi) {
3508
  return smi->value();
3509
}
3510

    
3511

    
3512
Object* Heap::GetNumberStringCache(Object* number) {
3513
  int hash;
3514
  int mask = (number_string_cache()->length() >> 1) - 1;
3515
  if (number->IsSmi()) {
3516
    hash = smi_get_hash(Smi::cast(number)) & mask;
3517
  } else {
3518
    hash = double_get_hash(number->Number()) & mask;
3519
  }
3520
  Object* key = number_string_cache()->get(hash * 2);
3521
  if (key == number) {
3522
    return String::cast(number_string_cache()->get(hash * 2 + 1));
3523
  } else if (key->IsHeapNumber() &&
3524
             number->IsHeapNumber() &&
3525
             key->Number() == number->Number()) {
3526
    return String::cast(number_string_cache()->get(hash * 2 + 1));
3527
  }
3528
  return undefined_value();
3529
}
3530

    
3531

    
3532
void Heap::SetNumberStringCache(Object* number, String* string) {
3533
  int hash;
3534
  int mask = (number_string_cache()->length() >> 1) - 1;
3535
  if (number->IsSmi()) {
3536
    hash = smi_get_hash(Smi::cast(number)) & mask;
3537
  } else {
3538
    hash = double_get_hash(number->Number()) & mask;
3539
  }
3540
  if (number_string_cache()->get(hash * 2) != undefined_value() &&
3541
      number_string_cache()->length() != FullSizeNumberStringCacheLength()) {
3542
    // The first time we have a hash collision, we move to the full sized
3543
    // number string cache.
3544
    AllocateFullSizeNumberStringCache();
3545
    return;
3546
  }
3547
  number_string_cache()->set(hash * 2, number);
3548
  number_string_cache()->set(hash * 2 + 1, string);
3549
}
3550

    
3551

    
3552
MaybeObject* Heap::NumberToString(Object* number,
3553
                                  bool check_number_string_cache,
3554
                                  PretenureFlag pretenure) {
3555
  isolate_->counters()->number_to_string_runtime()->Increment();
3556
  if (check_number_string_cache) {
3557
    Object* cached = GetNumberStringCache(number);
3558
    if (cached != undefined_value()) {
3559
      return cached;
3560
    }
3561
  }
3562

    
3563
  char arr[100];
3564
  Vector<char> buffer(arr, ARRAY_SIZE(arr));
3565
  const char* str;
3566
  if (number->IsSmi()) {
3567
    int num = Smi::cast(number)->value();
3568
    str = IntToCString(num, buffer);
3569
  } else {
3570
    double num = HeapNumber::cast(number)->value();
3571
    str = DoubleToCString(num, buffer);
3572
  }
3573

    
3574
  Object* js_string;
3575
  MaybeObject* maybe_js_string =
3576
      AllocateStringFromOneByte(CStrVector(str), pretenure);
3577
  if (maybe_js_string->ToObject(&js_string)) {
3578
    SetNumberStringCache(number, String::cast(js_string));
3579
  }
3580
  return maybe_js_string;
3581
}
3582

    
3583

    
3584
MaybeObject* Heap::Uint32ToString(uint32_t value,
3585
                                  bool check_number_string_cache) {
3586
  Object* number;
3587
  MaybeObject* maybe = NumberFromUint32(value);
3588
  if (!maybe->To<Object>(&number)) return maybe;
3589
  return NumberToString(number, check_number_string_cache);
3590
}
3591

    
3592

    
3593
Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
3594
  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
3595
}
3596

    
3597

    
3598
Heap::RootListIndex Heap::RootIndexForExternalArrayType(
3599
    ExternalArrayType array_type) {
3600
  switch (array_type) {
3601
    case kExternalByteArray:
3602
      return kExternalByteArrayMapRootIndex;
3603
    case kExternalUnsignedByteArray:
3604
      return kExternalUnsignedByteArrayMapRootIndex;
3605
    case kExternalShortArray:
3606
      return kExternalShortArrayMapRootIndex;
3607
    case kExternalUnsignedShortArray:
3608
      return kExternalUnsignedShortArrayMapRootIndex;
3609
    case kExternalIntArray:
3610
      return kExternalIntArrayMapRootIndex;
3611
    case kExternalUnsignedIntArray:
3612
      return kExternalUnsignedIntArrayMapRootIndex;
3613
    case kExternalFloatArray:
3614
      return kExternalFloatArrayMapRootIndex;
3615
    case kExternalDoubleArray:
3616
      return kExternalDoubleArrayMapRootIndex;
3617
    case kExternalPixelArray:
3618
      return kExternalPixelArrayMapRootIndex;
3619
    default:
3620
      UNREACHABLE();
3621
      return kUndefinedValueRootIndex;
3622
  }
3623
}
3624

    
3625
Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
3626
    ElementsKind elementsKind) {
3627
  switch (elementsKind) {
3628
    case EXTERNAL_BYTE_ELEMENTS:
3629
      return kEmptyExternalByteArrayRootIndex;
3630
    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3631
      return kEmptyExternalUnsignedByteArrayRootIndex;
3632
    case EXTERNAL_SHORT_ELEMENTS:
3633
      return kEmptyExternalShortArrayRootIndex;
3634
    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3635
      return kEmptyExternalUnsignedShortArrayRootIndex;
3636
    case EXTERNAL_INT_ELEMENTS:
3637
      return kEmptyExternalIntArrayRootIndex;
3638
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3639
      return kEmptyExternalUnsignedIntArrayRootIndex;
3640
    case EXTERNAL_FLOAT_ELEMENTS:
3641
      return kEmptyExternalFloatArrayRootIndex;
3642
    case EXTERNAL_DOUBLE_ELEMENTS:
3643
      return kEmptyExternalDoubleArrayRootIndex;
3644
    case EXTERNAL_PIXEL_ELEMENTS:
3645
      return kEmptyExternalPixelArrayRootIndex;
3646
    default:
3647
      UNREACHABLE();
3648
      return kUndefinedValueRootIndex;
3649
  }
3650
}
3651

    
3652

    
3653
ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
3654
  return ExternalArray::cast(
3655
      roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
3656
}
3657

    
3658

    
3659

    
3660

    
3661
MaybeObject* Heap::NumberFromDouble(double value, PretenureFlag pretenure) {
3662
  // We need to distinguish the minus zero value and this cannot be
3663
  // done after conversion to int. Doing this by comparing bit
3664
  // patterns is faster than using fpclassify() et al.
3665
  static const DoubleRepresentation minus_zero(-0.0);
3666

    
3667
  DoubleRepresentation rep(value);
3668
  if (rep.bits == minus_zero.bits) {
3669
    return AllocateHeapNumber(-0.0, pretenure);
3670
  }
3671

    
3672
  int int_value = FastD2I(value);
3673
  if (value == int_value && Smi::IsValid(int_value)) {
3674
    return Smi::FromInt(int_value);
3675
  }
3676

    
3677
  // Materialize the value in the heap.
3678
  return AllocateHeapNumber(value, pretenure);
3679
}
3680

    
3681

    
3682
MaybeObject* Heap::AllocateForeign(Address address, PretenureFlag pretenure) {
3683
  // Statically ensure that it is safe to allocate foreigns in paged spaces.
3684
  STATIC_ASSERT(Foreign::kSize <= Page::kMaxNonCodeHeapObjectSize);
3685
  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
3686
  Foreign* result;
3687
  MaybeObject* maybe_result = Allocate(foreign_map(), space);
3688
  if (!maybe_result->To(&result)) return maybe_result;
3689
  result->set_foreign_address(address);
3690
  return result;
3691
}
3692

    
3693

    
3694
MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) {
3695
  SharedFunctionInfo* share;
3696
  MaybeObject* maybe = Allocate(shared_function_info_map(), OLD_POINTER_SPACE);
3697
  if (!maybe->To<SharedFunctionInfo>(&share)) return maybe;
3698

    
3699
  // Set pointer fields.
3700
  share->set_name(name);
3701
  Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal);
3702
  share->set_code(illegal);
3703
  share->set_optimized_code_map(Smi::FromInt(0));
3704
  share->set_scope_info(ScopeInfo::Empty(isolate_));
3705
  Code* construct_stub =
3706
      isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric);
3707
  share->set_construct_stub(construct_stub);
3708
  share->set_instance_class_name(Object_string());
3709
  share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER);
3710
  share->set_script(undefined_value(), SKIP_WRITE_BARRIER);
3711
  share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER);
3712
  share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
3713
  share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
3714
  share->set_ast_node_count(0);
3715
  share->set_counters(0);
3716

    
3717
  // Set integer fields (smi or int, depending on the architecture).
3718
  share->set_length(0);
3719
  share->set_formal_parameter_count(0);
3720
  share->set_expected_nof_properties(0);
3721
  share->set_num_literals(0);
3722
  share->set_start_position_and_type(0);
3723
  share->set_end_position(0);
3724
  share->set_function_token_position(0);
3725
  // All compiler hints default to false or 0.
3726
  share->set_compiler_hints(0);
3727
  share->set_opt_count_and_bailout_reason(0);
3728

    
3729
  return share;
3730
}
3731

    
3732

    
3733
MaybeObject* Heap::AllocateJSMessageObject(String* type,
3734
                                           JSArray* arguments,
3735
                                           int start_position,
3736
                                           int end_position,
3737
                                           Object* script,
3738
                                           Object* stack_trace,
3739
                                           Object* stack_frames) {
3740
  Object* result;
3741
  { MaybeObject* maybe_result = Allocate(message_object_map(), NEW_SPACE);
3742
    if (!maybe_result->ToObject(&result)) return maybe_result;
3743
  }
3744
  JSMessageObject* message = JSMessageObject::cast(result);
3745
  message->set_properties(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3746
  message->initialize_elements();
3747
  message->set_elements(Heap::empty_fixed_array(), SKIP_WRITE_BARRIER);
3748
  message->set_type(type);
3749
  message->set_arguments(arguments);
3750
  message->set_start_position(start_position);
3751
  message->set_end_position(end_position);
3752
  message->set_script(script);
3753
  message->set_stack_trace(stack_trace);
3754
  message->set_stack_frames(stack_frames);
3755
  return result;
3756
}
3757

    
3758

    
3759

    
3760
// Returns true for a character in a range.  Both limits are inclusive.
3761
static inline bool Between(uint32_t character, uint32_t from, uint32_t to) {
3762
  // This makes uses of the the unsigned wraparound.
3763
  return character - from <= to - from;
3764
}
3765

    
3766

    
3767
MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
3768
    Heap* heap,
3769
    uint16_t c1,
3770
    uint16_t c2) {
3771
  String* result;
3772
  // Numeric strings have a different hash algorithm not known by
3773
  // LookupTwoCharsStringIfExists, so we skip this step for such strings.
3774
  if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) &&
3775
      heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) {
3776
    return result;
3777
  // Now we know the length is 2, we might as well make use of that fact
3778
  // when building the new string.
3779
  } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
3780
    // We can do this.
3781
    ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
3782
    Object* result;
3783
    { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
3784
      if (!maybe_result->ToObject(&result)) return maybe_result;
3785
    }
3786
    uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3787
    dest[0] = static_cast<uint8_t>(c1);
3788
    dest[1] = static_cast<uint8_t>(c2);
3789
    return result;
3790
  } else {
3791
    Object* result;
3792
    { MaybeObject* maybe_result = heap->AllocateRawTwoByteString(2);
3793
      if (!maybe_result->ToObject(&result)) return maybe_result;
3794
    }
3795
    uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3796
    dest[0] = c1;
3797
    dest[1] = c2;
3798
    return result;
3799
  }
3800
}
3801

    
3802

    
3803
MaybeObject* Heap::AllocateConsString(String* first, String* second) {
3804
  int first_length = first->length();
3805
  if (first_length == 0) {
3806
    return second;
3807
  }
3808

    
3809
  int second_length = second->length();
3810
  if (second_length == 0) {
3811
    return first;
3812
  }
3813

    
3814
  int length = first_length + second_length;
3815

    
3816
  // Optimization for 2-byte strings often used as keys in a decompression
3817
  // dictionary.  Check whether we already have the string in the string
3818
  // table to prevent creation of many unneccesary strings.
3819
  if (length == 2) {
3820
    uint16_t c1 = first->Get(0);
3821
    uint16_t c2 = second->Get(0);
3822
    return MakeOrFindTwoCharacterString(this, c1, c2);
3823
  }
3824

    
3825
  bool first_is_one_byte = first->IsOneByteRepresentation();
3826
  bool second_is_one_byte = second->IsOneByteRepresentation();
3827
  bool is_one_byte = first_is_one_byte && second_is_one_byte;
3828
  // Make sure that an out of memory exception is thrown if the length
3829
  // of the new cons string is too large.
3830
  if (length > String::kMaxLength || length < 0) {
3831
    isolate()->context()->mark_out_of_memory();
3832
    return Failure::OutOfMemoryException(0x4);
3833
  }
3834

    
3835
  bool is_one_byte_data_in_two_byte_string = false;
3836
  if (!is_one_byte) {
3837
    // At least one of the strings uses two-byte representation so we
3838
    // can't use the fast case code for short ASCII strings below, but
3839
    // we can try to save memory if all chars actually fit in ASCII.
3840
    is_one_byte_data_in_two_byte_string =
3841
        first->HasOnlyOneByteChars() && second->HasOnlyOneByteChars();
3842
    if (is_one_byte_data_in_two_byte_string) {
3843
      isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3844
    }
3845
  }
3846

    
3847
  // If the resulting string is small make a flat string.
3848
  if (length < ConsString::kMinLength) {
3849
    // Note that neither of the two inputs can be a slice because:
3850
    STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
3851
    ASSERT(first->IsFlat());
3852
    ASSERT(second->IsFlat());
3853
    if (is_one_byte) {
3854
      Object* result;
3855
      { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3856
        if (!maybe_result->ToObject(&result)) return maybe_result;
3857
      }
3858
      // Copy the characters into the new object.
3859
      uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3860
      // Copy first part.
3861
      const uint8_t* src;
3862
      if (first->IsExternalString()) {
3863
        src = ExternalAsciiString::cast(first)->GetChars();
3864
      } else {
3865
        src = SeqOneByteString::cast(first)->GetChars();
3866
      }
3867
      for (int i = 0; i < first_length; i++) *dest++ = src[i];
3868
      // Copy second part.
3869
      if (second->IsExternalString()) {
3870
        src = ExternalAsciiString::cast(second)->GetChars();
3871
      } else {
3872
        src = SeqOneByteString::cast(second)->GetChars();
3873
      }
3874
      for (int i = 0; i < second_length; i++) *dest++ = src[i];
3875
      return result;
3876
    } else {
3877
      if (is_one_byte_data_in_two_byte_string) {
3878
        Object* result;
3879
        { MaybeObject* maybe_result = AllocateRawOneByteString(length);
3880
          if (!maybe_result->ToObject(&result)) return maybe_result;
3881
        }
3882
        // Copy the characters into the new object.
3883
        uint8_t* dest = SeqOneByteString::cast(result)->GetChars();
3884
        String::WriteToFlat(first, dest, 0, first_length);
3885
        String::WriteToFlat(second, dest + first_length, 0, second_length);
3886
        isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
3887
        return result;
3888
      }
3889

    
3890
      Object* result;
3891
      { MaybeObject* maybe_result = AllocateRawTwoByteString(length);
3892
        if (!maybe_result->ToObject(&result)) return maybe_result;
3893
      }
3894
      // Copy the characters into the new object.
3895
      uc16* dest = SeqTwoByteString::cast(result)->GetChars();
3896
      String::WriteToFlat(first, dest, 0, first_length);
3897
      String::WriteToFlat(second, dest + first_length, 0, second_length);
3898
      return result;
3899
    }
3900
  }
3901

    
3902
  Map* map = (is_one_byte || is_one_byte_data_in_two_byte_string) ?
3903
      cons_ascii_string_map() : cons_string_map();
3904

    
3905
  Object* result;
3906
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3907
    if (!maybe_result->ToObject(&result)) return maybe_result;
3908
  }
3909

    
3910
  DisallowHeapAllocation no_gc;
3911
  ConsString* cons_string = ConsString::cast(result);
3912
  WriteBarrierMode mode = cons_string->GetWriteBarrierMode(no_gc);
3913
  cons_string->set_length(length);
3914
  cons_string->set_hash_field(String::kEmptyHashField);
3915
  cons_string->set_first(first, mode);
3916
  cons_string->set_second(second, mode);
3917
  return result;
3918
}
3919

    
3920

    
3921
MaybeObject* Heap::AllocateSubString(String* buffer,
3922
                                     int start,
3923
                                     int end,
3924
                                     PretenureFlag pretenure) {
3925
  int length = end - start;
3926
  if (length <= 0) {
3927
    return empty_string();
3928
  } else if (length == 1) {
3929
    return LookupSingleCharacterStringFromCode(buffer->Get(start));
3930
  } else if (length == 2) {
3931
    // Optimization for 2-byte strings often used as keys in a decompression
3932
    // dictionary.  Check whether we already have the string in the string
3933
    // table to prevent creation of many unnecessary strings.
3934
    uint16_t c1 = buffer->Get(start);
3935
    uint16_t c2 = buffer->Get(start + 1);
3936
    return MakeOrFindTwoCharacterString(this, c1, c2);
3937
  }
3938

    
3939
  // Make an attempt to flatten the buffer to reduce access time.
3940
  buffer = buffer->TryFlattenGetString();
3941

    
3942
  if (!FLAG_string_slices ||
3943
      !buffer->IsFlat() ||
3944
      length < SlicedString::kMinLength ||
3945
      pretenure == TENURED) {
3946
    Object* result;
3947
    // WriteToFlat takes care of the case when an indirect string has a
3948
    // different encoding from its underlying string.  These encodings may
3949
    // differ because of externalization.
3950
    bool is_one_byte = buffer->IsOneByteRepresentation();
3951
    { MaybeObject* maybe_result = is_one_byte
3952
                                  ? AllocateRawOneByteString(length, pretenure)
3953
                                  : AllocateRawTwoByteString(length, pretenure);
3954
      if (!maybe_result->ToObject(&result)) return maybe_result;
3955
    }
3956
    String* string_result = String::cast(result);
3957
    // Copy the characters into the new object.
3958
    if (is_one_byte) {
3959
      ASSERT(string_result->IsOneByteRepresentation());
3960
      uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars();
3961
      String::WriteToFlat(buffer, dest, start, end);
3962
    } else {
3963
      ASSERT(string_result->IsTwoByteRepresentation());
3964
      uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
3965
      String::WriteToFlat(buffer, dest, start, end);
3966
    }
3967
    return result;
3968
  }
3969

    
3970
  ASSERT(buffer->IsFlat());
3971
#if VERIFY_HEAP
3972
  if (FLAG_verify_heap) {
3973
    buffer->StringVerify();
3974
  }
3975
#endif
3976

    
3977
  Object* result;
3978
  // When slicing an indirect string we use its encoding for a newly created
3979
  // slice and don't check the encoding of the underlying string.  This is safe
3980
  // even if the encodings are different because of externalization.  If an
3981
  // indirect ASCII string is pointing to a two-byte string, the two-byte char
3982
  // codes of the underlying string must still fit into ASCII (because
3983
  // externalization must not change char codes).
3984
  { Map* map = buffer->IsOneByteRepresentation()
3985
                 ? sliced_ascii_string_map()
3986
                 : sliced_string_map();
3987
    MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3988
    if (!maybe_result->ToObject(&result)) return maybe_result;
3989
  }
3990

    
3991
  DisallowHeapAllocation no_gc;
3992
  SlicedString* sliced_string = SlicedString::cast(result);
3993
  sliced_string->set_length(length);
3994
  sliced_string->set_hash_field(String::kEmptyHashField);
3995
  if (buffer->IsConsString()) {
3996
    ConsString* cons = ConsString::cast(buffer);
3997
    ASSERT(cons->second()->length() == 0);
3998
    sliced_string->set_parent(cons->first());
3999
    sliced_string->set_offset(start);
4000
  } else if (buffer->IsSlicedString()) {
4001
    // Prevent nesting sliced strings.
4002
    SlicedString* parent_slice = SlicedString::cast(buffer);
4003
    sliced_string->set_parent(parent_slice->parent());
4004
    sliced_string->set_offset(start + parent_slice->offset());
4005
  } else {
4006
    sliced_string->set_parent(buffer);
4007
    sliced_string->set_offset(start);
4008
  }
4009
  ASSERT(sliced_string->parent()->IsSeqString() ||
4010
         sliced_string->parent()->IsExternalString());
4011
  return result;
4012
}
4013

    
4014

    
4015
MaybeObject* Heap::AllocateExternalStringFromAscii(
4016
    const ExternalAsciiString::Resource* resource) {
4017
  size_t length = resource->length();
4018
  if (length > static_cast<size_t>(String::kMaxLength)) {
4019
    isolate()->context()->mark_out_of_memory();
4020
    return Failure::OutOfMemoryException(0x5);
4021
  }
4022

    
4023
  Map* map = external_ascii_string_map();
4024
  Object* result;
4025
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4026
    if (!maybe_result->ToObject(&result)) return maybe_result;
4027
  }
4028

    
4029
  ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
4030
  external_string->set_length(static_cast<int>(length));
4031
  external_string->set_hash_field(String::kEmptyHashField);
4032
  external_string->set_resource(resource);
4033

    
4034
  return result;
4035
}
4036

    
4037

    
4038
MaybeObject* Heap::AllocateExternalStringFromTwoByte(
4039
    const ExternalTwoByteString::Resource* resource) {
4040
  size_t length = resource->length();
4041
  if (length > static_cast<size_t>(String::kMaxLength)) {
4042
    isolate()->context()->mark_out_of_memory();
4043
    return Failure::OutOfMemoryException(0x6);
4044
  }
4045

    
4046
  // For small strings we check whether the resource contains only
4047
  // one byte characters.  If yes, we use a different string map.
4048
  static const size_t kOneByteCheckLengthLimit = 32;
4049
  bool is_one_byte = length <= kOneByteCheckLengthLimit &&
4050
      String::IsOneByte(resource->data(), static_cast<int>(length));
4051
  Map* map = is_one_byte ?
4052
      external_string_with_one_byte_data_map() : external_string_map();
4053
  Object* result;
4054
  { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4055
    if (!maybe_result->ToObject(&result)) return maybe_result;
4056
  }
4057

    
4058
  ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
4059
  external_string->set_length(static_cast<int>(length));
4060
  external_string->set_hash_field(String::kEmptyHashField);
4061
  external_string->set_resource(resource);
4062

    
4063
  return result;
4064
}
4065

    
4066

    
4067
MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
4068
  if (code <= String::kMaxOneByteCharCode) {
4069
    Object* value = single_character_string_cache()->get(code);
4070
    if (value != undefined_value()) return value;
4071

    
4072
    uint8_t buffer[1];
4073
    buffer[0] = static_cast<uint8_t>(code);
4074
    Object* result;
4075
    MaybeObject* maybe_result =
4076
        InternalizeOneByteString(Vector<const uint8_t>(buffer, 1));
4077

    
4078
    if (!maybe_result->ToObject(&result)) return maybe_result;
4079
    single_character_string_cache()->set(code, result);
4080
    return result;
4081
  }
4082

    
4083
  Object* result;
4084
  { MaybeObject* maybe_result = AllocateRawTwoByteString(1);
4085
    if (!maybe_result->ToObject(&result)) return maybe_result;
4086
  }
4087
  String* answer = String::cast(result);
4088
  answer->Set(0, code);
4089
  return answer;
4090
}
4091

    
4092

    
4093
MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
4094
  if (length < 0 || length > ByteArray::kMaxLength) {
4095
    return Failure::OutOfMemoryException(0x7);
4096
  }
4097
  int size = ByteArray::SizeFor(length);
4098
  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4099
  Object* result;
4100
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4101
    if (!maybe_result->ToObject(&result)) return maybe_result;
4102
  }
4103

    
4104
  reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
4105
      byte_array_map());
4106
  reinterpret_cast<ByteArray*>(result)->set_length(length);
4107
  return result;
4108
}
4109

    
4110

    
4111
void Heap::CreateFillerObjectAt(Address addr, int size) {
4112
  if (size == 0) return;
4113
  HeapObject* filler = HeapObject::FromAddress(addr);
4114
  if (size == kPointerSize) {
4115
    filler->set_map_no_write_barrier(one_pointer_filler_map());
4116
  } else if (size == 2 * kPointerSize) {
4117
    filler->set_map_no_write_barrier(two_pointer_filler_map());
4118
  } else {
4119
    filler->set_map_no_write_barrier(free_space_map());
4120
    FreeSpace::cast(filler)->set_size(size);
4121
  }
4122
}
4123

    
4124

    
4125
MaybeObject* Heap::AllocateExternalArray(int length,
4126
                                         ExternalArrayType array_type,
4127
                                         void* external_pointer,
4128
                                         PretenureFlag pretenure) {
4129
  int size = ExternalArray::kAlignedSize;
4130
  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
4131
  Object* result;
4132
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
4133
    if (!maybe_result->ToObject(&result)) return maybe_result;
4134
  }
4135

    
4136
  reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
4137
      MapForExternalArrayType(array_type));
4138
  reinterpret_cast<ExternalArray*>(result)->set_length(length);
4139
  reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
4140
      external_pointer);
4141

    
4142
  return result;
4143
}
4144

    
4145

    
4146
MaybeObject* Heap::CreateCode(const CodeDesc& desc,
4147
                              Code::Flags flags,
4148
                              Handle<Object> self_reference,
4149
                              bool immovable,
4150
                              bool crankshafted,
4151
                              int prologue_offset) {
4152
  // Allocate ByteArray before the Code object, so that we do not risk
4153
  // leaving uninitialized Code object (and breaking the heap).
4154
  ByteArray* reloc_info;
4155
  MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
4156
  if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
4157

    
4158
  // Compute size.
4159
  int body_size = RoundUp(desc.instr_size, kObjectAlignment);
4160
  int obj_size = Code::SizeFor(body_size);
4161
  ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
4162
  MaybeObject* maybe_result;
4163
  // Large code objects and code objects which should stay at a fixed address
4164
  // are allocated in large object space.
4165
  HeapObject* result;
4166
  bool force_lo_space = obj_size > code_space()->AreaSize();
4167
  if (force_lo_space) {
4168
    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4169
  } else {
4170
    maybe_result = code_space_->AllocateRaw(obj_size);
4171
  }
4172
  if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4173

    
4174
  if (immovable && !force_lo_space &&
4175
      // Objects on the first page of each space are never moved.
4176
      !code_space_->FirstPage()->Contains(result->address())) {
4177
    // Discard the first code allocation, which was on a page where it could be
4178
    // moved.
4179
    CreateFillerObjectAt(result->address(), obj_size);
4180
    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4181
    if (!maybe_result->To<HeapObject>(&result)) return maybe_result;
4182
  }
4183

    
4184
  // Initialize the object
4185
  result->set_map_no_write_barrier(code_map());
4186
  Code* code = Code::cast(result);
4187
  ASSERT(!isolate_->code_range()->exists() ||
4188
      isolate_->code_range()->contains(code->address()));
4189
  code->set_instruction_size(desc.instr_size);
4190
  code->set_relocation_info(reloc_info);
4191
  code->set_flags(flags);
4192
  if (code->is_call_stub() || code->is_keyed_call_stub()) {
4193
    code->set_check_type(RECEIVER_MAP_CHECK);
4194
  }
4195
  code->set_is_crankshafted(crankshafted);
4196
  code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
4197
  code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
4198
  code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
4199
  code->set_gc_metadata(Smi::FromInt(0));
4200
  code->set_ic_age(global_ic_age_);
4201
  code->set_prologue_offset(prologue_offset);
4202
  if (code->kind() == Code::OPTIMIZED_FUNCTION) {
4203
    code->set_marked_for_deoptimization(false);
4204
  }
4205

    
4206
#ifdef ENABLE_DEBUGGER_SUPPORT
4207
  if (code->kind() == Code::FUNCTION) {
4208
    code->set_has_debug_break_slots(
4209
        isolate_->debugger()->IsDebuggerActive());
4210
  }
4211
#endif
4212

    
4213
  // Allow self references to created code object by patching the handle to
4214
  // point to the newly allocated Code object.
4215
  if (!self_reference.is_null()) {
4216
    *(self_reference.location()) = code;
4217
  }
4218
  // Migrate generated code.
4219
  // The generated code can contain Object** values (typically from handles)
4220
  // that are dereferenced during the copy to point directly to the actual heap
4221
  // objects. These pointers can include references to the code object itself,
4222
  // through the self_reference parameter.
4223
  code->CopyFrom(desc);
4224

    
4225
#ifdef VERIFY_HEAP
4226
  if (FLAG_verify_heap) {
4227
    code->Verify();
4228
  }
4229
#endif
4230
  return code;
4231
}
4232

    
4233

    
4234
MaybeObject* Heap::CopyCode(Code* code) {
4235
  // Allocate an object the same size as the code object.
4236
  int obj_size = code->Size();
4237
  MaybeObject* maybe_result;
4238
  if (obj_size > code_space()->AreaSize()) {
4239
    maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
4240
  } else {
4241
    maybe_result = code_space_->AllocateRaw(obj_size);
4242
  }
4243

    
4244
  Object* result;
4245
  if (!maybe_result->ToObject(&result)) return maybe_result;
4246

    
4247
  // Copy code object.
4248
  Address old_addr = code->address();
4249
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4250
  CopyBlock(new_addr, old_addr, obj_size);
4251
  // Relocate the copy.
4252
  Code* new_code = Code::cast(result);
4253
  ASSERT(!isolate_->code_range()->exists() ||
4254
      isolate_->code_range()->contains(code->address()));
4255
  new_code->Relocate(new_addr - old_addr);
4256
  return new_code;
4257
}
4258

    
4259

    
4260
MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
4261
  // Allocate ByteArray before the Code object, so that we do not risk
4262
  // leaving uninitialized Code object (and breaking the heap).
4263
  Object* reloc_info_array;
4264
  { MaybeObject* maybe_reloc_info_array =
4265
        AllocateByteArray(reloc_info.length(), TENURED);
4266
    if (!maybe_reloc_info_array->ToObject(&reloc_info_array)) {
4267
      return maybe_reloc_info_array;
4268
    }
4269
  }
4270

    
4271
  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
4272

    
4273
  int new_obj_size = Code::SizeFor(new_body_size);
4274

    
4275
  Address old_addr = code->address();
4276

    
4277
  size_t relocation_offset =
4278
      static_cast<size_t>(code->instruction_end() - old_addr);
4279

    
4280
  MaybeObject* maybe_result;
4281
  if (new_obj_size > code_space()->AreaSize()) {
4282
    maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
4283
  } else {
4284
    maybe_result = code_space_->AllocateRaw(new_obj_size);
4285
  }
4286

    
4287
  Object* result;
4288
  if (!maybe_result->ToObject(&result)) return maybe_result;
4289

    
4290
  // Copy code object.
4291
  Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
4292

    
4293
  // Copy header and instructions.
4294
  CopyBytes(new_addr, old_addr, relocation_offset);
4295

    
4296
  Code* new_code = Code::cast(result);
4297
  new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
4298

    
4299
  // Copy patched rinfo.
4300
  CopyBytes(new_code->relocation_start(),
4301
            reloc_info.start(),
4302
            static_cast<size_t>(reloc_info.length()));
4303

    
4304
  // Relocate the copy.
4305
  ASSERT(!isolate_->code_range()->exists() ||
4306
      isolate_->code_range()->contains(code->address()));
4307
  new_code->Relocate(new_addr - old_addr);
4308

    
4309
#ifdef VERIFY_HEAP
4310
  if (FLAG_verify_heap) {
4311
    code->Verify();
4312
  }
4313
#endif
4314
  return new_code;
4315
}
4316

    
4317

    
4318
MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space,
4319
    Handle<AllocationSite> allocation_site) {
4320
  ASSERT(gc_state_ == NOT_IN_GC);
4321
  ASSERT(map->instance_type() != MAP_TYPE);
4322
  // If allocation failures are disallowed, we may allocate in a different
4323
  // space when new space is full and the object is not a large object.
4324
  AllocationSpace retry_space =
4325
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4326
  int size = map->instance_size() + AllocationMemento::kSize;
4327
  Object* result;
4328
  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4329
  if (!maybe_result->ToObject(&result)) return maybe_result;
4330
  // No need for write barrier since object is white and map is in old space.
4331
  HeapObject::cast(result)->set_map_no_write_barrier(map);
4332
  AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4333
      reinterpret_cast<Address>(result) + map->instance_size());
4334
  alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4335
  ASSERT(allocation_site->map() == allocation_site_map());
4336
  alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
4337
  return result;
4338
}
4339

    
4340

    
4341
MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
4342
  ASSERT(gc_state_ == NOT_IN_GC);
4343
  ASSERT(map->instance_type() != MAP_TYPE);
4344
  // If allocation failures are disallowed, we may allocate in a different
4345
  // space when new space is full and the object is not a large object.
4346
  AllocationSpace retry_space =
4347
      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
4348
  int size = map->instance_size();
4349
  Object* result;
4350
  MaybeObject* maybe_result = AllocateRaw(size, space, retry_space);
4351
  if (!maybe_result->ToObject(&result)) return maybe_result;
4352
  // No need for write barrier since object is white and map is in old space.
4353
  HeapObject::cast(result)->set_map_no_write_barrier(map);
4354
  return result;
4355
}
4356

    
4357

    
4358
void Heap::InitializeFunction(JSFunction* function,
4359
                              SharedFunctionInfo* shared,
4360
                              Object* prototype) {
4361
  ASSERT(!prototype->IsMap());
4362
  function->initialize_properties();
4363
  function->initialize_elements();
4364
  function->set_shared(shared);
4365
  function->set_code(shared->code());
4366
  function->set_prototype_or_initial_map(prototype);
4367
  function->set_context(undefined_value());
4368
  function->set_literals_or_bindings(empty_fixed_array());
4369
  function->set_next_function_link(undefined_value());
4370
}
4371

    
4372

    
4373
MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
4374
  // Make sure to use globals from the function's context, since the function
4375
  // can be from a different context.
4376
  Context* native_context = function->context()->native_context();
4377
  Map* new_map;
4378
  if (function->shared()->is_generator()) {
4379
    // Generator prototypes can share maps since they don't have "constructor"
4380
    // properties.
4381
    new_map = native_context->generator_object_prototype_map();
4382
  } else {
4383
    // Each function prototype gets a fresh map to avoid unwanted sharing of
4384
    // maps between prototypes of different constructors.
4385
    JSFunction* object_function = native_context->object_function();
4386
    ASSERT(object_function->has_initial_map());
4387
    MaybeObject* maybe_map = object_function->initial_map()->Copy();
4388
    if (!maybe_map->To(&new_map)) return maybe_map;
4389
  }
4390

    
4391
  Object* prototype;
4392
  MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
4393
  if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
4394

    
4395
  if (!function->shared()->is_generator()) {
4396
    MaybeObject* maybe_failure =
4397
        JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributesTrampoline(
4398
            constructor_string(), function, DONT_ENUM);
4399
    if (maybe_failure->IsFailure()) return maybe_failure;
4400
  }
4401

    
4402
  return prototype;
4403
}
4404

    
4405

    
4406
MaybeObject* Heap::AllocateFunction(Map* function_map,
4407
                                    SharedFunctionInfo* shared,
4408
                                    Object* prototype,
4409
                                    PretenureFlag pretenure) {
4410
  AllocationSpace space =
4411
      (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
4412
  Object* result;
4413
  { MaybeObject* maybe_result = Allocate(function_map, space);
4414
    if (!maybe_result->ToObject(&result)) return maybe_result;
4415
  }
4416
  InitializeFunction(JSFunction::cast(result), shared, prototype);
4417
  return result;
4418
}
4419

    
4420

    
4421
MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
4422
  // To get fast allocation and map sharing for arguments objects we
4423
  // allocate them based on an arguments boilerplate.
4424

    
4425
  JSObject* boilerplate;
4426
  int arguments_object_size;
4427
  bool strict_mode_callee = callee->IsJSFunction() &&
4428
      !JSFunction::cast(callee)->shared()->is_classic_mode();
4429
  if (strict_mode_callee) {
4430
    boilerplate =
4431
        isolate()->context()->native_context()->
4432
            strict_mode_arguments_boilerplate();
4433
    arguments_object_size = kArgumentsObjectSizeStrict;
4434
  } else {
4435
    boilerplate =
4436
        isolate()->context()->native_context()->arguments_boilerplate();
4437
    arguments_object_size = kArgumentsObjectSize;
4438
  }
4439

    
4440
  // Check that the size of the boilerplate matches our
4441
  // expectations. The ArgumentsAccessStub::GenerateNewObject relies
4442
  // on the size being a known constant.
4443
  ASSERT(arguments_object_size == boilerplate->map()->instance_size());
4444

    
4445
  // Do the allocation.
4446
  Object* result;
4447
  { MaybeObject* maybe_result =
4448
        AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
4449
    if (!maybe_result->ToObject(&result)) return maybe_result;
4450
  }
4451

    
4452
  // Copy the content. The arguments boilerplate doesn't have any
4453
  // fields that point to new space so it's safe to skip the write
4454
  // barrier here.
4455
  CopyBlock(HeapObject::cast(result)->address(),
4456
            boilerplate->address(),
4457
            JSObject::kHeaderSize);
4458

    
4459
  // Set the length property.
4460
  JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsLengthIndex,
4461
                                                Smi::FromInt(length),
4462
                                                SKIP_WRITE_BARRIER);
4463
  // Set the callee property for non-strict mode arguments object only.
4464
  if (!strict_mode_callee) {
4465
    JSObject::cast(result)->InObjectPropertyAtPut(kArgumentsCalleeIndex,
4466
                                                  callee);
4467
  }
4468

    
4469
  // Check the state of the object
4470
  ASSERT(JSObject::cast(result)->HasFastProperties());
4471
  ASSERT(JSObject::cast(result)->HasFastObjectElements());
4472

    
4473
  return result;
4474
}
4475

    
4476

    
4477
MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
4478
  ASSERT(!fun->has_initial_map());
4479

    
4480
  // First create a new map with the size and number of in-object properties
4481
  // suggested by the function.
4482
  InstanceType instance_type;
4483
  int instance_size;
4484
  int in_object_properties;
4485
  if (fun->shared()->is_generator()) {
4486
    instance_type = JS_GENERATOR_OBJECT_TYPE;
4487
    instance_size = JSGeneratorObject::kSize;
4488
    in_object_properties = 0;
4489
  } else {
4490
    instance_type = JS_OBJECT_TYPE;
4491
    instance_size = fun->shared()->CalculateInstanceSize();
4492
    in_object_properties = fun->shared()->CalculateInObjectProperties();
4493
  }
4494
  Map* map;
4495
  MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
4496
  if (!maybe_map->To(&map)) return maybe_map;
4497

    
4498
  // Fetch or allocate prototype.
4499
  Object* prototype;
4500
  if (fun->has_instance_prototype()) {
4501
    prototype = fun->instance_prototype();
4502
  } else {
4503
    MaybeObject* maybe_prototype = AllocateFunctionPrototype(fun);
4504
    if (!maybe_prototype->To(&prototype)) return maybe_prototype;
4505
  }
4506
  map->set_inobject_properties(in_object_properties);
4507
  map->set_unused_property_fields(in_object_properties);
4508
  map->set_prototype(prototype);
4509
  ASSERT(map->has_fast_object_elements());
4510

    
4511
  if (!fun->shared()->is_generator()) {
4512
    fun->shared()->StartInobjectSlackTracking(map);
4513
  }
4514

    
4515
  return map;
4516
}
4517

    
4518

    
4519
void Heap::InitializeJSObjectFromMap(JSObject* obj,
4520
                                     FixedArray* properties,
4521
                                     Map* map) {
4522
  obj->set_properties(properties);
4523
  obj->initialize_elements();
4524
  // TODO(1240798): Initialize the object's body using valid initial values
4525
  // according to the object's initial map.  For example, if the map's
4526
  // instance type is JS_ARRAY_TYPE, the length field should be initialized
4527
  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
4528
  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
4529
  // verification code has to cope with (temporarily) invalid objects.  See
4530
  // for example, JSArray::JSArrayVerify).
4531
  Object* filler;
4532
  // We cannot always fill with one_pointer_filler_map because objects
4533
  // created from API functions expect their internal fields to be initialized
4534
  // with undefined_value.
4535
  // Pre-allocated fields need to be initialized with undefined_value as well
4536
  // so that object accesses before the constructor completes (e.g. in the
4537
  // debugger) will not cause a crash.
4538
  if (map->constructor()->IsJSFunction() &&
4539
      JSFunction::cast(map->constructor())->shared()->
4540
          IsInobjectSlackTrackingInProgress()) {
4541
    // We might want to shrink the object later.
4542
    ASSERT(obj->GetInternalFieldCount() == 0);
4543
    filler = Heap::one_pointer_filler_map();
4544
  } else {
4545
    filler = Heap::undefined_value();
4546
  }
4547
  obj->InitializeBody(map, Heap::undefined_value(), filler);
4548
}
4549

    
4550

    
4551
MaybeObject* Heap::AllocateJSObjectFromMap(
4552
    Map* map, PretenureFlag pretenure, bool allocate_properties) {
4553
  // JSFunctions should be allocated using AllocateFunction to be
4554
  // properly initialized.
4555
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4556

    
4557
  // Both types of global objects should be allocated using
4558
  // AllocateGlobalObject to be properly initialized.
4559
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4560
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4561

    
4562
  // Allocate the backing storage for the properties.
4563
  FixedArray* properties;
4564
  if (allocate_properties) {
4565
    int prop_size = map->InitialPropertiesLength();
4566
    ASSERT(prop_size >= 0);
4567
    { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, pretenure);
4568
      if (!maybe_properties->To(&properties)) return maybe_properties;
4569
    }
4570
  } else {
4571
    properties = empty_fixed_array();
4572
  }
4573

    
4574
  // Allocate the JSObject.
4575
  int size = map->instance_size();
4576
  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
4577
  Object* obj;
4578
  MaybeObject* maybe_obj = Allocate(map, space);
4579
  if (!maybe_obj->To(&obj)) return maybe_obj;
4580

    
4581
  // Initialize the JSObject.
4582
  InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4583
  ASSERT(JSObject::cast(obj)->HasFastElements() ||
4584
         JSObject::cast(obj)->HasExternalArrayElements());
4585
  return obj;
4586
}
4587

    
4588

    
4589
MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(
4590
    Map* map, Handle<AllocationSite> allocation_site) {
4591
  // JSFunctions should be allocated using AllocateFunction to be
4592
  // properly initialized.
4593
  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
4594

    
4595
  // Both types of global objects should be allocated using
4596
  // AllocateGlobalObject to be properly initialized.
4597
  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
4598
  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
4599

    
4600
  // Allocate the backing storage for the properties.
4601
  int prop_size = map->InitialPropertiesLength();
4602
  ASSERT(prop_size >= 0);
4603
  FixedArray* properties;
4604
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size);
4605
    if (!maybe_properties->To(&properties)) return maybe_properties;
4606
  }
4607

    
4608
  // Allocate the JSObject.
4609
  int size = map->instance_size();
4610
  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, NOT_TENURED);
4611
  Object* obj;
4612
  MaybeObject* maybe_obj =
4613
      AllocateWithAllocationSite(map, space, allocation_site);
4614
  if (!maybe_obj->To(&obj)) return maybe_obj;
4615

    
4616
  // Initialize the JSObject.
4617
  InitializeJSObjectFromMap(JSObject::cast(obj), properties, map);
4618
  ASSERT(JSObject::cast(obj)->HasFastElements());
4619
  return obj;
4620
}
4621

    
4622

    
4623
MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
4624
                                    PretenureFlag pretenure) {
4625
  // Allocate the initial map if absent.
4626
  if (!constructor->has_initial_map()) {
4627
    Object* initial_map;
4628
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4629
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4630
    }
4631
    constructor->set_initial_map(Map::cast(initial_map));
4632
    Map::cast(initial_map)->set_constructor(constructor);
4633
  }
4634
  // Allocate the object based on the constructors initial map.
4635
  MaybeObject* result = AllocateJSObjectFromMap(
4636
      constructor->initial_map(), pretenure);
4637
#ifdef DEBUG
4638
  // Make sure result is NOT a global object if valid.
4639
  Object* non_failure;
4640
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4641
#endif
4642
  return result;
4643
}
4644

    
4645

    
4646
MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
4647
    Handle<AllocationSite> allocation_site) {
4648
  // Allocate the initial map if absent.
4649
  if (!constructor->has_initial_map()) {
4650
    Object* initial_map;
4651
    { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
4652
      if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map;
4653
    }
4654
    constructor->set_initial_map(Map::cast(initial_map));
4655
    Map::cast(initial_map)->set_constructor(constructor);
4656
  }
4657
  // Allocate the object based on the constructors initial map, or the payload
4658
  // advice
4659
  Map* initial_map = constructor->initial_map();
4660

    
4661
  Smi* smi = Smi::cast(allocation_site->transition_info());
4662
  ElementsKind to_kind = static_cast<ElementsKind>(smi->value());
4663
  AllocationSiteMode mode = TRACK_ALLOCATION_SITE;
4664
  if (to_kind != initial_map->elements_kind()) {
4665
    MaybeObject* maybe_new_map = initial_map->AsElementsKind(to_kind);
4666
    if (!maybe_new_map->To(&initial_map)) return maybe_new_map;
4667
    // Possibly alter the mode, since we found an updated elements kind
4668
    // in the type info cell.
4669
    mode = AllocationSite::GetMode(to_kind);
4670
  }
4671

    
4672
  MaybeObject* result;
4673
  if (mode == TRACK_ALLOCATION_SITE) {
4674
    result = AllocateJSObjectFromMapWithAllocationSite(initial_map,
4675
        allocation_site);
4676
  } else {
4677
    result = AllocateJSObjectFromMap(initial_map, NOT_TENURED);
4678
  }
4679
#ifdef DEBUG
4680
  // Make sure result is NOT a global object if valid.
4681
  Object* non_failure;
4682
  ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject());
4683
#endif
4684
  return result;
4685
}
4686

    
4687

    
4688
MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
4689
  ASSERT(function->shared()->is_generator());
4690
  Map *map;
4691
  if (function->has_initial_map()) {
4692
    map = function->initial_map();
4693
  } else {
4694
    // Allocate the initial map if absent.
4695
    MaybeObject* maybe_map = AllocateInitialMap(function);
4696
    if (!maybe_map->To(&map)) return maybe_map;
4697
    function->set_initial_map(map);
4698
    map->set_constructor(function);
4699
  }
4700
  ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
4701
  return AllocateJSObjectFromMap(map);
4702
}
4703

    
4704

    
4705
MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
4706
  // Allocate a fresh map. Modules do not have a prototype.
4707
  Map* map;
4708
  MaybeObject* maybe_map = AllocateMap(JS_MODULE_TYPE, JSModule::kSize);
4709
  if (!maybe_map->To(&map)) return maybe_map;
4710
  // Allocate the object based on the map.
4711
  JSModule* module;
4712
  MaybeObject* maybe_module = AllocateJSObjectFromMap(map, TENURED);
4713
  if (!maybe_module->To(&module)) return maybe_module;
4714
  module->set_context(context);
4715
  module->set_scope_info(scope_info);
4716
  return module;
4717
}
4718

    
4719

    
4720
MaybeObject* Heap::AllocateJSArrayAndStorage(
4721
    ElementsKind elements_kind,
4722
    int length,
4723
    int capacity,
4724
    ArrayStorageAllocationMode mode,
4725
    PretenureFlag pretenure) {
4726
  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4727
  JSArray* array;
4728
  if (!maybe_array->To(&array)) return maybe_array;
4729

    
4730
  // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage
4731
  // for performance reasons.
4732
  ASSERT(capacity >= length);
4733

    
4734
  if (capacity == 0) {
4735
    array->set_length(Smi::FromInt(0));
4736
    array->set_elements(empty_fixed_array());
4737
    return array;
4738
  }
4739

    
4740
  FixedArrayBase* elms;
4741
  MaybeObject* maybe_elms = NULL;
4742
  if (IsFastDoubleElementsKind(elements_kind)) {
4743
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4744
      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4745
    } else {
4746
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4747
      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4748
    }
4749
  } else {
4750
    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4751
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4752
      maybe_elms = AllocateUninitializedFixedArray(capacity);
4753
    } else {
4754
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4755
      maybe_elms = AllocateFixedArrayWithHoles(capacity);
4756
    }
4757
  }
4758
  if (!maybe_elms->To(&elms)) return maybe_elms;
4759

    
4760
  array->set_elements(elms);
4761
  array->set_length(Smi::FromInt(length));
4762
  return array;
4763
}
4764

    
4765

    
4766
MaybeObject* Heap::AllocateJSArrayStorage(
4767
    JSArray* array,
4768
    int length,
4769
    int capacity,
4770
    ArrayStorageAllocationMode mode) {
4771
  ASSERT(capacity >= length);
4772

    
4773
  if (capacity == 0) {
4774
    array->set_length(Smi::FromInt(0));
4775
    array->set_elements(empty_fixed_array());
4776
    return array;
4777
  }
4778

    
4779
  FixedArrayBase* elms;
4780
  MaybeObject* maybe_elms = NULL;
4781
  ElementsKind elements_kind = array->GetElementsKind();
4782
  if (IsFastDoubleElementsKind(elements_kind)) {
4783
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4784
      maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
4785
    } else {
4786
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4787
      maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity);
4788
    }
4789
  } else {
4790
    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
4791
    if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
4792
      maybe_elms = AllocateUninitializedFixedArray(capacity);
4793
    } else {
4794
      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
4795
      maybe_elms = AllocateFixedArrayWithHoles(capacity);
4796
    }
4797
  }
4798
  if (!maybe_elms->To(&elms)) return maybe_elms;
4799

    
4800
  array->set_elements(elms);
4801
  array->set_length(Smi::FromInt(length));
4802
  return array;
4803
}
4804

    
4805

    
4806
MaybeObject* Heap::AllocateJSArrayWithElements(
4807
    FixedArrayBase* elements,
4808
    ElementsKind elements_kind,
4809
    int length,
4810
    PretenureFlag pretenure) {
4811
  MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
4812
  JSArray* array;
4813
  if (!maybe_array->To(&array)) return maybe_array;
4814

    
4815
  array->set_elements(elements);
4816
  array->set_length(Smi::FromInt(length));
4817
  array->ValidateElements();
4818
  return array;
4819
}
4820

    
4821

    
4822
MaybeObject* Heap::AllocateJSProxy(Object* handler, Object* prototype) {
4823
  // Allocate map.
4824
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4825
  // maps. Will probably depend on the identity of the handler object, too.
4826
  Map* map;
4827
  MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
4828
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4829
  map->set_prototype(prototype);
4830

    
4831
  // Allocate the proxy object.
4832
  JSProxy* result;
4833
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4834
  if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
4835
  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4836
  result->set_handler(handler);
4837
  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4838
  return result;
4839
}
4840

    
4841

    
4842
MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
4843
                                           Object* call_trap,
4844
                                           Object* construct_trap,
4845
                                           Object* prototype) {
4846
  // Allocate map.
4847
  // TODO(rossberg): Once we optimize proxies, think about a scheme to share
4848
  // maps. Will probably depend on the identity of the handler object, too.
4849
  Map* map;
4850
  MaybeObject* maybe_map_obj =
4851
      AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
4852
  if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
4853
  map->set_prototype(prototype);
4854

    
4855
  // Allocate the proxy object.
4856
  JSFunctionProxy* result;
4857
  MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
4858
  if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
4859
  result->InitializeBody(map->instance_size(), Smi::FromInt(0));
4860
  result->set_handler(handler);
4861
  result->set_hash(undefined_value(), SKIP_WRITE_BARRIER);
4862
  result->set_call_trap(call_trap);
4863
  result->set_construct_trap(construct_trap);
4864
  return result;
4865
}
4866

    
4867

    
4868
MaybeObject* Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
4869
  // Never used to copy functions.  If functions need to be copied we
4870
  // have to be careful to clear the literals array.
4871
  SLOW_ASSERT(!source->IsJSFunction());
4872

    
4873
  // Make the clone.
4874
  Map* map = source->map();
4875
  int object_size = map->instance_size();
4876
  Object* clone;
4877

    
4878
  ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
4879
                          map->instance_type() == JS_ARRAY_TYPE));
4880

    
4881
  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
4882

    
4883
  // If we're forced to always allocate, we use the general allocation
4884
  // functions which may leave us with an object in old space.
4885
  if (always_allocate()) {
4886
    { MaybeObject* maybe_clone =
4887
          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
4888
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4889
    }
4890
    Address clone_address = HeapObject::cast(clone)->address();
4891
    CopyBlock(clone_address,
4892
              source->address(),
4893
              object_size);
4894
    // Update write barrier for all fields that lie beyond the header.
4895
    RecordWrites(clone_address,
4896
                 JSObject::kHeaderSize,
4897
                 (object_size - JSObject::kHeaderSize) / kPointerSize);
4898
  } else {
4899
    wb_mode = SKIP_WRITE_BARRIER;
4900

    
4901
    { int adjusted_object_size = site != NULL
4902
          ? object_size + AllocationMemento::kSize
4903
          : object_size;
4904
      MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size);
4905
      if (!maybe_clone->ToObject(&clone)) return maybe_clone;
4906
    }
4907
    SLOW_ASSERT(InNewSpace(clone));
4908
    // Since we know the clone is allocated in new space, we can copy
4909
    // the contents without worrying about updating the write barrier.
4910
    CopyBlock(HeapObject::cast(clone)->address(),
4911
              source->address(),
4912
              object_size);
4913

    
4914
    if (site != NULL) {
4915
      AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
4916
          reinterpret_cast<Address>(clone) + object_size);
4917
      alloc_memento->set_map_no_write_barrier(allocation_memento_map());
4918
      ASSERT(site->map() == allocation_site_map());
4919
      alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
4920
      HeapProfiler* profiler = isolate()->heap_profiler();
4921
      if (profiler->is_tracking_allocations()) {
4922
        profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
4923
                                        object_size);
4924
        profiler->NewObjectEvent(alloc_memento->address(),
4925
                                 AllocationMemento::kSize);
4926
      }
4927
    }
4928
  }
4929

    
4930
  SLOW_ASSERT(
4931
      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
4932
  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
4933
  FixedArray* properties = FixedArray::cast(source->properties());
4934
  // Update elements if necessary.
4935
  if (elements->length() > 0) {
4936
    Object* elem;
4937
    { MaybeObject* maybe_elem;
4938
      if (elements->map() == fixed_cow_array_map()) {
4939
        maybe_elem = FixedArray::cast(elements);
4940
      } else if (source->HasFastDoubleElements()) {
4941
        maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
4942
      } else {
4943
        maybe_elem = CopyFixedArray(FixedArray::cast(elements));
4944
      }
4945
      if (!maybe_elem->ToObject(&elem)) return maybe_elem;
4946
    }
4947
    JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode);
4948
  }
4949
  // Update properties if necessary.
4950
  if (properties->length() > 0) {
4951
    Object* prop;
4952
    { MaybeObject* maybe_prop = CopyFixedArray(properties);
4953
      if (!maybe_prop->ToObject(&prop)) return maybe_prop;
4954
    }
4955
    JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode);
4956
  }
4957
  // Return the new clone.
4958
  return clone;
4959
}
4960

    
4961

    
4962
MaybeObject* Heap::ReinitializeJSReceiver(
4963
    JSReceiver* object, InstanceType type, int size) {
4964
  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
4965

    
4966
  // Allocate fresh map.
4967
  // TODO(rossberg): Once we optimize proxies, cache these maps.
4968
  Map* map;
4969
  MaybeObject* maybe = AllocateMap(type, size);
4970
  if (!maybe->To<Map>(&map)) return maybe;
4971

    
4972
  // Check that the receiver has at least the size of the fresh object.
4973
  int size_difference = object->map()->instance_size() - map->instance_size();
4974
  ASSERT(size_difference >= 0);
4975

    
4976
  map->set_prototype(object->map()->prototype());
4977

    
4978
  // Allocate the backing storage for the properties.
4979
  int prop_size = map->unused_property_fields() - map->inobject_properties();
4980
  Object* properties;
4981
  maybe = AllocateFixedArray(prop_size, TENURED);
4982
  if (!maybe->ToObject(&properties)) return maybe;
4983

    
4984
  // Functions require some allocation, which might fail here.
4985
  SharedFunctionInfo* shared = NULL;
4986
  if (type == JS_FUNCTION_TYPE) {
4987
    String* name;
4988
    maybe =
4989
        InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>"));
4990
    if (!maybe->To<String>(&name)) return maybe;
4991
    maybe = AllocateSharedFunctionInfo(name);
4992
    if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
4993
  }
4994

    
4995
  // Because of possible retries of this function after failure,
4996
  // we must NOT fail after this point, where we have changed the type!
4997

    
4998
  // Reset the map for the object.
4999
  object->set_map(map);
5000
  JSObject* jsobj = JSObject::cast(object);
5001

    
5002
  // Reinitialize the object from the constructor map.
5003
  InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
5004

    
5005
  // Functions require some minimal initialization.
5006
  if (type == JS_FUNCTION_TYPE) {
5007
    map->set_function_with_prototype(true);
5008
    InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
5009
    JSFunction::cast(object)->set_context(
5010
        isolate()->context()->native_context());
5011
  }
5012

    
5013
  // Put in filler if the new object is smaller than the old.
5014
  if (size_difference > 0) {
5015
    CreateFillerObjectAt(
5016
        object->address() + map->instance_size(), size_difference);
5017
  }
5018

    
5019
  return object;
5020
}
5021

    
5022

    
5023
MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
5024
                                             JSGlobalProxy* object) {
5025
  ASSERT(constructor->has_initial_map());
5026
  Map* map = constructor->initial_map();
5027

    
5028
  // Check that the already allocated object has the same size and type as
5029
  // objects allocated using the constructor.
5030
  ASSERT(map->instance_size() == object->map()->instance_size());
5031
  ASSERT(map->instance_type() == object->map()->instance_type());
5032

    
5033
  // Allocate the backing storage for the properties.
5034
  int prop_size = map->unused_property_fields() - map->inobject_properties();
5035
  Object* properties;
5036
  { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED);
5037
    if (!maybe_properties->ToObject(&properties)) return maybe_properties;
5038
  }
5039

    
5040
  // Reset the map for the object.
5041
  object->set_map(constructor->initial_map());
5042

    
5043
  // Reinitialize the object from the constructor map.
5044
  InitializeJSObjectFromMap(object, FixedArray::cast(properties), map);
5045
  return object;
5046
}
5047

    
5048

    
5049
MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string,
5050
                                           PretenureFlag pretenure) {
5051
  int length = string.length();
5052
  if (length == 1) {
5053
    return Heap::LookupSingleCharacterStringFromCode(string[0]);
5054
  }
5055
  Object* result;
5056
  { MaybeObject* maybe_result =
5057
        AllocateRawOneByteString(string.length(), pretenure);
5058
    if (!maybe_result->ToObject(&result)) return maybe_result;
5059
  }
5060

    
5061
  // Copy the characters into the new object.
5062
  CopyChars(SeqOneByteString::cast(result)->GetChars(),
5063
            string.start(),
5064
            length);
5065
  return result;
5066
}
5067

    
5068

    
5069
MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string,
5070
                                              int non_ascii_start,
5071
                                              PretenureFlag pretenure) {
5072
  // Continue counting the number of characters in the UTF-8 string, starting
5073
  // from the first non-ascii character or word.
5074
  Access<UnicodeCache::Utf8Decoder>
5075
      decoder(isolate_->unicode_cache()->utf8_decoder());
5076
  decoder->Reset(string.start() + non_ascii_start,
5077
                 string.length() - non_ascii_start);
5078
  int utf16_length = decoder->Utf16Length();
5079
  ASSERT(utf16_length > 0);
5080
  // Allocate string.
5081
  Object* result;
5082
  {
5083
    int chars = non_ascii_start + utf16_length;
5084
    MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure);
5085
    if (!maybe_result->ToObject(&result)) return maybe_result;
5086
  }
5087
  // Convert and copy the characters into the new object.
5088
  SeqTwoByteString* twobyte = SeqTwoByteString::cast(result);
5089
  // Copy ascii portion.
5090
  uint16_t* data = twobyte->GetChars();
5091
  if (non_ascii_start != 0) {
5092
    const char* ascii_data = string.start();
5093
    for (int i = 0; i < non_ascii_start; i++) {
5094
      *data++ = *ascii_data++;
5095
    }
5096
  }
5097
  // Now write the remainder.
5098
  decoder->WriteUtf16(data, utf16_length);
5099
  return result;
5100
}
5101

    
5102

    
5103
MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
5104
                                             PretenureFlag pretenure) {
5105
  // Check if the string is an ASCII string.
5106
  Object* result;
5107
  int length = string.length();
5108
  const uc16* start = string.start();
5109

    
5110
  if (String::IsOneByte(start, length)) {
5111
    MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
5112
    if (!maybe_result->ToObject(&result)) return maybe_result;
5113
    CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
5114
  } else {  // It's not a one byte string.
5115
    MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
5116
    if (!maybe_result->ToObject(&result)) return maybe_result;
5117
    CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length);
5118
  }
5119
  return result;
5120
}
5121

    
5122

    
5123
Map* Heap::InternalizedStringMapForString(String* string) {
5124
  // If the string is in new space it cannot be used as internalized.
5125
  if (InNewSpace(string)) return NULL;
5126

    
5127
  // Find the corresponding internalized string map for strings.
5128
  switch (string->map()->instance_type()) {
5129
    case STRING_TYPE: return internalized_string_map();
5130
    case ASCII_STRING_TYPE: return ascii_internalized_string_map();
5131
    case CONS_STRING_TYPE: return cons_internalized_string_map();
5132
    case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map();
5133
    case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
5134
    case EXTERNAL_ASCII_STRING_TYPE:
5135
      return external_ascii_internalized_string_map();
5136
    case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5137
      return external_internalized_string_with_one_byte_data_map();
5138
    case SHORT_EXTERNAL_STRING_TYPE:
5139
      return short_external_internalized_string_map();
5140
    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
5141
      return short_external_ascii_internalized_string_map();
5142
    case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
5143
      return short_external_internalized_string_with_one_byte_data_map();
5144
    default: return NULL;  // No match found.
5145
  }
5146
}
5147

    
5148

    
5149
static inline void WriteOneByteData(Vector<const char> vector,
5150
                                    uint8_t* chars,
5151
                                    int len) {
5152
  // Only works for ascii.
5153
  ASSERT(vector.length() == len);
5154
  OS::MemCopy(chars, vector.start(), len);
5155
}
5156

    
5157
static inline void WriteTwoByteData(Vector<const char> vector,
5158
                                    uint16_t* chars,
5159
                                    int len) {
5160
  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
5161
  unsigned stream_length = vector.length();
5162
  while (stream_length != 0) {
5163
    unsigned consumed = 0;
5164
    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
5165
    ASSERT(c != unibrow::Utf8::kBadChar);
5166
    ASSERT(consumed <= stream_length);
5167
    stream_length -= consumed;
5168
    stream += consumed;
5169
    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
5170
      len -= 2;
5171
      if (len < 0) break;
5172
      *chars++ = unibrow::Utf16::LeadSurrogate(c);
5173
      *chars++ = unibrow::Utf16::TrailSurrogate(c);
5174
    } else {
5175
      len -= 1;
5176
      if (len < 0) break;
5177
      *chars++ = c;
5178
    }
5179
  }
5180
  ASSERT(stream_length == 0);
5181
  ASSERT(len == 0);
5182
}
5183

    
5184

    
5185
static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
5186
  ASSERT(s->length() == len);
5187
  String::WriteToFlat(s, chars, 0, len);
5188
}
5189

    
5190

    
5191
static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
5192
  ASSERT(s->length() == len);
5193
  String::WriteToFlat(s, chars, 0, len);
5194
}
5195

    
5196

    
5197
template<bool is_one_byte, typename T>
5198
MaybeObject* Heap::AllocateInternalizedStringImpl(
5199
    T t, int chars, uint32_t hash_field) {
5200
  ASSERT(chars >= 0);
5201
  // Compute map and object size.
5202
  int size;
5203
  Map* map;
5204

    
5205
  if (is_one_byte) {
5206
    if (chars > SeqOneByteString::kMaxLength) {
5207
      return Failure::OutOfMemoryException(0x9);
5208
    }
5209
    map = ascii_internalized_string_map();
5210
    size = SeqOneByteString::SizeFor(chars);
5211
  } else {
5212
    if (chars > SeqTwoByteString::kMaxLength) {
5213
      return Failure::OutOfMemoryException(0xa);
5214
    }
5215
    map = internalized_string_map();
5216
    size = SeqTwoByteString::SizeFor(chars);
5217
  }
5218
  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
5219

    
5220
  // Allocate string.
5221
  Object* result;
5222
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5223
    if (!maybe_result->ToObject(&result)) return maybe_result;
5224
  }
5225

    
5226
  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
5227
  // Set length and hash fields of the allocated string.
5228
  String* answer = String::cast(result);
5229
  answer->set_length(chars);
5230
  answer->set_hash_field(hash_field);
5231

    
5232
  ASSERT_EQ(size, answer->Size());
5233

    
5234
  if (is_one_byte) {
5235
    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
5236
  } else {
5237
    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
5238
  }
5239
  return answer;
5240
}
5241

    
5242

    
5243
// Need explicit instantiations.
5244
template
5245
MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t);
5246
template
5247
MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5248
    String*, int, uint32_t);
5249
template
5250
MaybeObject* Heap::AllocateInternalizedStringImpl<false>(
5251
    Vector<const char>, int, uint32_t);
5252

    
5253

    
5254
MaybeObject* Heap::AllocateRawOneByteString(int length,
5255
                                            PretenureFlag pretenure) {
5256
  if (length < 0 || length > SeqOneByteString::kMaxLength) {
5257
    return Failure::OutOfMemoryException(0xb);
5258
  }
5259
  int size = SeqOneByteString::SizeFor(length);
5260
  ASSERT(size <= SeqOneByteString::kMaxSize);
5261
  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5262

    
5263
  Object* result;
5264
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5265
    if (!maybe_result->ToObject(&result)) return maybe_result;
5266
  }
5267

    
5268
  // Partially initialize the object.
5269
  HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
5270
  String::cast(result)->set_length(length);
5271
  String::cast(result)->set_hash_field(String::kEmptyHashField);
5272
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
5273

    
5274
  return result;
5275
}
5276

    
5277

    
5278
MaybeObject* Heap::AllocateRawTwoByteString(int length,
5279
                                            PretenureFlag pretenure) {
5280
  if (length < 0 || length > SeqTwoByteString::kMaxLength) {
5281
    return Failure::OutOfMemoryException(0xc);
5282
  }
5283
  int size = SeqTwoByteString::SizeFor(length);
5284
  ASSERT(size <= SeqTwoByteString::kMaxSize);
5285
  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5286

    
5287
  Object* result;
5288
  { MaybeObject* maybe_result = AllocateRaw(size, space, OLD_DATA_SPACE);
5289
    if (!maybe_result->ToObject(&result)) return maybe_result;
5290
  }
5291

    
5292
  // Partially initialize the object.
5293
  HeapObject::cast(result)->set_map_no_write_barrier(string_map());
5294
  String::cast(result)->set_length(length);
5295
  String::cast(result)->set_hash_field(String::kEmptyHashField);
5296
  ASSERT_EQ(size, HeapObject::cast(result)->Size());
5297
  return result;
5298
}
5299

    
5300

    
5301
MaybeObject* Heap::AllocateJSArray(
5302
    ElementsKind elements_kind,
5303
    PretenureFlag pretenure) {
5304
  Context* native_context = isolate()->context()->native_context();
5305
  JSFunction* array_function = native_context->array_function();
5306
  Map* map = array_function->initial_map();
5307
  Map* transition_map = isolate()->get_initial_js_array_map(elements_kind);
5308
  if (transition_map != NULL) map = transition_map;
5309
  return AllocateJSObjectFromMap(map, pretenure);
5310
}
5311

    
5312

    
5313
MaybeObject* Heap::AllocateEmptyFixedArray() {
5314
  int size = FixedArray::SizeFor(0);
5315
  Object* result;
5316
  { MaybeObject* maybe_result =
5317
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5318
    if (!maybe_result->ToObject(&result)) return maybe_result;
5319
  }
5320
  // Initialize the object.
5321
  reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
5322
      fixed_array_map());
5323
  reinterpret_cast<FixedArray*>(result)->set_length(0);
5324
  return result;
5325
}
5326

    
5327

    
5328
MaybeObject* Heap::AllocateEmptyExternalArray(ExternalArrayType array_type) {
5329
  return AllocateExternalArray(0, array_type, NULL, TENURED);
5330
}
5331

    
5332

    
5333
MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
5334
  int len = src->length();
5335
  Object* obj;
5336
  { MaybeObject* maybe_obj = AllocateRawFixedArray(len, NOT_TENURED);
5337
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5338
  }
5339
  if (InNewSpace(obj)) {
5340
    HeapObject* dst = HeapObject::cast(obj);
5341
    dst->set_map_no_write_barrier(map);
5342
    CopyBlock(dst->address() + kPointerSize,
5343
              src->address() + kPointerSize,
5344
              FixedArray::SizeFor(len) - kPointerSize);
5345
    return obj;
5346
  }
5347
  HeapObject::cast(obj)->set_map_no_write_barrier(map);
5348
  FixedArray* result = FixedArray::cast(obj);
5349
  result->set_length(len);
5350

    
5351
  // Copy the content
5352
  DisallowHeapAllocation no_gc;
5353
  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
5354
  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
5355
  return result;
5356
}
5357

    
5358

    
5359
MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
5360
                                               Map* map) {
5361
  int len = src->length();
5362
  Object* obj;
5363
  { MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(len, NOT_TENURED);
5364
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5365
  }
5366
  HeapObject* dst = HeapObject::cast(obj);
5367
  dst->set_map_no_write_barrier(map);
5368
  CopyBlock(
5369
      dst->address() + FixedDoubleArray::kLengthOffset,
5370
      src->address() + FixedDoubleArray::kLengthOffset,
5371
      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
5372
  return obj;
5373
}
5374

    
5375

    
5376
MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
5377
                                                Map* map) {
5378
  int int64_entries = src->count_of_int64_entries();
5379
  int ptr_entries = src->count_of_ptr_entries();
5380
  int int32_entries = src->count_of_int32_entries();
5381
  Object* obj;
5382
  { MaybeObject* maybe_obj =
5383
        AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
5384
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5385
  }
5386
  HeapObject* dst = HeapObject::cast(obj);
5387
  dst->set_map_no_write_barrier(map);
5388
  CopyBlock(
5389
      dst->address() + ConstantPoolArray::kLengthOffset,
5390
      src->address() + ConstantPoolArray::kLengthOffset,
5391
      ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
5392
          - ConstantPoolArray::kLengthOffset);
5393
  return obj;
5394
}
5395

    
5396

    
5397
MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
5398
  if (length < 0 || length > FixedArray::kMaxLength) {
5399
    return Failure::OutOfMemoryException(0xe);
5400
  }
5401
  int size = FixedArray::SizeFor(length);
5402
  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
5403

    
5404
  return AllocateRaw(size, space, OLD_POINTER_SPACE);
5405
}
5406

    
5407

    
5408
MaybeObject* Heap::AllocateFixedArrayWithFiller(int length,
5409
                                                PretenureFlag pretenure,
5410
                                                Object* filler) {
5411
  ASSERT(length >= 0);
5412
  ASSERT(empty_fixed_array()->IsFixedArray());
5413
  if (length == 0) return empty_fixed_array();
5414

    
5415
  ASSERT(!InNewSpace(filler));
5416
  Object* result;
5417
  { MaybeObject* maybe_result = AllocateRawFixedArray(length, pretenure);
5418
    if (!maybe_result->ToObject(&result)) return maybe_result;
5419
  }
5420

    
5421
  HeapObject::cast(result)->set_map_no_write_barrier(fixed_array_map());
5422
  FixedArray* array = FixedArray::cast(result);
5423
  array->set_length(length);
5424
  MemsetPointer(array->data_start(), filler, length);
5425
  return array;
5426
}
5427

    
5428

    
5429
MaybeObject* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
5430
  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
5431
}
5432

    
5433

    
5434
MaybeObject* Heap::AllocateFixedArrayWithHoles(int length,
5435
                                               PretenureFlag pretenure) {
5436
  return AllocateFixedArrayWithFiller(length, pretenure, the_hole_value());
5437
}
5438

    
5439

    
5440
MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
5441
  if (length == 0) return empty_fixed_array();
5442

    
5443
  Object* obj;
5444
  { MaybeObject* maybe_obj = AllocateRawFixedArray(length, NOT_TENURED);
5445
    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
5446
  }
5447

    
5448
  reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
5449
      fixed_array_map());
5450
  FixedArray::cast(obj)->set_length(length);
5451
  return obj;
5452
}
5453

    
5454

    
5455
MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
5456
  int size = FixedDoubleArray::SizeFor(0);
5457
  Object* result;
5458
  { MaybeObject* maybe_result =
5459
        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
5460
    if (!maybe_result->ToObject(&result)) return maybe_result;
5461
  }
5462
  // Initialize the object.
5463
  reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
5464
      fixed_double_array_map());
5465
  reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
5466
  return result;
5467
}
5468

    
5469

    
5470
MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
5471
    int length,
5472
    PretenureFlag pretenure) {
5473
  if (length == 0) return empty_fixed_array();
5474

    
5475
  Object* elements_object;
5476
  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5477
  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5478
  FixedDoubleArray* elements =
5479
      reinterpret_cast<FixedDoubleArray*>(elements_object);
5480

    
5481
  elements->set_map_no_write_barrier(fixed_double_array_map());
5482
  elements->set_length(length);
5483
  return elements;
5484
}
5485

    
5486

    
5487
MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles(
5488
    int length,
5489
    PretenureFlag pretenure) {
5490
  if (length == 0) return empty_fixed_array();
5491

    
5492
  Object* elements_object;
5493
  MaybeObject* maybe_obj = AllocateRawFixedDoubleArray(length, pretenure);
5494
  if (!maybe_obj->ToObject(&elements_object)) return maybe_obj;
5495
  FixedDoubleArray* elements =
5496
      reinterpret_cast<FixedDoubleArray*>(elements_object);
5497

    
5498
  for (int i = 0; i < length; ++i) {
5499
    elements->set_the_hole(i);
5500
  }
5501

    
5502
  elements->set_map_no_write_barrier(fixed_double_array_map());
5503
  elements->set_length(length);
5504
  return elements;
5505
}
5506

    
5507

    
5508
MaybeObject* Heap::AllocateRawFixedDoubleArray(int length,
5509
                                               PretenureFlag pretenure) {
5510
  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
5511
    return Failure::OutOfMemoryException(0xf);
5512
  }
5513
  int size = FixedDoubleArray::SizeFor(length);
5514
#ifndef V8_HOST_ARCH_64_BIT
5515
  size += kPointerSize;
5516
#endif
5517
  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
5518

    
5519
  HeapObject* object;
5520
  { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_DATA_SPACE);
5521
    if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5522
  }
5523

    
5524
  return EnsureDoubleAligned(this, object, size);
5525
}
5526

    
5527

    
5528
MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
5529
                                             int number_of_ptr_entries,
5530
                                             int number_of_int32_entries) {
5531
  ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
5532
         number_of_int32_entries > 0);
5533
  int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
5534
                                        number_of_ptr_entries,
5535
                                        number_of_int32_entries);
5536
#ifndef V8_HOST_ARCH_64_BIT
5537
  size += kPointerSize;
5538
#endif
5539
  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5540

    
5541
  HeapObject* object;
5542
  { MaybeObject* maybe_object = AllocateRaw(size, space, OLD_POINTER_SPACE);
5543
    if (!maybe_object->To<HeapObject>(&object)) return maybe_object;
5544
  }
5545
  object = EnsureDoubleAligned(this, object, size);
5546
  HeapObject::cast(object)->set_map_no_write_barrier(constant_pool_array_map());
5547

    
5548
  ConstantPoolArray* constant_pool =
5549
      reinterpret_cast<ConstantPoolArray*>(object);
5550
  constant_pool->SetEntryCounts(number_of_int64_entries,
5551
                                number_of_ptr_entries,
5552
                                number_of_int32_entries);
5553
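  // Pre-fill the pointer section with undefined so that a GC triggered
  // before the caller populates the entries only ever sees valid tagged
  // values.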
  MemsetPointer(
5554
      HeapObject::RawField(
5555
          constant_pool,
5556
          constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
5557
      undefined_value(),
5558
      number_of_ptr_entries);
5559
  return constant_pool;
5560
}
5561

    
5562

    
5563
MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
5564
  Object* result;
5565
  { MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
5566
    if (!maybe_result->ToObject(&result)) return maybe_result;
5567
  }
5568
  reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
5569
      hash_table_map());
5570
  ASSERT(result->IsHashTable());
5571
  return result;
5572
}
5573

    
5574

    
5575
MaybeObject* Heap::AllocateSymbol() {
5576
  // Statically ensure that it is safe to allocate symbols in paged spaces.
5577
  STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize);
5578

    
5579
  Object* result;
5580
  MaybeObject* maybe =
5581
      AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
5582
  if (!maybe->ToObject(&result)) return maybe;
5583

    
5584
  HeapObject::cast(result)->set_map_no_write_barrier(symbol_map());
5585

    
5586
  // Generate a random hash value.
5587
  int hash;
5588
  int attempts = 0;
5589
  do {
5590
    hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
5591
    attempts++;
5592
  } while (hash == 0 && attempts < 30);
5593
  if (hash == 0) hash = 1;  // never return 0
5594

    
5595
  Symbol::cast(result)->set_hash_field(
5596
      Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
5597
  Symbol::cast(result)->set_name(undefined_value());
5598

    
5599
  ASSERT(result->IsSymbol());
5600
  return result;
5601
}
5602

    
5603

    
5604
MaybeObject* Heap::AllocateNativeContext() {
5605
  Object* result;
5606
  { MaybeObject* maybe_result =
5607
        AllocateFixedArray(Context::NATIVE_CONTEXT_SLOTS);
5608
    if (!maybe_result->ToObject(&result)) return maybe_result;
5609
  }
5610
  Context* context = reinterpret_cast<Context*>(result);
5611
  context->set_map_no_write_barrier(native_context_map());
5612
  context->set_js_array_maps(undefined_value());
5613
  ASSERT(context->IsNativeContext());
5614
  ASSERT(result->IsContext());
5615
  return result;
5616
}
5617

    
5618

    
5619
MaybeObject* Heap::AllocateGlobalContext(JSFunction* function,
5620
                                         ScopeInfo* scope_info) {
5621
  Object* result;
5622
  { MaybeObject* maybe_result =
5623
        AllocateFixedArray(scope_info->ContextLength(), TENURED);
5624
    if (!maybe_result->ToObject(&result)) return maybe_result;
5625
  }
5626
  Context* context = reinterpret_cast<Context*>(result);
5627
  context->set_map_no_write_barrier(global_context_map());
5628
  context->set_closure(function);
5629
  context->set_previous(function->context());
5630
  context->set_extension(scope_info);
5631
  context->set_global_object(function->context()->global_object());
5632
  ASSERT(context->IsGlobalContext());
5633
  ASSERT(result->IsContext());
5634
  return context;
5635
}
5636

    
5637

    
5638
MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
5639
  Object* result;
5640
  { MaybeObject* maybe_result =
5641
        AllocateFixedArray(scope_info->ContextLength(), TENURED);
5642
    if (!maybe_result->ToObject(&result)) return maybe_result;
5643
  }
5644
  Context* context = reinterpret_cast<Context*>(result);
5645
  context->set_map_no_write_barrier(module_context_map());
5646
  // Instance link will be set later.
5647
  context->set_extension(Smi::FromInt(0));
5648
  return context;
5649
}
5650

    
5651

    
5652
MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
5653
  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
5654
  Object* result;
5655
  { MaybeObject* maybe_result = AllocateFixedArray(length);
5656
    if (!maybe_result->ToObject(&result)) return maybe_result;
5657
  }
5658
  Context* context = reinterpret_cast<Context*>(result);
5659
  context->set_map_no_write_barrier(function_context_map());
5660
  context->set_closure(function);
5661
  context->set_previous(function->context());
5662
  context->set_extension(Smi::FromInt(0));
5663
  context->set_global_object(function->context()->global_object());
5664
  return context;
5665
}
5666

    
5667

    
5668
MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
5669
                                        Context* previous,
5670
                                        String* name,
5671
                                        Object* thrown_object) {
5672
  STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == Context::THROWN_OBJECT_INDEX);
5673
  Object* result;
5674
  { MaybeObject* maybe_result =
5675
        AllocateFixedArray(Context::MIN_CONTEXT_SLOTS + 1);
5676
    if (!maybe_result->ToObject(&result)) return maybe_result;
5677
  }
5678
  Context* context = reinterpret_cast<Context*>(result);
5679
  context->set_map_no_write_barrier(catch_context_map());
5680
  context->set_closure(function);
5681
  context->set_previous(previous);
5682
  context->set_extension(name);
5683
  context->set_global_object(previous->global_object());
5684
  context->set(Context::THROWN_OBJECT_INDEX, thrown_object);
5685
  return context;
5686
}
5687

    
5688

    
5689
MaybeObject* Heap::AllocateWithContext(JSFunction* function,
5690
                                       Context* previous,
5691
                                       JSReceiver* extension) {
5692
  Object* result;
5693
  { MaybeObject* maybe_result = AllocateFixedArray(Context::MIN_CONTEXT_SLOTS);
5694
    if (!maybe_result->ToObject(&result)) return maybe_result;
5695
  }
5696
  Context* context = reinterpret_cast<Context*>(result);
5697
  context->set_map_no_write_barrier(with_context_map());
5698
  context->set_closure(function);
5699
  context->set_previous(previous);
5700
  context->set_extension(extension);
5701
  context->set_global_object(previous->global_object());
5702
  return context;
5703
}
5704

    
5705

    
5706
MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
5707
                                        Context* previous,
5708
                                        ScopeInfo* scope_info) {
5709
  Object* result;
5710
  { MaybeObject* maybe_result =
5711
        AllocateFixedArrayWithHoles(scope_info->ContextLength());
5712
    if (!maybe_result->ToObject(&result)) return maybe_result;
5713
  }
5714
  Context* context = reinterpret_cast<Context*>(result);
5715
  context->set_map_no_write_barrier(block_context_map());
5716
  context->set_closure(function);
5717
  context->set_previous(previous);
5718
  context->set_extension(scope_info);
5719
  context->set_global_object(previous->global_object());
5720
  return context;
5721
}
5722

    
5723

    
5724
MaybeObject* Heap::AllocateScopeInfo(int length) {
5725
  FixedArray* scope_info;
5726
  MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
5727
  if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
5728
  scope_info->set_map_no_write_barrier(scope_info_map());
5729
  return scope_info;
5730
}
5731

    
5732

    
5733
MaybeObject* Heap::AllocateExternal(void* value) {
5734
  Foreign* foreign;
5735
  { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
5736
    if (!maybe_result->To(&foreign)) return maybe_result;
5737
  }
5738
  JSObject* external;
5739
  { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
5740
    if (!maybe_result->To(&external)) return maybe_result;
5741
  }
5742
  external->SetInternalField(0, foreign);
5743
  return external;
5744
}
5745

    
5746

    
5747
MaybeObject* Heap::AllocateStruct(InstanceType type) {
5748
  Map* map;
5749
  switch (type) {
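    // STRUCT_LIST expands MAKE_CASE once per struct type, e.g. (roughly):
    //   case ACCESSOR_INFO_TYPE: map = accessor_info_map(); break;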
5750
#define MAKE_CASE(NAME, Name, name) \
5751
    case NAME##_TYPE: map = name##_map(); break;
5752
STRUCT_LIST(MAKE_CASE)
5753
#undef MAKE_CASE
5754
    default:
5755
      UNREACHABLE();
5756
      return Failure::InternalError();
5757
  }
5758
  int size = map->instance_size();
5759
  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
5760
  Object* result;
5761
  { MaybeObject* maybe_result = Allocate(map, space);
5762
    if (!maybe_result->ToObject(&result)) return maybe_result;
5763
  }
5764
  Struct::cast(result)->InitializeBody(size);
5765
  return result;
5766
}
5767

    
5768

    
5769
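// The heap is only safely iterable when neither old space has been swept
// conservatively, since conservatively swept pages may still contain unswept
// garbage that an object iterator cannot step over.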
bool Heap::IsHeapIterable() {
5770
  return (!old_pointer_space()->was_swept_conservatively() &&
5771
          !old_data_space()->was_swept_conservatively());
5772
}
5773

    
5774

    
5775
void Heap::EnsureHeapIsIterable() {
5776
  ASSERT(AllowHeapAllocation::IsAllowed());
5777
  if (!IsHeapIterable()) {
5778
    CollectAllGarbage(kMakeHeapIterableMask, "Heap::EnsureHeapIsIterable");
5779
  }
5780
  ASSERT(IsHeapIterable());
5781
}
5782

    
5783

    
5784
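// Performs one incremental marking step of |step_size| bytes and, if that
// completes the marking, finalizes the cycle with a full collection
// (optionally shrinking new space and uncommitting memory when the mutator
// appears inactive).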
void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
5785
  incremental_marking()->Step(step_size,
5786
                              IncrementalMarking::NO_GC_VIA_STACK_GUARD);
5787

    
5788
  if (incremental_marking()->IsComplete()) {
5789
    bool uncommit = false;
5790
    if (gc_count_at_last_idle_gc_ == gc_count_) {
5791
      // No GC since the last full GC; the mutator is probably not active.
5792
      isolate_->compilation_cache()->Clear();
5793
      uncommit = true;
5794
    }
5795
    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
5796
    mark_sweeps_since_idle_round_started_++;
5797
    gc_count_at_last_idle_gc_ = gc_count_;
5798
    if (uncommit) {
5799
      new_space_.Shrink();
5800
      UncommitFromSpace();
5801
    }
5802
  }
5803
}
5804

    
5805

    
5806
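// Entry point for embedder idle notifications (normally reached via
// v8::V8::IdleNotification(hint)).  The hint is a rough measure of how much
// GC work the embedder is willing to tolerate right now; returning true
// tells the embedder that no further idle GC work is useful for the moment.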
bool Heap::IdleNotification(int hint) {
5807
  // Hints greater than this value indicate that
5808
  // the embedder is requesting a lot of GC work.
5809
  const int kMaxHint = 1000;
5810
  const int kMinHintForIncrementalMarking = 10;
5811
  // Minimal hint that allows a full GC.
5812
  const int kMinHintForFullGC = 100;
5813
  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
5814
  // The size factor is in range [5..250]. The numbers here are chosen from
5815
  // experiments. If you change them, make sure to test with
5816
  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
5817
  intptr_t step_size =
5818
      size_factor * IncrementalMarking::kAllocatedThreshold;
5819

    
5820
  if (contexts_disposed_ > 0) {
5821
    if (hint >= kMaxHint) {
5822
      // The embedder is requesting a lot of GC work after context disposal,
5823
      // we age inline caches so that they don't keep objects from
5824
      // the old context alive.
5825
      AgeInlineCaches();
5826
    }
5827
    int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
5828
    if (hint >= mark_sweep_time && !FLAG_expose_gc &&
5829
        incremental_marking()->IsStopped()) {
5830
      HistogramTimerScope scope(isolate_->counters()->gc_context());
5831
      CollectAllGarbage(kReduceMemoryFootprintMask,
5832
                        "idle notification: contexts disposed");
5833
    } else {
5834
      AdvanceIdleIncrementalMarking(step_size);
5835
      contexts_disposed_ = 0;
5836
    }
5837
    // After context disposal there is likely a lot of garbage remaining; reset
5838
    // the idle notification counters in order to trigger more incremental GCs
5839
    // on subsequent idle notifications.
5840
    StartIdleRound();
5841
    return false;
5842
  }
5843

    
5844
  if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
5845
    return IdleGlobalGC();
5846
  }
5847

    
5848
  // We do small chunks of GC work in each IdleNotification to
5849
  // perform a round of incremental GCs, and after that we wait until
5850
  // the mutator creates enough garbage to justify a new round.
5851
  // An incremental GC progresses as follows:
5852
  // 1. many incremental marking steps,
5853
  // 2. one old space mark-sweep-compact,
5854
  // 3. many lazy sweep steps.
5855
  // Use mark-sweep-compact events to count incremental GCs in a round.
5856

    
5857
  if (incremental_marking()->IsStopped()) {
5858
    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
5859
        !IsSweepingComplete() &&
5860
        !AdvanceSweepers(static_cast<int>(step_size))) {
5861
      return false;
5862
    }
5863
  }
5864

    
5865
  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5866
    if (EnoughGarbageSinceLastIdleRound()) {
5867
      StartIdleRound();
5868
    } else {
5869
      return true;
5870
    }
5871
  }
5872

    
5873
  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
5874
                              mark_sweeps_since_idle_round_started_;
5875

    
5876
  if (incremental_marking()->IsStopped()) {
5877
    // If there are no more than two GCs left in this idle round and we are
5878
    // allowed to do a full GC, then make those GCs full in order to compact
5879
    // the code space.
5880
    // TODO(ulan): Once we enable code compaction for incremental marking,
5881
    // we can get rid of this special case and always start incremental marking.
5882
    if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
5883
      CollectAllGarbage(kReduceMemoryFootprintMask,
5884
                        "idle notification: finalize idle round");
5885
      mark_sweeps_since_idle_round_started_++;
5886
    } else if (hint > kMinHintForIncrementalMarking) {
5887
      incremental_marking()->Start();
5888
    }
5889
  }
5890
  if (!incremental_marking()->IsStopped() &&
5891
      hint > kMinHintForIncrementalMarking) {
5892
    AdvanceIdleIncrementalMarking(step_size);
5893
  }
5894

    
5895
  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
5896
    FinishIdleRound();
5897
    return true;
5898
  }
5899

    
5900
  return false;
5901
}
5902

    
5903

    
5904
bool Heap::IdleGlobalGC() {
5905
  static const int kIdlesBeforeScavenge = 4;
5906
  static const int kIdlesBeforeMarkSweep = 7;
5907
  static const int kIdlesBeforeMarkCompact = 8;
5908
  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
5909
  static const unsigned int kGCsBetweenCleanup = 4;
5910

    
5911
  if (!last_idle_notification_gc_count_init_) {
5912
    last_idle_notification_gc_count_ = gc_count_;
5913
    last_idle_notification_gc_count_init_ = true;
5914
  }
5915

    
5916
  bool uncommit = true;
5917
  bool finished = false;
5918

    
5919
  // Reset the number of idle notifications received when a number of
5920
  // GCs have taken place. This allows another round of cleanup based
5921
  // on idle notifications if enough work has been carried out to
5922
  // provoke a number of garbage collections.
5923
  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
5924
    number_idle_notifications_ =
5925
        Min(number_idle_notifications_ + 1, kMaxIdleCount);
5926
  } else {
5927
    number_idle_notifications_ = 0;
5928
    last_idle_notification_gc_count_ = gc_count_;
5929
  }
5930

    
5931
  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
5932
    CollectGarbage(NEW_SPACE, "idle notification");
5933
    new_space_.Shrink();
5934
    last_idle_notification_gc_count_ = gc_count_;
5935
  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
5936
    // Before doing the mark-sweep collections we clear the
5937
    // compilation cache to avoid hanging on to source code and
5938
    // generated code for cached functions.
5939
    isolate_->compilation_cache()->Clear();
5940

    
5941
    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5942
    new_space_.Shrink();
5943
    last_idle_notification_gc_count_ = gc_count_;
5944

    
5945
  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
5946
    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
5947
    new_space_.Shrink();
5948
    last_idle_notification_gc_count_ = gc_count_;
5949
    number_idle_notifications_ = 0;
5950
    finished = true;
5951
  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
5952
    // If we have received more than kIdlesBeforeMarkCompact idle
5953
    // notifications we do not perform any cleanup because we don't
5954
    // expect to gain much by doing so.
5955
    finished = true;
5956
  }
5957

    
5958
  if (uncommit) UncommitFromSpace();
5959

    
5960
  return finished;
5961
}
5962

    
5963

    
5964
#ifdef DEBUG
5965

    
5966
void Heap::Print() {
5967
  if (!HasBeenSetUp()) return;
5968
  isolate()->PrintStack(stdout);
5969
  AllSpaces spaces(this);
5970
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
5971
    space->Print();
5972
  }
5973
}
5974

    
5975

    
5976
void Heap::ReportCodeStatistics(const char* title) {
5977
  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
5978
  PagedSpace::ResetCodeStatistics(isolate());
5979
  // We do not look for code in new space, map space, or old space.  If code
5980
  // somehow ends up in those spaces, we would miss it here.
5981
  code_space_->CollectCodeStatistics();
5982
  lo_space_->CollectCodeStatistics();
5983
  PagedSpace::ReportCodeStatistics(isolate());
5984
}
5985

    
5986

    
5987
// This function expects that NewSpace's allocated objects histogram is
5988
// populated (via a call to CollectStatistics or else as a side effect of a
5989
// just-completed scavenge collection).
5990
void Heap::ReportHeapStatistics(const char* title) {
5991
  USE(title);
5992
  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
5993
         title, gc_count_);
5994
  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
5995
         old_generation_allocation_limit_);
5996

    
5997
  PrintF("\n");
5998
  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
5999
  isolate_->global_handles()->PrintStats();
6000
  PrintF("\n");
6001

    
6002
  PrintF("Heap statistics : ");
6003
  isolate_->memory_allocator()->ReportStatistics();
6004
  PrintF("To space : ");
6005
  new_space_.ReportStatistics();
6006
  PrintF("Old pointer space : ");
6007
  old_pointer_space_->ReportStatistics();
6008
  PrintF("Old data space : ");
6009
  old_data_space_->ReportStatistics();
6010
  PrintF("Code space : ");
6011
  code_space_->ReportStatistics();
6012
  PrintF("Map space : ");
6013
  map_space_->ReportStatistics();
6014
  PrintF("Cell space : ");
6015
  cell_space_->ReportStatistics();
6016
  PrintF("PropertyCell space : ");
6017
  property_cell_space_->ReportStatistics();
6018
  PrintF("Large object space : ");
6019
  lo_space_->ReportStatistics();
6020
  PrintF(">>>>>> ========================================= >>>>>>\n");
6021
}
6022

    
6023
#endif  // DEBUG
6024

    
6025
bool Heap::Contains(HeapObject* value) {
6026
  return Contains(value->address());
6027
}
6028

    
6029

    
6030
bool Heap::Contains(Address addr) {
6031
  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
6032
  return HasBeenSetUp() &&
6033
    (new_space_.ToSpaceContains(addr) ||
6034
     old_pointer_space_->Contains(addr) ||
6035
     old_data_space_->Contains(addr) ||
6036
     code_space_->Contains(addr) ||
6037
     map_space_->Contains(addr) ||
6038
     cell_space_->Contains(addr) ||
6039
     property_cell_space_->Contains(addr) ||
6040
     lo_space_->SlowContains(addr));
6041
}
6042

    
6043

    
6044
bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
6045
  return InSpace(value->address(), space);
6046
}
6047

    
6048

    
6049
bool Heap::InSpace(Address addr, AllocationSpace space) {
6050
  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
6051
  if (!HasBeenSetUp()) return false;
6052

    
6053
  switch (space) {
6054
    case NEW_SPACE:
6055
      return new_space_.ToSpaceContains(addr);
6056
    case OLD_POINTER_SPACE:
6057
      return old_pointer_space_->Contains(addr);
6058
    case OLD_DATA_SPACE:
6059
      return old_data_space_->Contains(addr);
6060
    case CODE_SPACE:
6061
      return code_space_->Contains(addr);
6062
    case MAP_SPACE:
6063
      return map_space_->Contains(addr);
6064
    case CELL_SPACE:
6065
      return cell_space_->Contains(addr);
6066
    case PROPERTY_CELL_SPACE:
6067
      return property_cell_space_->Contains(addr);
6068
    case LO_SPACE:
6069
      return lo_space_->SlowContains(addr);
6070
  }
6071

    
6072
  return false;
6073
}
6074

    
6075

    
6076
#ifdef VERIFY_HEAP
6077
void Heap::Verify() {
6078
  CHECK(HasBeenSetUp());
6079

    
6080
  store_buffer()->Verify();
6081

    
6082
  VerifyPointersVisitor visitor;
6083
  IterateRoots(&visitor, VISIT_ONLY_STRONG);
6084

    
6085
  new_space_.Verify();
6086

    
6087
  old_pointer_space_->Verify(&visitor);
6088
  map_space_->Verify(&visitor);
6089

    
6090
  VerifyPointersVisitor no_dirty_regions_visitor;
6091
  old_data_space_->Verify(&no_dirty_regions_visitor);
6092
  code_space_->Verify(&no_dirty_regions_visitor);
6093
  cell_space_->Verify(&no_dirty_regions_visitor);
6094
  property_cell_space_->Verify(&no_dirty_regions_visitor);
6095

    
6096
  lo_space_->Verify();
6097
}
6098
#endif
6099

    
6100

    
6101
MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) {
6102
  Object* result = NULL;
6103
  Object* new_table;
6104
  { MaybeObject* maybe_new_table =
6105
        string_table()->LookupUtf8String(string, &result);
6106
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6107
  }
6108
  // Can't use set_string_table because StringTable::cast knows that
6109
  // StringTable is a singleton and checks for identity.
6110
  roots_[kStringTableRootIndex] = new_table;
6111
  ASSERT(result != NULL);
6112
  return result;
6113
}
6114

    
6115

    
6116
MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) {
6117
  Object* result = NULL;
6118
  Object* new_table;
6119
  { MaybeObject* maybe_new_table =
6120
        string_table()->LookupOneByteString(string, &result);
6121
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6122
  }
6123
  // Can't use set_string_table because StringTable::cast knows that
6124
  // StringTable is a singleton and checks for identity.
6125
  roots_[kStringTableRootIndex] = new_table;
6126
  ASSERT(result != NULL);
6127
  return result;
6128
}
6129

    
6130

    
6131
MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string,
6132
                                     int from,
6133
                                     int length) {
6134
  Object* result = NULL;
6135
  Object* new_table;
6136
  { MaybeObject* maybe_new_table =
6137
        string_table()->LookupSubStringOneByteString(string,
6138
                                                   from,
6139
                                                   length,
6140
                                                   &result);
6141
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6142
  }
6143
  // Can't use set_string_table because StringTable::cast knows that
6144
  // StringTable is a singleton and checks for identity.
6145
  roots_[kStringTableRootIndex] = new_table;
6146
  ASSERT(result != NULL);
6147
  return result;
6148
}
6149

    
6150

    
6151
MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) {
6152
  Object* result = NULL;
6153
  Object* new_table;
6154
  { MaybeObject* maybe_new_table =
6155
        string_table()->LookupTwoByteString(string, &result);
6156
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6157
  }
6158
  // Can't use set_string_table because StringTable::cast knows that
6159
  // StringTable is a singleton and checks for identity.
6160
  roots_[kStringTableRootIndex] = new_table;
6161
  ASSERT(result != NULL);
6162
  return result;
6163
}
6164

    
6165

    
6166
MaybeObject* Heap::InternalizeString(String* string) {
6167
  if (string->IsInternalizedString()) return string;
6168
  Object* result = NULL;
6169
  Object* new_table;
6170
  { MaybeObject* maybe_new_table =
6171
        string_table()->LookupString(string, &result);
6172
    if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
6173
  }
6174
  // Can't use set_string_table because StringTable::cast knows that
6175
  // StringTable is a singleton and checks for identity.
6176
  roots_[kStringTableRootIndex] = new_table;
6177
  ASSERT(result != NULL);
6178
  return result;
6179
}
6180

    
6181

    
6182
bool Heap::InternalizeStringIfExists(String* string, String** result) {
6183
  if (string->IsInternalizedString()) {
6184
    *result = string;
6185
    return true;
6186
  }
6187
  return string_table()->LookupStringIfExists(string, result);
6188
}
6189

    
6190

    
6191
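// Overwrite all of from-space with kFromSpaceZapValue so that any stale
// reference into the evacuated semispace is easy to recognize when
// debugging.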
void Heap::ZapFromSpace() {
6192
  NewSpacePageIterator it(new_space_.FromSpaceStart(),
6193
                          new_space_.FromSpaceEnd());
6194
  while (it.has_next()) {
6195
    NewSpacePage* page = it.next();
6196
    for (Address cursor = page->area_start(), limit = page->area_end();
6197
         cursor < limit;
6198
         cursor += kPointerSize) {
6199
      Memory::Address_at(cursor) = kFromSpaceZapValue;
6200
    }
6201
  }
6202
}
6203

    
6204

    
6205
void Heap::IterateAndMarkPointersToFromSpace(Address start,
6206
                                             Address end,
6207
                                             ObjectSlotCallback callback) {
6208
  Address slot_address = start;
6209

    
6210
  // We are not collecting slots on new space objects during mutation;
6211
  // thus we have to scan for pointers to evacuation candidates when we
6212
  // promote objects. But we should not record any slots in non-black
6213
  // objects. A grey object's slots will be rescanned anyway.
6214
  // A white object might not survive until the end of the collection,
6215
  // so it would be a violation of the invariant to record its slots.
6216
  bool record_slots = false;
6217
  if (incremental_marking()->IsCompacting()) {
6218
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
6219
    record_slots = Marking::IsBlack(mark_bit);
6220
  }
6221

    
6222
  while (slot_address < end) {
6223
    Object** slot = reinterpret_cast<Object**>(slot_address);
6224
    Object* object = *slot;
6225
    // If the store buffer becomes overfull we mark pages as being exempt from
6226
    // the store buffer.  These pages are scanned to find pointers that point
6227
    // to the new space.  In that case we may hit newly promoted objects and
6228
    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
6229
    if (object->IsHeapObject()) {
6230
      if (Heap::InFromSpace(object)) {
6231
        callback(reinterpret_cast<HeapObject**>(slot),
6232
                 HeapObject::cast(object));
6233
        Object* new_object = *slot;
6234
        if (InNewSpace(new_object)) {
6235
          SLOW_ASSERT(Heap::InToSpace(new_object));
6236
          SLOW_ASSERT(new_object->IsHeapObject());
6237
          store_buffer_.EnterDirectlyIntoStoreBuffer(
6238
              reinterpret_cast<Address>(slot));
6239
        }
6240
        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
6241
      } else if (record_slots &&
6242
                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
6243
        mark_compact_collector()->RecordSlot(slot, slot, object);
6244
      }
6245
    }
6246
    slot_address += kPointerSize;
6247
  }
6248
}
6249

    
6250

    
6251
#ifdef DEBUG
6252
typedef bool (*CheckStoreBufferFilter)(Object** addr);
6253

    
6254

    
6255
bool IsAMapPointerAddress(Object** addr) {
6256
  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
6257
  int mod = a % Map::kSize;
6258
  return mod >= Map::kPointerFieldsBeginOffset &&
6259
         mod < Map::kPointerFieldsEndOffset;
6260
}
6261

    
6262

    
6263
bool EverythingsAPointer(Object** addr) {
6264
  return true;
6265
}
6266

    
6267

    
6268
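// Walks the slots in [current, limit), skipping free space and the current
// linear allocation area, and checks that every slot accepted by |filter|
// that points into new space has an entry in the (sorted) store buffer;
// a missing entry trips the UNREACHABLE() below.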
static void CheckStoreBuffer(Heap* heap,
6269
                             Object** current,
6270
                             Object** limit,
6271
                             Object**** store_buffer_position,
6272
                             Object*** store_buffer_top,
6273
                             CheckStoreBufferFilter filter,
6274
                             Address special_garbage_start,
6275
                             Address special_garbage_end) {
6276
  Map* free_space_map = heap->free_space_map();
6277
  for ( ; current < limit; current++) {
6278
    Object* o = *current;
6279
    Address current_address = reinterpret_cast<Address>(current);
6280
    // Skip free space.
6281
    if (o == free_space_map) {
6282
      Address current_address = reinterpret_cast<Address>(current);
6283
      FreeSpace* free_space =
6284
          FreeSpace::cast(HeapObject::FromAddress(current_address));
6285
      int skip = free_space->Size();
6286
      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
6287
      ASSERT(skip > 0);
6288
      current_address += skip - kPointerSize;
6289
      current = reinterpret_cast<Object**>(current_address);
6290
      continue;
6291
    }
6292
    // Skip the current linear allocation space between top and limit which is
6293
    // unmarked with the free space map, but can contain junk.
6294
    if (current_address == special_garbage_start &&
6295
        special_garbage_end != special_garbage_start) {
6296
      current_address = special_garbage_end - kPointerSize;
6297
      current = reinterpret_cast<Object**>(current_address);
6298
      continue;
6299
    }
6300
    if (!(*filter)(current)) continue;
6301
    ASSERT(current_address < special_garbage_start ||
6302
           current_address >= special_garbage_end);
6303
    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
6304
    // We have to check that the pointer does not point into new space
6305
    // without trying to cast it to a heap object since the hash field of
6306
    // a string can contain values like 1 and 3 which are tagged null
6307
    // pointers.
6308
    if (!heap->InNewSpace(o)) continue;
6309
    while (**store_buffer_position < current &&
6310
           *store_buffer_position < store_buffer_top) {
6311
      (*store_buffer_position)++;
6312
    }
6313
    if (**store_buffer_position != current ||
6314
        *store_buffer_position == store_buffer_top) {
6315
      Object** obj_start = current;
6316
      while (!(*obj_start)->IsMap()) obj_start--;
6317
      UNREACHABLE();
6318
    }
6319
  }
6320
}
6321

    
6322

    
6323
// Check that the store buffer contains all intergenerational pointers by
6324
// scanning a page and ensuring that all pointers to young space are in the
6325
// store buffer.
6326
void Heap::OldPointerSpaceCheckStoreBuffer() {
6327
  OldSpace* space = old_pointer_space();
6328
  PageIterator pages(space);
6329

    
6330
  store_buffer()->SortUniq();
6331

    
6332
  while (pages.has_next()) {
6333
    Page* page = pages.next();
6334
    Object** current = reinterpret_cast<Object**>(page->area_start());
6335

    
6336
    Address end = page->area_end();
6337

    
6338
    Object*** store_buffer_position = store_buffer()->Start();
6339
    Object*** store_buffer_top = store_buffer()->Top();
6340

    
6341
    Object** limit = reinterpret_cast<Object**>(end);
6342
    CheckStoreBuffer(this,
6343
                     current,
6344
                     limit,
6345
                     &store_buffer_position,
6346
                     store_buffer_top,
6347
                     &EverythingsAPointer,
6348
                     space->top(),
6349
                     space->limit());
6350
  }
6351
}
6352

    
6353

    
6354
void Heap::MapSpaceCheckStoreBuffer() {
6355
  MapSpace* space = map_space();
6356
  PageIterator pages(space);
6357

    
6358
  store_buffer()->SortUniq();
6359

    
6360
  while (pages.has_next()) {
6361
    Page* page = pages.next();
6362
    Object** current = reinterpret_cast<Object**>(page->area_start());
6363

    
6364
    Address end = page->area_end();
6365

    
6366
    Object*** store_buffer_position = store_buffer()->Start();
6367
    Object*** store_buffer_top = store_buffer()->Top();
6368

    
6369
    Object** limit = reinterpret_cast<Object**>(end);
6370
    CheckStoreBuffer(this,
6371
                     current,
6372
                     limit,
6373
                     &store_buffer_position,
6374
                     store_buffer_top,
6375
                     &IsAMapPointerAddress,
6376
                     space->top(),
6377
                     space->limit());
6378
  }
6379
}
6380

    
6381

    
6382
void Heap::LargeObjectSpaceCheckStoreBuffer() {
6383
  LargeObjectIterator it(lo_space());
6384
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
6385
    // We only have code, sequential strings, or fixed arrays in large
6386
    // object space, and only fixed arrays can possibly contain pointers to
6387
    // the young generation.
6388
    if (object->IsFixedArray()) {
6389
      Object*** store_buffer_position = store_buffer()->Start();
6390
      Object*** store_buffer_top = store_buffer()->Top();
6391
      Object** current = reinterpret_cast<Object**>(object->address());
6392
      Object** limit =
6393
          reinterpret_cast<Object**>(object->address() + object->Size());
6394
      CheckStoreBuffer(this,
6395
                       current,
6396
                       limit,
6397
                       &store_buffer_position,
6398
                       store_buffer_top,
6399
                       &EverythingsAPointer,
6400
                       NULL,
6401
                       NULL);
6402
    }
6403
  }
6404
}
6405
#endif
6406

    
6407

    
6408
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
6409
  IterateStrongRoots(v, mode);
6410
  IterateWeakRoots(v, mode);
6411
}
6412

    
6413

    
6414
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
6415
  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
6416
  v->Synchronize(VisitorSynchronization::kStringTable);
6417
  if (mode != VISIT_ALL_IN_SCAVENGE &&
6418
      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
6419
    // Scavenge collections have special processing for this.
6420
    external_string_table_.Iterate(v);
6421
  }
6422
  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
6423
}
6424

    
6425

    
6426
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
6427
  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
6428
  v->Synchronize(VisitorSynchronization::kStrongRootList);
6429

    
6430
  v->VisitPointer(BitCast<Object**>(&hidden_string_));
6431
  v->Synchronize(VisitorSynchronization::kInternalizedString);
6432

    
6433
  isolate_->bootstrapper()->Iterate(v);
6434
  v->Synchronize(VisitorSynchronization::kBootstrapper);
6435
  isolate_->Iterate(v);
6436
  v->Synchronize(VisitorSynchronization::kTop);
6437
  Relocatable::Iterate(isolate_, v);
6438
  v->Synchronize(VisitorSynchronization::kRelocatable);
6439

    
6440
#ifdef ENABLE_DEBUGGER_SUPPORT
6441
  isolate_->debug()->Iterate(v);
6442
  if (isolate_->deoptimizer_data() != NULL) {
6443
    isolate_->deoptimizer_data()->Iterate(v);
6444
  }
6445
#endif
6446
  v->Synchronize(VisitorSynchronization::kDebug);
6447
  isolate_->compilation_cache()->Iterate(v);
6448
  v->Synchronize(VisitorSynchronization::kCompilationCache);
6449

    
6450
  // Iterate over local handles in handle scopes.
6451
  isolate_->handle_scope_implementer()->Iterate(v);
6452
  isolate_->IterateDeferredHandles(v);
6453
  v->Synchronize(VisitorSynchronization::kHandleScope);
6454

    
6455
  // Iterate over the builtin code objects and code stubs in the
6456
  // heap. Note that it is not necessary to iterate over code objects
6457
  // on scavenge collections.
6458
  if (mode != VISIT_ALL_IN_SCAVENGE) {
6459
    isolate_->builtins()->IterateBuiltins(v);
6460
  }
6461
  v->Synchronize(VisitorSynchronization::kBuiltins);
6462

    
6463
  // Iterate over global handles.
6464
  switch (mode) {
6465
    case VISIT_ONLY_STRONG:
6466
      isolate_->global_handles()->IterateStrongRoots(v);
6467
      break;
6468
    case VISIT_ALL_IN_SCAVENGE:
6469
      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
6470
      break;
6471
    case VISIT_ALL_IN_SWEEP_NEWSPACE:
6472
    case VISIT_ALL:
6473
      isolate_->global_handles()->IterateAllRoots(v);
6474
      break;
6475
  }
6476
  v->Synchronize(VisitorSynchronization::kGlobalHandles);
6477

    
6478
  // Iterate over eternal handles.
6479
  if (mode == VISIT_ALL_IN_SCAVENGE) {
6480
    isolate_->eternal_handles()->IterateNewSpaceRoots(v);
6481
  } else {
6482
    isolate_->eternal_handles()->IterateAllRoots(v);
6483
  }
6484
  v->Synchronize(VisitorSynchronization::kEternalHandles);
6485

    
6486
  // Iterate over pointers being held by inactive threads.
6487
  isolate_->thread_manager()->Iterate(v);
6488
  v->Synchronize(VisitorSynchronization::kThreadManager);
6489

    
6490
  // Iterate over the pointers the Serialization/Deserialization code is
6491
  // holding.
6492
  // During garbage collection this keeps the partial snapshot cache alive.
6493
  // During deserialization of the startup snapshot this creates the partial
6494
  // snapshot cache and deserializes the objects it refers to.  During
6495
  // serialization this does nothing, since the partial snapshot cache is
6496
  // empty.  However the next thing we do is create the partial snapshot,
6497
  // filling up the partial snapshot cache with objects it needs as we go.
6498
  SerializerDeserializer::Iterate(isolate_, v);
6499
  // We don't do a v->Synchronize call here, because in debug mode that will
6500
  // output a flag to the snapshot.  However at this point the serializer and
6501
  // deserializer are deliberately a little unsynchronized (see above) so the
6502
  // checking of the sync flag in the snapshot would fail.
6503
}
6504

    
6505

    
6506
// TODO(1236194): Since the heap size is configurable on the command line
6507
// and through the API, we should gracefully handle the case that the heap
6508
// size is not big enough to fit all the initial objects.
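// The sizes normally come from the embedder (e.g. via
// v8::SetResourceConstraints) or from FLAG_max_new_space_size,
// FLAG_max_old_space_size and FLAG_max_executable_size through
// ConfigureHeapDefault() below; a value of zero leaves the corresponding
// setting at its default.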
6509
bool Heap::ConfigureHeap(int max_semispace_size,
6510
                         intptr_t max_old_gen_size,
6511
                         intptr_t max_executable_size) {
6512
  if (HasBeenSetUp()) return false;
6513

    
6514
  if (FLAG_stress_compaction) {
6515
    // This will cause more frequent GCs when stressing.
6516
    max_semispace_size_ = Page::kPageSize;
6517
  }
6518

    
6519
  if (max_semispace_size > 0) {
6520
    if (max_semispace_size < Page::kPageSize) {
6521
      max_semispace_size = Page::kPageSize;
6522
      if (FLAG_trace_gc) {
6523
        PrintPID("Max semispace size cannot be less than %dkbytes\n",
6524
                 Page::kPageSize >> 10);
6525
      }
6526
    }
6527
    max_semispace_size_ = max_semispace_size;
6528
  }
6529

    
6530
  if (Snapshot::IsEnabled()) {
6531
    // If we are using a snapshot we always reserve the default amount
6532
    // of memory for each semispace because code in the snapshot has
6533
    // write-barrier code that relies on the size and alignment of new
6534
    // space.  We therefore cannot use a larger max semispace size
6535
    // than the default reserved semispace size.
6536
    if (max_semispace_size_ > reserved_semispace_size_) {
6537
      max_semispace_size_ = reserved_semispace_size_;
6538
      if (FLAG_trace_gc) {
6539
        PrintPID("Max semispace size cannot be more than %dkbytes\n",
6540
                 reserved_semispace_size_ >> 10);
6541
      }
6542
    }
6543
  } else {
6544
    // If we are not using snapshots we reserve space for the actual
6545
    // max semispace size.
6546
    reserved_semispace_size_ = max_semispace_size_;
6547
  }
6548

    
6549
  if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
6550
  if (max_executable_size > 0) {
6551
    max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
6552
  }
6553

    
6554
  // The max executable size must be less than or equal to the max old
6555
  // generation size.
6556
  if (max_executable_size_ > max_old_generation_size_) {
6557
    max_executable_size_ = max_old_generation_size_;
6558
  }
6559

    
6560
  // The new space size must be a power of two to support single-bit testing
6561
  // for containment.
6562
  max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
6563
  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
6564
  initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
6565

    
6566
  // The external allocation limit should be below 256 MB on all architectures
6567
  // to avoid unnecessary low memory notifications, as that is the threshold
6568
  // for some embedders.
6569
  external_allocation_limit_ = 12 * max_semispace_size_;
6570
  ASSERT(external_allocation_limit_ <= 256 * MB);
6571

    
6572
  // The old generation is paged and needs at least one page for each space.
6573
  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
6574
  max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
6575
                                                       Page::kPageSize),
6576
                                 RoundUp(max_old_generation_size_,
6577
                                         Page::kPageSize));
6578

    
6579
  // We rely on being able to allocate new arrays in paged spaces.
6580
  ASSERT(MaxRegularSpaceAllocationSize() >=
6581
         (JSArray::kSize +
6582
          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
6583
          AllocationMemento::kSize));
6584

    
6585
  configured_ = true;
6586
  return true;
6587
}
6588

    
6589

    
6590
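// FLAG_max_new_space_size is expressed in kBytes for the whole new space
// (both semispaces), hence the division by two before it is passed on as
// the per-semispace size.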
bool Heap::ConfigureHeapDefault() {
6591
  return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
6592
                       static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
6593
                       static_cast<intptr_t>(FLAG_max_executable_size) * MB);
6594
}
6595

    
6596

    
6597
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
6598
  *stats->start_marker = HeapStats::kStartMarker;
6599
  *stats->end_marker = HeapStats::kEndMarker;
6600
  *stats->new_space_size = new_space_.SizeAsInt();
6601
  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
6602
  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
6603
  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
6604
  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
6605
  *stats->old_data_space_capacity = old_data_space_->Capacity();
6606
  *stats->code_space_size = code_space_->SizeOfObjects();
6607
  *stats->code_space_capacity = code_space_->Capacity();
6608
  *stats->map_space_size = map_space_->SizeOfObjects();
6609
  *stats->map_space_capacity = map_space_->Capacity();
6610
  *stats->cell_space_size = cell_space_->SizeOfObjects();
6611
  *stats->cell_space_capacity = cell_space_->Capacity();
6612
  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
6613
  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
6614
  *stats->lo_space_size = lo_space_->Size();
6615
  isolate_->global_handles()->RecordStats(stats);
6616
  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
6617
  *stats->memory_allocator_capacity =
6618
      isolate()->memory_allocator()->Size() +
6619
      isolate()->memory_allocator()->Available();
6620
  *stats->os_error = OS::GetLastError();
6621
6622
  if (take_snapshot) {
6623
    HeapIterator iterator(this);
6624
    for (HeapObject* obj = iterator.next();
6625
         obj != NULL;
6626
         obj = iterator.next()) {
6627
      InstanceType type = obj->map()->instance_type();
6628
      ASSERT(0 <= type && type <= LAST_TYPE);
6629
      stats->objects_per_type[type]++;
6630
      stats->size_per_type[type] += obj->Size();
6631
    }
6632
  }
6633
}
6634

    
6635

    
6636
intptr_t Heap::PromotedSpaceSizeOfObjects() {
6637
  return old_pointer_space_->SizeOfObjects()
6638
      + old_data_space_->SizeOfObjects()
6639
      + code_space_->SizeOfObjects()
6640
      + map_space_->SizeOfObjects()
6641
      + cell_space_->SizeOfObjects()
6642
      + property_cell_space_->SizeOfObjects()
6643
      + lo_space_->SizeOfObjects();
6644
}
6645

    
6646

    
6647
intptr_t Heap::PromotedExternalMemorySize() {
6648
  if (amount_of_external_allocated_memory_
6649
      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
6650
  return amount_of_external_allocated_memory_
6651
      - amount_of_external_allocated_memory_at_last_global_gc_;
6652
}
6653

    
6654

    
6655
V8_DECLARE_ONCE(initialize_gc_once);
6656

    
6657
static void InitializeGCOnce() {
6658
  InitializeScavengingVisitorsTables();
6659
  NewSpaceScavenger::Initialize();
6660
  MarkCompactCollector::Initialize();
6661
}
6662

    
6663

    
6664
bool Heap::SetUp() {
6665
#ifdef DEBUG
6666
  allocation_timeout_ = FLAG_gc_interval;
6667
#endif
6668

    
6669
  // Initialize heap spaces and initial maps and objects. Whenever something
6670
  // goes wrong, just return false. The caller should check the results and
6671
  // call Heap::TearDown() to release allocated memory.
6672
  //
6673
  // If the heap is not yet configured (e.g. through the API), configure it.
6674
  // Configuration is based on the flags new-space-size (really the semispace
6675
  // size) and old-space-size if set or the initial values of semispace_size_
6676
  // and old_generation_size_ otherwise.
6677
  if (!configured_) {
6678
    if (!ConfigureHeapDefault()) return false;
6679
  }
6680

    
6681
  CallOnce(&initialize_gc_once, &InitializeGCOnce);
6682

    
6683
  MarkMapPointersAsEncoded(false);
6684

    
6685
  // Set up memory allocator.
6686
  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
6687
      return false;
6688

    
6689
  // Set up new space.
6690
  if (!new_space_.SetUp(reserved_semispace_size_, max_semispace_size_)) {
6691
    return false;
6692
  }
6693

    
6694
  // Initialize old pointer space.
6695
  old_pointer_space_ =
6696
      new OldSpace(this,
6697
                   max_old_generation_size_,
6698
                   OLD_POINTER_SPACE,
6699
                   NOT_EXECUTABLE);
6700
  if (old_pointer_space_ == NULL) return false;
6701
  if (!old_pointer_space_->SetUp()) return false;
6702

    
6703
  // Initialize old data space.
6704
  old_data_space_ =
6705
      new OldSpace(this,
6706
                   max_old_generation_size_,
6707
                   OLD_DATA_SPACE,
6708
                   NOT_EXECUTABLE);
6709
  if (old_data_space_ == NULL) return false;
6710
  if (!old_data_space_->SetUp()) return false;
6711

    
6712
  // Initialize the code space, set its maximum capacity to the old
6713
  // generation size. It needs executable memory.
6714
  // On 64-bit platform(s), we put all code objects in a 2 GB range of
6715
  // virtual address space, so that they can call each other with near calls.
6716
  if (code_range_size_ > 0) {
6717
    if (!isolate_->code_range()->SetUp(code_range_size_)) {
6718
      return false;
6719
    }
6720
  }
6721

    
6722
  code_space_ =
6723
      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
6724
  if (code_space_ == NULL) return false;
6725
  if (!code_space_->SetUp()) return false;
6726

    
6727
  // Initialize map space.
6728
  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
6729
  if (map_space_ == NULL) return false;
6730
  if (!map_space_->SetUp()) return false;
6731

    
6732
  // Initialize simple cell space.
6733
  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
6734
  if (cell_space_ == NULL) return false;
6735
  if (!cell_space_->SetUp()) return false;
6736

    
6737
  // Initialize global property cell space.
6738
  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
6739
                                               PROPERTY_CELL_SPACE);
6740
  if (property_cell_space_ == NULL) return false;
6741
  if (!property_cell_space_->SetUp()) return false;
6742

    
6743
  // The large object space may contain code or data.  We set the memory
6744
  // to be non-executable here for safety, but this means we need to enable it
6745
  // explicitly when allocating large code objects.
6746
  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
6747
  if (lo_space_ == NULL) return false;
6748
  if (!lo_space_->SetUp()) return false;
6749

    
6750
  // Set up the seed that is used to randomize the string hash function.
6751
  ASSERT(hash_seed() == 0);
6752
  if (FLAG_randomize_hashes) {
6753
    if (FLAG_hash_seed == 0) {
6754
      int rnd = isolate()->random_number_generator()->NextInt();
6755
      set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
6756
    } else {
6757
      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
6758
    }
6759
  }
6760

    
6761
  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
6762
  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
6763

    
6764
  store_buffer()->SetUp();
6765

    
6766
  if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
6767
#ifdef DEBUG
6768
  relocation_mutex_locked_by_optimizer_thread_ = false;
6769
#endif  // DEBUG
6770

    
6771
  return true;
6772
}
6773

    
6774

    
6775
bool Heap::CreateHeapObjects() {
6776
  // Create initial maps.
6777
  if (!CreateInitialMaps()) return false;
6778
  if (!CreateApiObjects()) return false;
6779

    
6780
  // Create initial objects
6781
  if (!CreateInitialObjects()) return false;
6782

    
6783
  native_contexts_list_ = undefined_value();
6784
  array_buffers_list_ = undefined_value();
6785
  allocation_sites_list_ = undefined_value();
6786
  weak_object_to_code_table_ = undefined_value();
6787
  return true;
6788
}
6789

    
6790

    
6791
void Heap::SetStackLimits() {
6792
  ASSERT(isolate_ != NULL);
6793
  ASSERT(isolate_ == isolate());
6794
  // On 64 bit machines, pointers are generally out of range of Smis.  We write
6795
  // something that looks like an out of range Smi to the GC.
6796

    
6797
  // Set up the special root array entries containing the stack limits.
6798
  // These are actually addresses, but the tag makes the GC ignore it.
6799
  roots_[kStackLimitRootIndex] =
6800
      reinterpret_cast<Object*>(
6801
          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
6802
  roots_[kRealStackLimitRootIndex] =
6803
      reinterpret_cast<Object*>(
6804
          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
6805
}
6806

    
6807

    
6808
void Heap::TearDown() {
6809
#ifdef VERIFY_HEAP
6810
  if (FLAG_verify_heap) {
6811
    Verify();
6812
  }
6813
#endif
6814

    
6815
  if (FLAG_print_cumulative_gc_stat) {
6816
    PrintF("\n");
6817
    PrintF("gc_count=%d ", gc_count_);
6818
    PrintF("mark_sweep_count=%d ", ms_count_);
6819
    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
6820
    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
6821
    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
6822
    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
6823
           get_max_alive_after_gc());
6824
    PrintF("total_marking_time=%.1f ", marking_time());
6825
    PrintF("total_sweeping_time=%.1f ", sweeping_time());
6826
    PrintF("\n\n");
6827
  }
6828

    
6829
  TearDownArrayBuffers();
6830

    
6831
  isolate_->global_handles()->TearDown();
6832

    
6833
  external_string_table_.TearDown();
6834

    
6835
  mark_compact_collector()->TearDown();
6836

    
6837
  new_space_.TearDown();
6838

    
6839
  if (old_pointer_space_ != NULL) {
6840
    old_pointer_space_->TearDown();
6841
    delete old_pointer_space_;
6842
    old_pointer_space_ = NULL;
6843
  }
6844

    
6845
  if (old_data_space_ != NULL) {
6846
    old_data_space_->TearDown();
6847
    delete old_data_space_;
6848
    old_data_space_ = NULL;
6849
  }
6850

    
6851
  if (code_space_ != NULL) {
6852
    code_space_->TearDown();
6853
    delete code_space_;
6854
    code_space_ = NULL;
6855
  }
6856

    
6857
  if (map_space_ != NULL) {
6858
    map_space_->TearDown();
6859
    delete map_space_;
6860
    map_space_ = NULL;
6861
  }
6862

    
6863
  if (cell_space_ != NULL) {
6864
    cell_space_->TearDown();
6865
    delete cell_space_;
6866
    cell_space_ = NULL;
6867
  }
6868

    
6869
  if (property_cell_space_ != NULL) {
6870
    property_cell_space_->TearDown();
6871
    delete property_cell_space_;
6872
    property_cell_space_ = NULL;
6873
  }
6874

    
6875
  if (lo_space_ != NULL) {
6876
    lo_space_->TearDown();
6877
    delete lo_space_;
6878
    lo_space_ = NULL;
6879
  }
6880

    
6881
  store_buffer()->TearDown();
6882
  incremental_marking()->TearDown();
6883

    
6884
  isolate_->memory_allocator()->TearDown();
6885

    
6886
  delete relocation_mutex_;
6887
}
6888

    
6889

    
6890
void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
6891
                                 GCType gc_type,
6892
                                 bool pass_isolate) {
6893
  ASSERT(callback != NULL);
6894
  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
6895
  ASSERT(!gc_prologue_callbacks_.Contains(pair));
6896
  return gc_prologue_callbacks_.Add(pair);
6897
}
6898

    
6899

    
6900
void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
6901
  ASSERT(callback != NULL);
6902
  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
6903
    if (gc_prologue_callbacks_[i].callback == callback) {
6904
      gc_prologue_callbacks_.Remove(i);
6905
      return;
6906
    }
6907
  }
6908
  UNREACHABLE();
6909
}
6910

    
6911

    
6912
void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
6913
                                 GCType gc_type,
6914
                                 bool pass_isolate) {
6915
  ASSERT(callback != NULL);
6916
  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
6917
  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
6918
  return gc_epilogue_callbacks_.Add(pair);
6919
}
6920

    
6921

    
6922
void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
6923
  ASSERT(callback != NULL);
6924
  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
6925
    if (gc_epilogue_callbacks_[i].callback == callback) {
6926
      gc_epilogue_callbacks_.Remove(i);
6927
      return;
6928
    }
6929
  }
6930
  UNREACHABLE();
6931
}
6932

    
6933

    
6934
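// Records the dependency |obj| -> |dep| in weak_object_to_code_table_.  The
// table may be reallocated by Put(); if so, the old backing store is zapped
// (when zapping is enabled) before the root is updated.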
MaybeObject* Heap::AddWeakObjectToCodeDependency(Object* obj,
                                                 DependentCode* dep) {
  ASSERT(!InNewSpace(obj));
  ASSERT(!InNewSpace(dep));
  MaybeObject* maybe_obj =
      WeakHashTable::cast(weak_object_to_code_table_)->Put(obj, dep);
  WeakHashTable* table;
  if (!maybe_obj->To(&table)) return maybe_obj;
  if (ShouldZapGarbage() && weak_object_to_code_table_ != table) {
    WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
  }
  set_weak_object_to_code_table(table);
  ASSERT_EQ(dep, WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj));
  return weak_object_to_code_table_;
}


DependentCode* Heap::LookupWeakObjectToCodeDependency(Object* obj) {
  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
  if (dep->IsDependentCode()) return DependentCode::cast(dep);
  return DependentCode::cast(empty_fixed_array());
}


void Heap::EnsureWeakObjectToCodeTable() {
  if (!weak_object_to_code_table()->IsHashTable()) {
    set_weak_object_to_code_table(*isolate()->factory()->NewWeakHashTable(16));
  }
}


#ifdef DEBUG

class PrintHandleVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++)
      PrintF("  handle %p to %p\n",
             reinterpret_cast<void*>(p),
             reinterpret_cast<void*>(*p));
  }
};


void Heap::PrintHandles() {
  PrintF("Handles:\n");
  PrintHandleVisitor v;
  isolate_->handle_scope_implementer()->Iterate(&v);
}

#endif


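// AllSpaces, PagedSpaces and OldSpaces enumerate successively smaller
// subsets of the heap's spaces; next() hands out each space once and then
// returns NULL.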
Space* AllSpaces::next() {
  switch (counter_++) {
    case NEW_SPACE:
      return heap_->new_space();
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    case LO_SPACE:
      return heap_->lo_space();
    default:
      return NULL;
  }
}


PagedSpace* PagedSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    case MAP_SPACE:
      return heap_->map_space();
    case CELL_SPACE:
      return heap_->cell_space();
    case PROPERTY_CELL_SPACE:
      return heap_->property_cell_space();
    default:
      return NULL;
  }
}



OldSpace* OldSpaces::next() {
  switch (counter_++) {
    case OLD_POINTER_SPACE:
      return heap_->old_pointer_space();
    case OLD_DATA_SPACE:
      return heap_->old_data_space();
    case CODE_SPACE:
      return heap_->code_space();
    default:
      return NULL;
  }
}


SpaceIterator::SpaceIterator(Heap* heap)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(NULL) {
}


SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
    : heap_(heap),
      current_space_(FIRST_SPACE),
      iterator_(NULL),
      size_func_(size_func) {
}


SpaceIterator::~SpaceIterator() {
  // Delete active iterator if any.
  delete iterator_;
}


bool SpaceIterator::has_next() {
  // Iterate until no more spaces.
  return current_space_ != LAST_SPACE;
}


ObjectIterator* SpaceIterator::next() {
  if (iterator_ != NULL) {
    delete iterator_;
    iterator_ = NULL;
    // Move to the next space.
    current_space_++;
    if (current_space_ > LAST_SPACE) {
      return NULL;
    }
  }

  // Return iterator for the new current space.
  return CreateIterator();
}


// Create an iterator for the space to iterate.
ObjectIterator* SpaceIterator::CreateIterator() {
  ASSERT(iterator_ == NULL);

  switch (current_space_) {
    case NEW_SPACE:
      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
      break;
    case OLD_POINTER_SPACE:
      iterator_ =
          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
      break;
    case OLD_DATA_SPACE:
      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
      break;
    case CODE_SPACE:
      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
      break;
    case MAP_SPACE:
      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
      break;
    case CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
      break;
    case PROPERTY_CELL_SPACE:
      iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
                                         size_func_);
      break;
    case LO_SPACE:
      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
      break;
  }

  // Return the newly allocated iterator.
  ASSERT(iterator_ != NULL);
  return iterator_;
}


class HeapObjectsFilter {
 public:
  virtual ~HeapObjectsFilter() {}
  virtual bool SkipObject(HeapObject* object) = 0;
};


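// Filter for HeapIterator that skips objects which are unreachable at the
// time the filter is constructed: everything reachable from the roots is
// marked up front and SkipObject() rejects unmarked objects.  The borrowed
// mark bits are cleared again in the destructor.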
class UnreachableObjectsFilter : public HeapObjectsFilter {
 public:
  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
    MarkReachableObjects();
  }

  ~UnreachableObjectsFilter() {
    heap_->mark_compact_collector()->ClearMarkbits();
  }

  bool SkipObject(HeapObject* object) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    return !mark_bit.Get();
  }

 private:
  class MarkingVisitor : public ObjectVisitor {
   public:
    MarkingVisitor() : marking_stack_(10) {}

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) {
        if (!(*p)->IsHeapObject()) continue;
        HeapObject* obj = HeapObject::cast(*p);
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        if (!mark_bit.Get()) {
          mark_bit.Set();
          marking_stack_.Add(obj);
        }
      }
    }

    void TransitiveClosure() {
      while (!marking_stack_.is_empty()) {
        HeapObject* obj = marking_stack_.RemoveLast();
        obj->Iterate(this);
      }
    }

   private:
    List<HeapObject*> marking_stack_;
  };

  void MarkReachableObjects() {
    MarkingVisitor visitor;
    heap_->IterateRoots(&visitor, VISIT_ALL);
    visitor.TransitiveClosure();
  }

  Heap* heap_;
  DisallowHeapAllocation no_allocation_;
};


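// HeapIterator walks every object in every space by chaining the per-space
// object iterators produced by SpaceIterator.  With kFilterUnreachable it
// additionally skips objects rejected by the UnreachableObjectsFilter above.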
HeapIterator::HeapIterator(Heap* heap)
    : heap_(heap),
      filtering_(HeapIterator::kNoFiltering),
      filter_(NULL) {
  Init();
}


HeapIterator::HeapIterator(Heap* heap,
                           HeapIterator::HeapObjectsFiltering filtering)
    : heap_(heap),
      filtering_(filtering),
      filter_(NULL) {
  Init();
}


HeapIterator::~HeapIterator() {
  Shutdown();
}


void HeapIterator::Init() {
  // Start the iteration.
  space_iterator_ = new SpaceIterator(heap_);
  switch (filtering_) {
    case kFilterUnreachable:
      filter_ = new UnreachableObjectsFilter(heap_);
      break;
    default:
      break;
  }
  object_iterator_ = space_iterator_->next();
}


void HeapIterator::Shutdown() {
#ifdef DEBUG
  // Assert that in filtering mode we have iterated through all
  // objects. Otherwise, heap will be left in an inconsistent state.
  if (filtering_ != kNoFiltering) {
    ASSERT(object_iterator_ == NULL);
  }
#endif
  // Make sure the last iterator is deallocated.
  delete space_iterator_;
  space_iterator_ = NULL;
  object_iterator_ = NULL;
  delete filter_;
  filter_ = NULL;
}


HeapObject* HeapIterator::next() {
  if (filter_ == NULL) return NextObject();

  HeapObject* obj = NextObject();
  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
  return obj;
}


HeapObject* HeapIterator::NextObject() {
  // No iterator means we are done.
  if (object_iterator_ == NULL) return NULL;

  if (HeapObject* obj = object_iterator_->next_object()) {
    // If the current iterator has more objects we are fine.
    return obj;
  } else {
    // Go through the spaces looking for one that has objects.
    while (space_iterator_->has_next()) {
      object_iterator_ = space_iterator_->next();
      if (HeapObject* obj = object_iterator_->next_object()) {
        return obj;
      }
    }
  }
  // Done with the last space.
  object_iterator_ = NULL;
  return NULL;
}


void HeapIterator::reset() {
  // Restart the iterator.
  Shutdown();
  Init();
}


#ifdef DEBUG

Object* const PathTracer::kAnyGlobalObject = NULL;

class PathTracer::MarkVisitor: public ObjectVisitor {
 public:
  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; !tracer_->found() && (p < end); p++) {
      if ((*p)->IsHeapObject())
        tracer_->MarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


class PathTracer::UnmarkVisitor: public ObjectVisitor {
 public:
  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
  void VisitPointers(Object** start, Object** end) {
    // Scan all HeapObject pointers in [start, end)
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject())
        tracer_->UnmarkRecursively(p, this);
    }
  }

 private:
  PathTracer* tracer_;
};


void PathTracer::VisitPointers(Object** start, Object** end) {
  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
  // Visit all HeapObject pointers in [start, end)
  for (Object** p = start; !done && (p < end); p++) {
    if ((*p)->IsHeapObject()) {
      TracePathFrom(p);
      done = ((what_to_find_ == FIND_FIRST) && found_target_);
    }
  }
}


void PathTracer::Reset() {
  found_target_ = false;
  object_stack_.Clear();
}


void PathTracer::TracePathFrom(Object** root) {
  ASSERT((search_target_ == kAnyGlobalObject) ||
         search_target_->IsHeapObject());
  found_target_in_trace_ = false;
  Reset();

  MarkVisitor mark_visitor(this);
  MarkRecursively(root, &mark_visitor);

  UnmarkVisitor unmark_visitor(this);
  UnmarkRecursively(root, &unmark_visitor);

  ProcessResults();
}


static bool SafeIsNativeContext(HeapObject* obj) {
  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
}


void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (!map->IsHeapObject()) return;  // visited before

  if (found_target_in_trace_) return;  // stop if target found
  object_stack_.Add(obj);
  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
      (obj == search_target_)) {
    found_target_in_trace_ = true;
    found_target_ = true;
    return;
  }

  bool is_native_context = SafeIsNativeContext(obj);

  // not visited yet
  Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));

  Address map_addr = map_p->address();

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));

  // Scan the object body.
  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
    // This is specialized to scan Contexts properly.
    Object** start = reinterpret_cast<Object**>(obj->address() +
                                                Context::kHeaderSize);
    Object** end = reinterpret_cast<Object**>(obj->address() +
        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
    mark_visitor->VisitPointers(start, end);
  } else {
    obj->IterateBody(map_p->instance_type(),
                     obj->SizeFromMap(map_p),
                     mark_visitor);
  }

  // Scan the map after the body because the body is a lot more interesting
  // when doing leak detection.
  MarkRecursively(&map, mark_visitor);

  if (!found_target_in_trace_)  // don't pop if found the target
    object_stack_.RemoveLast();
}


void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
  if (!(*p)->IsHeapObject()) return;

  HeapObject* obj = HeapObject::cast(*p);

  Object* map = obj->map();

  if (map->IsHeapObject()) return;  // unmarked already

  Address map_addr = reinterpret_cast<Address>(map);

  map_addr -= kMarkTag;

  ASSERT_TAG_ALIGNED(map_addr);

  HeapObject* map_p = HeapObject::FromAddress(map_addr);

  obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));

  UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);

  obj->IterateBody(Map::cast(map_p)->instance_type(),
                   obj->SizeFromMap(Map::cast(map_p)),
                   unmark_visitor);
}


void PathTracer::ProcessResults() {
  if (found_target_) {
    PrintF("=====================================\n");
    PrintF("====        Path to object       ====\n");
    PrintF("=====================================\n\n");

    ASSERT(!object_stack_.is_empty());
    for (int i = 0; i < object_stack_.length(); i++) {
      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
      Object* obj = object_stack_[i];
      obj->Print();
    }
    PrintF("=====================================\n");
  }
}


// Triggers a depth-first traversal of reachable objects from one
// given root object and finds a path to a specific heap object and
// prints it.
void Heap::TracePathToObjectFrom(Object* target, Object* root) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  tracer.VisitPointer(&root);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}


// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
  PathTracer tracer(PathTracer::kAnyGlobalObject,
                    PathTracer::FIND_ALL,
                    VISIT_ALL);
  IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif


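// Sums the bytes that are wasted or sitting on free lists across all old
// spaces; the GC tracer reports this as the hole size before and after a
// collection.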
static intptr_t CountTotalHolesSize(Heap* heap) {
  intptr_t holes_size = 0;
  OldSpaces spaces(heap);
  for (OldSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    holes_size += space->Waste() + space->Available();
  }
  return holes_size;
}


7492
                   const char* gc_reason,
7493
                   const char* collector_reason)
7494
    : start_time_(0.0),
7495
      start_object_size_(0),
7496
      start_memory_size_(0),
7497
      gc_count_(0),
7498
      full_gc_count_(0),
7499
      allocated_since_last_gc_(0),
7500
      spent_in_mutator_(0),
7501
      promoted_objects_size_(0),
7502
      nodes_died_in_new_space_(0),
7503
      nodes_copied_in_new_space_(0),
7504
      nodes_promoted_(0),
7505
      heap_(heap),
7506
      gc_reason_(gc_reason),
7507
      collector_reason_(collector_reason) {
7508
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7509
  start_time_ = OS::TimeCurrentMillis();
7510
  start_object_size_ = heap_->SizeOfObjects();
7511
  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
7512

    
7513
  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
7514
    scopes_[i] = 0;
7515
  }
7516

    
7517
  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
7518

    
7519
  allocated_since_last_gc_ =
7520
      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
7521

    
7522
  if (heap_->last_gc_end_timestamp_ > 0) {
7523
    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
7524
  }
7525

    
7526
  steps_count_ = heap_->incremental_marking()->steps_count();
7527
  steps_took_ = heap_->incremental_marking()->steps_took();
7528
  longest_step_ = heap_->incremental_marking()->longest_step();
7529
  steps_count_since_last_gc_ =
7530
      heap_->incremental_marking()->steps_count_since_last_gc();
7531
  steps_took_since_last_gc_ =
7532
      heap_->incremental_marking()->steps_took_since_last_gc();
7533
}
7534

    
7535

    
7536
GCTracer::~GCTracer() {
7537
  // Printf ONE line iff flag is set.
7538
  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
7539

    
7540
  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
7541

    
7542
  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
7543
  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
7544

    
7545
  double time = heap_->last_gc_end_timestamp_ - start_time_;
7546

    
7547
  // Update cumulative GC statistics if required.
7548
  if (FLAG_print_cumulative_gc_stat) {
7549
    heap_->total_gc_time_ms_ += time;
7550
    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
7551
    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
7552
                                     heap_->alive_after_last_gc_);
7553
    if (!first_gc) {
7554
      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
7555
                                   spent_in_mutator_);
7556
    }
7557
  } else if (FLAG_trace_gc_verbose) {
7558
    heap_->total_gc_time_ms_ += time;
7559
  }
7560

    
7561
  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
7562

    
7563
  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
7564

    
7565
  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
7566
  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
7567

    
7568
  if (!FLAG_trace_gc_nvp) {
7569
    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
7570

    
7571
    double end_memory_size_mb =
7572
        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
7573

    
7574
    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
7575
           CollectorString(),
7576
           static_cast<double>(start_object_size_) / MB,
7577
           static_cast<double>(start_memory_size_) / MB,
7578
           SizeOfHeapObjects(),
7579
           end_memory_size_mb);
7580

    
7581
    if (external_time > 0) PrintF("%d / ", external_time);
7582
    PrintF("%.1f ms", time);
7583
    if (steps_count_ > 0) {
7584
      if (collector_ == SCAVENGER) {
7585
        PrintF(" (+ %.1f ms in %d steps since last GC)",
7586
               steps_took_since_last_gc_,
7587
               steps_count_since_last_gc_);
7588
      } else {
7589
        PrintF(" (+ %.1f ms in %d steps since start of marking, "
7590
                   "biggest step %.1f ms)",
7591
               steps_took_,
7592
               steps_count_,
7593
               longest_step_);
7594
      }
7595
    }
7596

    
7597
    if (gc_reason_ != NULL) {
7598
      PrintF(" [%s]", gc_reason_);
7599
    }
7600

    
7601
    if (collector_reason_ != NULL) {
7602
      PrintF(" [%s]", collector_reason_);
7603
    }
7604

    
7605
    PrintF(".\n");
7606
  } else {
7607
    PrintF("pause=%.1f ", time);
7608
    PrintF("mutator=%.1f ", spent_in_mutator_);
7609
    PrintF("gc=");
7610
    switch (collector_) {
7611
      case SCAVENGER:
7612
        PrintF("s");
7613
        break;
7614
      case MARK_COMPACTOR:
7615
        PrintF("ms");
7616
        break;
7617
      default:
7618
        UNREACHABLE();
7619
    }
7620
    PrintF(" ");
7621

    
7622
    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
7623
    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
7624
    PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]);
7625
    PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
7626
    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
7627
    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
7628
    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
7629
    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
7630
    PrintF("compaction_ptrs=%.1f ",
7631
        scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
7632
    PrintF("intracompaction_ptrs=%.1f ",
7633
        scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
7634
    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
7635
    PrintF("weakcollection_process=%.1f ",
7636
        scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
7637
    PrintF("weakcollection_clear=%.1f ",
7638
        scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
7639

    
7640
    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
7641
    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
7642
    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
7643
           in_free_list_or_wasted_before_gc_);
7644
    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
7645

    
7646
    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
7647
    PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
7648
    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
7649
    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
7650
    PrintF("nodes_promoted=%d ", nodes_promoted_);
7651

    
7652
    if (collector_ == SCAVENGER) {
7653
      PrintF("stepscount=%d ", steps_count_since_last_gc_);
7654
      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
7655
    } else {
7656
      PrintF("stepscount=%d ", steps_count_);
7657
      PrintF("stepstook=%.1f ", steps_took_);
7658
      PrintF("longeststep=%.1f ", longest_step_);
7659
    }
7660

    
7661
    PrintF("\n");
7662
  }
7663

    
7664
  heap_->PrintShortHeapStatistics();
7665
}
7666

    
7667

    
7668
const char* GCTracer::CollectorString() {
7669
  switch (collector_) {
7670
    case SCAVENGER:
7671
      return "Scavenge";
7672
    case MARK_COMPACTOR:
7673
      return "Mark-sweep";
7674
  }
7675
  return "Unknown GC";
7676
}
7677

    
7678

    
7679
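// The keyed lookup cache maps a (map, name) pair to a field offset.  Hash()
// mixes the map address with the name's hash; Lookup() and Update() probe
// the kEntriesPerBucket entries that follow the hashed index.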
int KeyedLookupCache::Hash(Map* map, Name* name) {
  // Uses only lower 32 bits if pointers are larger.
  uintptr_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
}


int KeyedLookupCache::Lookup(Map* map, Name* name) {
  int index = (Hash(map, name) & kHashMask);
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    if ((key.map == map) && key.name->Equals(name)) {
      return field_offsets_[index + i];
    }
  }
  return kNotFound;
}


void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) {
  if (!name->IsUniqueName()) {
    String* internalized_string;
    if (!map->GetIsolate()->heap()->InternalizeStringIfExists(
            String::cast(name), &internalized_string)) {
      return;
    }
    name = internalized_string;
  }
  // This cache is cleared only between mark compact passes, so we expect the
  // cache to only contain old space names.
  ASSERT(!map->GetIsolate()->heap()->InNewSpace(name));

  int index = (Hash(map, name) & kHashMask);
  // After a GC there will be free slots, so we use them in order (this may
  // help to get the most frequently used one in position 0).
  for (int i = 0; i < kEntriesPerBucket; i++) {
    Key& key = keys_[index + i];
    Object* free_entry_indicator = NULL;
    if (key.map == free_entry_indicator) {
      key.map = map;
      key.name = name;
      field_offsets_[index + i] = field_offset;
      return;
    }
  }
  // No free entry found in this bucket, so we move them all down one and
  // put the new entry at position zero.
  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
    Key& key = keys_[index + i];
    Key& key2 = keys_[index + i - 1];
    key = key2;
    field_offsets_[index + i] = field_offsets_[index + i - 1];
  }

  // Write the new first entry.
  Key& key = keys_[index];
  key.map = map;
  key.name = name;
  field_offsets_[index] = field_offset;
}


void KeyedLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}


void DescriptorLookupCache::Clear() {
  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}


#ifdef DEBUG
void Heap::GarbageCollectionGreedyCheck() {
  ASSERT(FLAG_gc_greedy);
  if (isolate_->bootstrapper()->IsActive()) return;
  if (disallow_allocation_failure()) return;
  CollectGarbage(NEW_SPACE);
}
#endif


TranscendentalCache::SubCache::SubCache(Isolate* isolate, Type t)
  : type_(t),
    isolate_(isolate) {
  uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
  uint32_t in1 = 0xffffffffu;  // generated by the FPU.
  for (int i = 0; i < kCacheSize; i++) {
    elements_[i].in[0] = in0;
    elements_[i].in[1] = in1;
    elements_[i].output = NULL;
  }
}


void TranscendentalCache::Clear() {
  for (int i = 0; i < kNumberOfCaches; i++) {
    if (caches_[i] != NULL) {
      delete caches_[i];
      caches_[i] = NULL;
    }
  }
}


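// Drops table entries that were cleared (replaced by the hole value) and
// moves external strings that are no longer in new space from the new-space
// list to the old-space list.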
void ExternalStringTable::CleanUp() {
  int last = 0;
  for (int i = 0; i < new_space_strings_.length(); ++i) {
    if (new_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(new_space_strings_[i]->IsExternalString());
    if (heap_->InNewSpace(new_space_strings_[i])) {
      new_space_strings_[last++] = new_space_strings_[i];
    } else {
      old_space_strings_.Add(new_space_strings_[i]);
    }
  }
  new_space_strings_.Rewind(last);
  new_space_strings_.Trim();

  last = 0;
  for (int i = 0; i < old_space_strings_.length(); ++i) {
    if (old_space_strings_[i] == heap_->the_hole_value()) {
      continue;
    }
    ASSERT(old_space_strings_[i]->IsExternalString());
    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
    old_space_strings_[last++] = old_space_strings_[i];
  }
  old_space_strings_.Rewind(last);
  old_space_strings_.Trim();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    Verify();
  }
#endif
}


void ExternalStringTable::TearDown() {
  new_space_strings_.Free();
  old_space_strings_.Free();
}


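// Chunks queued here are not unmapped immediately: they are kept on a
// singly-linked list threaded through next_chunk() and handed back to the
// memory allocator in FreeQueuedChunks().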
void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
  chunk->set_next_chunk(chunks_queued_for_free_);
  chunks_queued_for_free_ = chunk;
}


void Heap::FreeQueuedChunks() {
  if (chunks_queued_for_free_ == NULL) return;
  MemoryChunk* next;
  MemoryChunk* chunk;
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);

    if (chunk->owner()->identity() == LO_SPACE) {
      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
      // If FromAnyPointerAddress encounters a slot that belongs to a large
      // chunk queued for deletion it will fail to find the chunk because
      // it tries to perform a search in the list of pages owned by the large
      // object space, and queued chunks were detached from that list.
      // To work around this we split the large chunk into normal kPageSize
      // aligned pieces and initialize the size, owner and flags field of
      // every piece.
      // If FromAnyPointerAddress encounters a slot that belongs to one of
      // these smaller pieces it will treat it as a slot on a normal Page.
      Address chunk_end = chunk->address() + chunk->size();
      MemoryChunk* inner = MemoryChunk::FromAddress(
          chunk->address() + Page::kPageSize);
      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
      while (inner <= inner_last) {
        // Size of a large chunk is always a multiple of
        // OS::AllocateAlignment() so there is always
        // enough space for a fake MemoryChunk header.
        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
        // Guard against overflow.
        if (area_end < inner->address()) area_end = chunk_end;
        inner->SetArea(inner->address(), area_end);
        inner->set_size(Page::kPageSize);
        inner->set_owner(lo_space());
        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
        inner = MemoryChunk::FromAddress(
            inner->address() + Page::kPageSize);
      }
    }
  }
  isolate_->heap()->store_buffer()->Compact();
  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
    next = chunk->next_chunk();
    isolate_->memory_allocator()->Free(chunk);
  }
  chunks_queued_for_free_ = NULL;
}


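// Remembers the addresses of recently unmapped pages in a small ring buffer
// (kRememberedUnmappedPages entries), tagging the pointer so the entries are
// recognizable in a crash dump.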
void Heap::RememberUnmappedPage(Address page, bool compacted) {
  uintptr_t p = reinterpret_cast<uintptr_t>(page);
  // Tag the page pointer to make it findable in the dump file.
  if (compacted) {
    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
  } else {
    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
  }
  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
      reinterpret_cast<Address>(p);
  remembered_unmapped_pages_index_++;
  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
}


void Heap::ClearObjectStats(bool clear_last_time_stats) {
  memset(object_counts_, 0, sizeof(object_counts_));
  memset(object_sizes_, 0, sizeof(object_sizes_));
  if (clear_last_time_stats) {
    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
  }
}


static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;


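// Publishes the object counts and sizes gathered since the last checkpoint
// to the isolate's counters: each counter is incremented by the current
// value and decremented by the previously recorded one, after which the
// current values are saved for the next checkpoint.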
void Heap::CheckpointObjectStats() {
  LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
  Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
  counters->count_of_##name()->Increment(                                      \
      static_cast<int>(object_counts_[name]));                                 \
  counters->count_of_##name()->Decrement(                                      \
      static_cast<int>(object_counts_last_time_[name]));                       \
  counters->size_of_##name()->Increment(                                       \
      static_cast<int>(object_sizes_[name]));                                  \
  counters->size_of_##name()->Decrement(                                       \
      static_cast<int>(object_sizes_last_time_[name]));
  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
  int index;
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
  counters->count_of_CODE_TYPE_##name()->Increment(       \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_CODE_TYPE_##name()->Decrement(       \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_CODE_TYPE_##name()->Increment(        \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_CODE_TYPE_##name()->Decrement(        \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
      static_cast<int>(object_counts_[index]));           \
  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
      static_cast<int>(object_counts_last_time_[index])); \
  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
      static_cast<int>(object_sizes_[index]));            \
  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
      static_cast<int>(object_sizes_last_time_[index]));
  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                 \
  index = FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge; \
  counters->count_of_CODE_AGE_##name()->Increment(          \
      static_cast<int>(object_counts_[index]));             \
  counters->count_of_CODE_AGE_##name()->Decrement(          \
      static_cast<int>(object_counts_last_time_[index]));   \
  counters->size_of_CODE_AGE_##name()->Increment(           \
      static_cast<int>(object_sizes_[index]));              \
  counters->size_of_CODE_AGE_##name()->Decrement(           \
      static_cast<int>(object_sizes_last_time_[index]));
  CODE_AGE_LIST_WITH_NO_AGE(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT

  OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
  OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
  ClearObjectStats();
}


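// RelocationLock only does anything when concurrent recompilation is
// enabled: it takes the heap's relocation mutex and, in debug builds,
// records whether the lock was acquired by the optimizer thread.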
Heap::RelocationLock::RelocationLock(Heap* heap) : heap_(heap) {
  if (FLAG_concurrent_recompilation) {
    heap_->relocation_mutex_->Lock();
#ifdef DEBUG
    heap_->relocation_mutex_locked_by_optimizer_thread_ =
        heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
#endif  // DEBUG
  }
}

} }  // namespace v8::internal