The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.

Please select the desired protocol below to get the URL.

This URL has Read-Only access.

Statistics
| Branch: | Revision:

main_repo / deps / v8 / src / serialize.cc @ 40c0f755

History | View | Annotate | Download (45.9 KB)

1
// Copyright 2006-2008 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
4
// met:
5
//
6
//     * Redistributions of source code must retain the above copyright
7
//       notice, this list of conditions and the following disclaimer.
8
//     * Redistributions in binary form must reproduce the above
9
//       copyright notice, this list of conditions and the following
10
//       disclaimer in the documentation and/or other materials provided
11
//       with the distribution.
12
//     * Neither the name of Google Inc. nor the names of its
13
//       contributors may be used to endorse or promote products derived
14
//       from this software without specific prior written permission.
15
//
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27

    
28
#include "v8.h"
29

    
30
#include "accessors.h"
31
#include "api.h"
32
#include "execution.h"
33
#include "global-handles.h"
34
#include "ic-inl.h"
35
#include "natives.h"
36
#include "platform.h"
37
#include "runtime.h"
38
#include "serialize.h"
39
#include "stub-cache.h"
40
#include "v8threads.h"
41

    
42
namespace v8 { namespace internal {
43

    
44
// Encoding: a RelativeAddress must be able to fit in a pointer:
45
// it is encoded as an Address with (from MS to LS bits):
46
// 27 bits identifying a word in the space, in one of three formats:
47
// - MAP and OLD spaces: 16 bits of page number, 11 bits of word offset in page
48
// - NEW space:          27 bits of word offset
49
// - LO space:           27 bits of page number
50
// 3 bits to encode the AllocationSpace (special values for code in LO space)
51
// 2 bits identifying this as a HeapObject
52

    
53
// Bit-field layout of an encoded RelativeAddress (see comment above):
// the space tag sits just above the heap-object tag bits.
const int kSpaceShift = kHeapObjectTagSize;
const int kSpaceBits = kSpaceTagSize;
const int kSpaceMask = kSpaceTagMask;

// These values are used instead of space numbers when serializing/
// deserializing.  They indicate an object that is in large object space, but
// should be treated specially.
// Make the pages executable on platforms that support it:
const int kLOSpaceExecutable = LAST_SPACE + 1;
// Reserve space for write barrier bits (for objects that can contain
// references to new space):
const int kLOSpacePointer = LAST_SPACE + 2;


// Paged spaces: 11 bits of word offset within a page.
const int kOffsetShift = kSpaceShift + kSpaceBits;
const int kOffsetBits = 11;
const int kOffsetMask = (1 << kOffsetBits) - 1;

// Paged spaces: the page number takes whatever bits remain of the 32.
const int kPageBits = 32 - (kOffsetBits + kSpaceBits + kHeapObjectTagSize);
const int kPageShift = kOffsetShift + kOffsetBits;
const int kPageMask = (1 << kPageBits) - 1;

// NEW space word offsets and LO space page numbers use the combined
// page+offset field.
const int kPageAndOffsetShift = kOffsetShift;
const int kPageAndOffsetBits = kPageBits + kOffsetBits;
const int kPageAndOffsetMask = (1 << kPageAndOffsetBits) - 1;
78

    
79

    
80
// Decode the AllocationSpace from an encoded RelativeAddress.  Both
// large-object pseudo-spaces collapse back to LO_SPACE.
static inline AllocationSpace GetSpace(Address addr) {
  const int bits = reinterpret_cast<int>(addr);
  int space_number = (bits >> kSpaceShift) & kSpaceMask;
  if (space_number == kLOSpaceExecutable ||
      space_number == kLOSpacePointer) {
    space_number = LO_SPACE;
  }
  return static_cast<AllocationSpace>(space_number);
}
87

    
88

    
89
// True iff the encoded address is tagged with the "executable large
// object" pseudo-space.
static inline bool IsLargeExecutableObject(Address addr) {
  const int bits = reinterpret_cast<int>(addr);
  return ((bits >> kSpaceShift) & kSpaceMask) == kLOSpaceExecutable;
}
95

    
96

    
97
// True iff the encoded address is tagged with the "pointer-containing
// large object" pseudo-space.
static inline bool IsLargeFixedArray(Address addr) {
  const int bits = reinterpret_cast<int>(addr);
  return ((bits >> kSpaceShift) & kSpaceMask) == kLOSpacePointer;
}
103

    
104

    
105
// Extract the page number field from an encoded paged-space address.
static inline int PageIndex(Address addr) {
  const int bits = reinterpret_cast<int>(addr);
  return (bits >> kPageShift) & kPageMask;
}
109

    
110

    
111
// Extract the in-page byte offset from an encoded paged-space address
// (the field stores a word offset, hence the re-scaling).
static inline int PageOffset(Address addr) {
  const int bits = reinterpret_cast<int>(addr);
  const int word_offset = (bits >> kOffsetShift) & kOffsetMask;
  return word_offset << kObjectAlignmentBits;
}
115

    
116

    
117
// Extract the byte offset within new space from an encoded address;
// new space uses the combined page+offset field as one word offset.
static inline int NewSpaceOffset(Address addr) {
  const int bits = reinterpret_cast<int>(addr);
  const int word_offset = (bits >> kPageAndOffsetShift) & kPageAndOffsetMask;
  return word_offset << kObjectAlignmentBits;
}
122

    
123

    
124
// Extract the large-object page number (one object per page) from an
// encoded LO-space address.
static inline int LargeObjectIndex(Address addr) {
  const int bits = reinterpret_cast<int>(addr);
  return (bits >> kPageAndOffsetShift) & kPageAndOffsetMask;
}
128

    
129

    
130
// A RelativeAddress encodes a heap address that is independent of
131
// the actual memory addresses in real heap. The general case (for the
132
// OLD, CODE and MAP spaces) is as a (space id, page number, page offset)
133
// triple. The NEW space has page number == 0, because there are no
134
// pages. The LARGE_OBJECT space has page offset = 0, since there is
135
// exactly one object per page.  RelativeAddresses are encodable as
136
// Addresses, so that they can replace the map() pointers of
137
// HeapObjects. The encoded Addresses are also encoded as HeapObjects
138
// and allow for marking (is_marked() see mark(), clear_mark()...) as
139
// used by the Mark-Compact collector.
140

    
141
class RelativeAddress {
 public:
  RelativeAddress(AllocationSpace space,
                  int page_index,
                  int page_offset)
  : space_(space), page_index_(page_index), page_offset_(page_offset)  {
    // Only real spaces can be constructed directly; the large-object
    // pseudo-space values are reached via set_to_large_* below.
    ASSERT(space <= LAST_SPACE && space >= 0);
  }

  // Return the encoding of 'this' as an Address. Decode with constructor.
  Address Encode() const;

  // The logical space; both LO pseudo-spaces report as LO_SPACE.
  AllocationSpace space() const {
    if (space_ == kLOSpaceExecutable) return LO_SPACE;
    if (space_ == kLOSpacePointer) return LO_SPACE;
    return static_cast<AllocationSpace>(space_);
  }
  int page_index() const { return page_index_; }
  int page_offset() const { return page_offset_; }

  // True for the four spaces that are organized into fixed-size pages.
  bool in_paged_space() const {
    return space_ == CODE_SPACE ||
           space_ == OLD_POINTER_SPACE ||
           space_ == OLD_DATA_SPACE ||
           space_ == MAP_SPACE;
  }

  // Advance within the current page by 'offset' bytes.
  void next_address(int offset) { page_offset_ += offset; }
  // Advance to the next page, resetting the offset.
  void next_page(int init_offset = 0) {
    page_index_++;
    page_offset_ = init_offset;
  }

#ifdef DEBUG
  void Verify();
#endif

  // Retag an LO_SPACE address with the "executable code" pseudo-space.
  void set_to_large_code_object() {
    ASSERT(space_ == LO_SPACE);
    space_ = kLOSpaceExecutable;
  }
  // Retag an LO_SPACE address with the "may point to new space"
  // pseudo-space.
  void set_to_large_fixed_array() {
    ASSERT(space_ == LO_SPACE);
    space_ = kLOSpacePointer;
  }


 private:
  // Stored as int (not AllocationSpace) so it can also hold the
  // kLOSpaceExecutable / kLOSpacePointer pseudo-space values.
  int space_;
  int page_index_;
  int page_offset_;
};
193

    
194

    
195
// Pack (space_, page_index_, page_offset_) into a tagged Address using
// the bit layout described at the top of this file.
Address RelativeAddress::Encode() const {
  ASSERT(page_index_ >= 0);
  int word_offset = 0;
  int result = 0;
  switch (space_) {
    case MAP_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE:
      // Paged spaces: page number plus word offset within the page.
      ASSERT_EQ(0, page_index_ & ~kPageMask);
      word_offset = page_offset_ >> kObjectAlignmentBits;
      ASSERT_EQ(0, word_offset & ~kOffsetMask);
      result = (page_index_ << kPageShift) | (word_offset << kOffsetShift);
      break;
    case NEW_SPACE:
      // New space: a single word offset; there are no pages.
      ASSERT_EQ(0, page_index_);
      word_offset = page_offset_ >> kObjectAlignmentBits;
      ASSERT_EQ(0, word_offset & ~kPageAndOffsetMask);
      result = word_offset << kPageAndOffsetShift;
      break;
    case LO_SPACE:
    case kLOSpaceExecutable:
    case kLOSpacePointer:
      // Large object space: page number only; one object per page.
      ASSERT_EQ(0, page_offset_);
      ASSERT_EQ(0, page_index_ & ~kPageAndOffsetMask);
      result = page_index_ << kPageAndOffsetShift;
      break;
  }
  // OR in AllocationSpace and kHeapObjectTag
  ASSERT_EQ(0, space_ & ~kSpaceMask);
  result |= (space_ << kSpaceShift) | kHeapObjectTag;
  return reinterpret_cast<Address>(result);
}
228

    
229

    
230
#ifdef DEBUG
231
// Debug-only consistency check of the components against the invariants
// of the space they refer to.
void RelativeAddress::Verify() {
  ASSERT(page_offset_ >= 0 && page_index_ >= 0);
  switch (space_) {
    case MAP_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE:
      // Paged spaces: the offset must lie inside the page's object area.
      ASSERT(Page::kObjectStartOffset <= page_offset_ &&
             page_offset_ <= Page::kPageSize);
      break;
    case NEW_SPACE:
      // New space has no pages.
      ASSERT(page_index_ == 0);
      break;
    case LO_SPACE:
    case kLOSpaceExecutable:
    case kLOSpacePointer:
      // Large object space: one object per page, always at offset 0.
      ASSERT(page_offset_ == 0);
      break;
  }
}
251
#endif
252

    
253
// How a (large) object must be treated by the GC; used by
// SimulatedHeapSpace::Allocate to pick an LO pseudo-space tag.
enum GCTreatment {
  DataObject,     // Object that cannot contain a reference to new space.
  PointerObject,  // Object that can contain a reference to new space.
  CodeObject      // Object that contains executable code.
};
258

    
259
// A SimulatedHeapSpace simulates the allocation of objects in a page in
260
// the heap. It uses linear allocation - that is, it doesn't simulate the
261
// use of a free list. This simulated
262
// allocation must exactly match that done by Heap.
263

    
264
class SimulatedHeapSpace {
 public:
  // The default constructor initializes to an invalid state.
  SimulatedHeapSpace(): current_(LAST_SPACE, -1, -1) {}

  // Sets 'this' to the first address in 'space' that would be
  // returned by allocation in an empty heap.
  void InitEmptyHeap(AllocationSpace space);

  // Sets 'this' to the next address in 'space' that would be returned
  // by allocation in the current heap. Intended only for testing
  // serialization and deserialization in the current address space.
  void InitCurrentHeap(AllocationSpace space);

  // Returns the RelativeAddress where the next
  // object of 'size' bytes will be allocated, and updates 'this' to
  // point to the next free address beyond that object.
  RelativeAddress Allocate(int size, GCTreatment special_gc_treatment);

 private:
  // The simulated allocation top for this space.
  RelativeAddress current_;
};
286

    
287

    
288
// Position 'this' at the first address allocation would return in a
// freshly created heap.
void SimulatedHeapSpace::InitEmptyHeap(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
    case LO_SPACE:
      // These spaces start allocating at offset 0 of the space itself.
      current_ = RelativeAddress(space, 0, 0);
      break;
    case MAP_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE:
      // Paged spaces: the first object goes at the start of page 0's
      // object area.
      current_ = RelativeAddress(space, 0, Page::kObjectStartOffset);
      break;
  }
}
302

    
303

    
304
// Position 'this' at the current allocation point of 'space' in the
// running heap, expressed as a RelativeAddress.
void SimulatedHeapSpace::InitCurrentHeap(AllocationSpace space) {
  switch (space) {
    case MAP_SPACE:
    case OLD_POINTER_SPACE:
    case OLD_DATA_SPACE:
    case CODE_SPACE: {
      PagedSpace* ps;
      if (space == MAP_SPACE) {
        ps = Heap::map_space();
      } else if (space == OLD_POINTER_SPACE) {
        ps = Heap::old_pointer_space();
      } else if (space == OLD_DATA_SPACE) {
        ps = Heap::old_data_space();
      } else {
        ASSERT(space == CODE_SPACE);
        ps = Heap::code_space();
      }
      // Find the index of the page holding the allocation top by walking
      // the in-use pages in order.
      Address top = ps->top();
      Page* top_page = Page::FromAllocationTop(top);
      int page_index = 0;
      PageIterator it(ps, PageIterator::PAGES_IN_USE);
      while (it.has_next()) {
        if (it.next() == top_page) break;
        page_index++;
      }
      current_ = RelativeAddress(space,
                                 page_index,
                                 top_page->Offset(top));
      break;
    }
    case NEW_SPACE:
      // New space: offset of the allocation top from the space start.
      current_ = RelativeAddress(space,
                                 0,
                                 Heap::NewSpaceTop() - Heap::NewSpaceStart());
      break;
    case LO_SPACE:
      // Large object space: count the existing objects; the next object
      // will occupy the next "page" index.
      int page_index = 0;
      for (LargeObjectIterator it(Heap::lo_space()); it.has_next(); it.next()) {
        page_index++;
      }
      current_ = RelativeAddress(space, page_index, 0);
      break;
  }
}
348

    
349

    
350
// Simulate allocating 'size' bytes; returns where the object lands and
// advances current_ past it.  Must mirror Heap's allocation exactly.
RelativeAddress SimulatedHeapSpace::Allocate(int size,
                                             GCTreatment special_gc_treatment) {
#ifdef DEBUG
  current_.Verify();
#endif
  int alloc_size = OBJECT_SIZE_ALIGN(size);
  // In paged spaces an object never straddles a page boundary: skip to
  // the next page when it would not fit on the current one.
  if (current_.in_paged_space() &&
      current_.page_offset() + alloc_size > Page::kPageSize) {
    ASSERT(alloc_size <= Page::kMaxHeapObjectSize);
    current_.next_page(Page::kObjectStartOffset);
  }
  RelativeAddress result = current_;
  if (current_.space() == LO_SPACE) {
    // One object per LO page; tag the result with the pseudo-space that
    // matches its GC treatment.
    current_.next_page();
    if (special_gc_treatment == CodeObject) {
      result.set_to_large_code_object();
    } else if (special_gc_treatment == PointerObject) {
      result.set_to_large_fixed_array();
    }
  } else {
    current_.next_address(alloc_size);
  }
#ifdef DEBUG
  current_.Verify();
  result.Verify();
#endif
  return result;
}
378

    
379
// -----------------------------------------------------------------------------
380
// Coding of external references.
381

    
382
// The encoding of an external reference. The type is in the high word.
383
// The id is in the low word.
384
// Build an external-reference code: TypeCode in the upper 16 bits, the
// id in the lower 16 bits.
static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
  const uint32_t type_bits = static_cast<uint32_t>(type) << 16;
  return type_bits | id;
}
387

    
388

    
389
static int* GetInternalPointer(StatsCounter* counter) {
  // All counters refer to dummy_counter, if deserializing happens without
  // setting up counters.
  static int dummy_counter = 0;
  if (counter->Enabled()) {
    return counter->GetInternalPointer();
  }
  return &dummy_counter;
}
395

    
396

    
397
// ExternalReferenceTable is a helper class that defines the relationship
398
// between external references and their encodings. It is used to build
399
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
400
class ExternalReferenceTable {
 public:
  // Lazily created singleton; never deleted.
  static ExternalReferenceTable* instance() {
    if (!instance_) instance_ = new ExternalReferenceTable();
    return instance_;
  }

  // Number of registered references.
  int size() const { return refs_.length(); }

  // The actual address of reference i.
  Address address(int i) { return refs_[i].address; }

  // The serialized code of reference i (type << 16 | id, see
  // EncodeExternal above).
  uint32_t code(int i) { return refs_[i].code; }

  // Human-readable name of reference i.
  const char* name(int i) { return refs_[i].name; }

  // Largest id registered under the given type code.
  int max_id(int code) { return max_id_[code]; }

 private:
  static ExternalReferenceTable* instance_;

  ExternalReferenceTable() : refs_(64) { PopulateTable(); }
  ~ExternalReferenceTable() { }

  struct ExternalReferenceEntry {
    Address address;
    uint32_t code;
    const char* name;
  };

  void PopulateTable();

  // For a few types of references, we can get their address from their id.
  void AddFromId(TypeCode type, uint16_t id, const char* name);

  // For other types of references, the caller will figure out the address.
  void Add(Address address, TypeCode type, uint16_t id, const char* name);

  List<ExternalReferenceEntry> refs_;
  int max_id_[kTypeCodeCount];
};
440

    
441

    
442
ExternalReferenceTable* ExternalReferenceTable::instance_ = NULL;
443

    
444

    
445
void ExternalReferenceTable::AddFromId(TypeCode type,
446
                                       uint16_t id,
447
                                       const char* name) {
448
  Address address;
449
  switch (type) {
450
    case C_BUILTIN:
451
      address = Builtins::c_function_address(
452
          static_cast<Builtins::CFunctionId>(id));
453
      break;
454
    case BUILTIN:
455
      address = Builtins::builtin_address(static_cast<Builtins::Name>(id));
456
      break;
457
    case RUNTIME_FUNCTION:
458
      address = Runtime::FunctionForId(
459
          static_cast<Runtime::FunctionId>(id))->entry;
460
      break;
461
    case IC_UTILITY:
462
      address = IC::AddressFromUtilityId(static_cast<IC::UtilityId>(id));
463
      break;
464
    default:
465
      UNREACHABLE();
466
      return;
467
  }
468
  Add(address, type, id, name);
469
}
470

    
471

    
472
void ExternalReferenceTable::Add(Address address,
473
                                 TypeCode type,
474
                                 uint16_t id,
475
                                 const char* name) {
476
  CHECK_NE(NULL, address);
477
  ExternalReferenceEntry entry;
478
  entry.address = address;
479
  entry.code = EncodeExternal(type, id);
480
  entry.name = name;
481
  CHECK_NE(0, entry.code);
482
  refs_.Add(entry);
483
  if (id > max_id_[type]) max_id_[type] = id;
484
}
485

    
486

    
487
// Fills the table with every external reference the serializer must be
// able to encode: builtins, runtime functions, IC utilities, debug
// addresses, stats counters, Top addresses, extensions, accessors, stub
// cache tables and miscellaneous ExternalReferences.
void ExternalReferenceTable::PopulateTable() {
  // Reset the per-type maximum ids before (re)populating.
  for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
    max_id_[type_code] = 0;
  }

  // The following populates all of the different type of external references
  // into the ExternalReferenceTable.
  //
  // NOTE: This function was originally 100k of code.  It has since been
  // rewritten to be mostly table driven, as the callback macro style tends to
  // very easily cause code bloat.  Please be careful in the future when adding
  // new references.

  struct RefTableEntry {
    TypeCode type;
    uint16_t id;
    const char* name;
  };

  static const RefTableEntry ref_table[] = {
  // Builtins
#define DEF_ENTRY_C(name) \
  { C_BUILTIN, \
    Builtins::c_##name, \
    "Builtins::" #name },

  BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C

#define DEF_ENTRY_C(name) \
  { BUILTIN, \
    Builtins::name, \
    "Builtins::" #name },
#define DEF_ENTRY_A(name, kind, state) DEF_ENTRY_C(name)

  BUILTIN_LIST_C(DEF_ENTRY_C)
  BUILTIN_LIST_A(DEF_ENTRY_A)
  BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A

  // Runtime functions
#define RUNTIME_ENTRY(name, nargs) \
  { RUNTIME_FUNCTION, \
    Runtime::k##name, \
    "Runtime::" #name },

  RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY

  // IC utilities
#define IC_ENTRY(name) \
  { IC_UTILITY, \
    IC::k##name, \
    "IC::" #name },

  IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
  };  // end of ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
    AddFromId(ref_table[i].type, ref_table[i].id, ref_table[i].name);
  }

  // Debug addresses
  Add(Debug_Address(Debug::k_after_break_target_address).address(),
      DEBUG_ADDRESS,
      Debug::k_after_break_target_address << kDebugIdShift,
      "Debug::after_break_target_address()");
  Add(Debug_Address(Debug::k_debug_break_return_address).address(),
      DEBUG_ADDRESS,
      Debug::k_debug_break_return_address << kDebugIdShift,
      "Debug::debug_break_return_address()");
  // NOTE(review): the name buffer is sized for the format string plus a
  // NUL; "%i" (2 chars) leaves room for at most a 2-digit index, so this
  // relies on the register/address counts staying below 100 — confirm.
  const char* debug_register_format = "Debug::register_address(%i)";
  size_t dr_format_length = strlen(debug_register_format);
  for (int i = 0; i < kNumJSCallerSaved; ++i) {
    Vector<char> name = Vector<char>::New(dr_format_length + 1);
    OS::SNPrintF(name, debug_register_format, i);
    Add(Debug_Address(Debug::k_register_address, i).address(),
        DEBUG_ADDRESS,
        Debug::k_register_address << kDebugIdShift | i,
        name.start());
  }

  // Stat counters
  struct StatsRefTableEntry {
    StatsCounter* counter;
    uint16_t id;
    const char* name;
  };

  static const StatsRefTableEntry stats_ref_table[] = {
#define COUNTER_ENTRY(name, caption) \
  { &Counters::name, \
    Counters::k_##name, \
    "Counters::" #name },

  STATS_COUNTER_LIST_1(COUNTER_ENTRY)
  STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
  };  // end of stats_ref_table[].

  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
    Add(reinterpret_cast<Address>(
            GetInternalPointer(stats_ref_table[i].counter)),
        STATS_COUNTER,
        stats_ref_table[i].id,
        stats_ref_table[i].name);
  }

  // Top addresses
  const char* top_address_format = "Top::get_address_from_id(%i)";
  size_t top_format_length = strlen(top_address_format);
  for (uint16_t i = 0; i < Top::k_top_address_count; ++i) {
    Vector<char> name = Vector<char>::New(top_format_length + 1);
    const char* chars = name.start();
    OS::SNPrintF(name, top_address_format, i);
    Add(Top::get_address_from_id((Top::AddressId)i), TOP_ADDRESS, i, chars);
  }

  // Extensions
  Add(FUNCTION_ADDR(GCExtension::GC), EXTENSION, 1,
      "GCExtension::GC");

  // Accessors
#define ACCESSOR_DESCRIPTOR_DECLARATION(name) \
  Add((Address)&Accessors::name, \
      ACCESSOR, \
      Accessors::k##name, \
      "Accessors::" #name);

  ACCESSOR_DESCRIPTOR_LIST(ACCESSOR_DESCRIPTOR_DECLARATION)
#undef ACCESSOR_DESCRIPTOR_DECLARATION

  // Stub cache tables
  Add(SCTableReference::keyReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      1,
      "StubCache::primary_->key");
  Add(SCTableReference::valueReference(StubCache::kPrimary).address(),
      STUB_CACHE_TABLE,
      2,
      "StubCache::primary_->value");
  Add(SCTableReference::keyReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      3,
      "StubCache::secondary_->key");
  Add(SCTableReference::valueReference(StubCache::kSecondary).address(),
      STUB_CACHE_TABLE,
      4,
      "StubCache::secondary_->value");

  // Runtime entries
  Add(FUNCTION_ADDR(Runtime::PerformGC),
      RUNTIME_ENTRY,
      1,
      "Runtime::PerformGC");

  // Miscellaneous
  Add(ExternalReference::builtin_passed_function().address(),
      UNCLASSIFIED,
      1,
      "Builtins::builtin_passed_function");
  Add(ExternalReference::the_hole_value_location().address(),
      UNCLASSIFIED,
      2,
      "Factory::the_hole_value().location()");
  Add(ExternalReference::address_of_stack_guard_limit().address(),
      UNCLASSIFIED,
      3,
      "StackGuard::address_of_jslimit()");
  Add(ExternalReference::address_of_regexp_stack_limit().address(),
      UNCLASSIFIED,
      4,
      "RegExpStack::limit_address()");
  Add(ExternalReference::debug_break().address(),
      UNCLASSIFIED,
      5,
      "Debug::Break()");
  Add(ExternalReference::new_space_start().address(),
      UNCLASSIFIED,
      6,
      "Heap::NewSpaceStart()");
  Add(ExternalReference::heap_always_allocate_scope_depth().address(),
      UNCLASSIFIED,
      7,
      "Heap::always_allocate_scope_depth()");
  Add(ExternalReference::new_space_allocation_limit_address().address(),
      UNCLASSIFIED,
      8,
      "Heap::NewSpaceAllocationLimitAddress()");
  Add(ExternalReference::new_space_allocation_top_address().address(),
      UNCLASSIFIED,
      9,
      "Heap::NewSpaceAllocationTopAddress()");
  Add(ExternalReference::debug_step_in_fp_address().address(),
      UNCLASSIFIED,
      10,
      "Debug::step_in_fp_addr()");
}
687

    
688

    
689
// Build the address -> table-index hash map from the singleton table.
ExternalReferenceEncoder::ExternalReferenceEncoder()
    : encodings_(Match) {
  ExternalReferenceTable* table = ExternalReferenceTable::instance();
  for (int index = 0; index < table->size(); ++index) {
    Put(table->address(index), index);
  }
}
697

    
698

    
699
// Returns the reference code for 'key', or 0 if it is unknown/NULL.
uint32_t ExternalReferenceEncoder::Encode(Address key) const {
  int index = IndexOf(key);
  if (index < 0) return 0;
  return ExternalReferenceTable::instance()->code(index);
}
703

    
704

    
705
// Returns the table name for 'key', or NULL if it is unknown/NULL.
const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
  int index = IndexOf(key);
  if (index < 0) return NULL;
  return ExternalReferenceTable::instance()->name(index);
}
709

    
710

    
711
// Returns the table index stored for 'key', or -1 when absent.
int ExternalReferenceEncoder::IndexOf(Address key) const {
  if (key == NULL) return -1;
  // Lookup is non-const in this HashMap, but a read-only probe is safe.
  HashMap& map = const_cast<HashMap&>(encodings_);
  HashMap::Entry* entry = map.Lookup(key, Hash(key), false);
  if (entry == NULL) return -1;
  return reinterpret_cast<int>(entry->value);
}
717

    
718

    
719
// Record (or overwrite) the table index for 'key'.
void ExternalReferenceEncoder::Put(Address key, int index) {
  HashMap::Entry* slot = encodings_.Lookup(key, Hash(key), true);
  slot->value = reinterpret_cast<void*>(index);
}
723

    
724

    
725
// Build the per-type id -> address lookup arrays from the singleton
// table.
ExternalReferenceDecoder::ExternalReferenceDecoder()
  : encodings_(NewArray<Address*>(kTypeCodeCount)) {
  ExternalReferenceTable* table = ExternalReferenceTable::instance();
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    // One slot per possible id of this type.
    int max = table->max_id(type) + 1;
    encodings_[type] = NewArray<Address>(max + 1);
  }
  for (int i = 0; i < table->size(); ++i) {
    Put(table->code(i), table->address(i));
  }
}
737

    
738

    
739
// Release the per-type arrays, then the outer array itself.
ExternalReferenceDecoder::~ExternalReferenceDecoder() {
  for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
    DeleteArray(encodings_[type]);
  }
  DeleteArray(encodings_);
}
745

    
746

    
747
//------------------------------------------------------------------------------
748
// Implementation of Serializer
749

    
750

    
751
// Helper class to write the bytes of the serialized heap.
752

    
753
// Accumulates the serialized heap image in a growable byte buffer.
class SnapshotWriter {
 public:
  SnapshotWriter() {
    len_ = 0;
    max_ = 8 << 10;  // 8K initial size
    str_ = NewArray<byte>(max_);
  }

  ~SnapshotWriter() {
    DeleteArray(str_);
  }

  // Copies the snapshot into a freshly allocated buffer that the caller
  // takes ownership of.
  void GetBytes(byte** str, int* len) {
    *str = NewArray<byte>(len_);
    memcpy(*str, str_, len_);
    *len = len_;
  }

  // Makes room for 'bytes' additional bytes at 'pos'; len_ itself is
  // updated by the Insert* callers.
  void Reserve(int bytes, int pos);

  // The Put* methods append at the current end of the buffer.
  void PutC(char c) {
    InsertC(c, len_);
  }

  void PutInt(int i) {
    InsertInt(i, len_);
  }

  void PutBytes(const byte* a, int size) {
    InsertBytes(a, len_, size);
  }

  void PutString(const char* s) {
    InsertString(s, len_);
  }

  // The Insert* methods write at an arbitrary position and return the
  // position just past the inserted data.
  int InsertC(char c, int pos) {
    Reserve(1, pos);
    str_[pos] = c;
    len_++;
    return pos + 1;
  }

  // Writes the raw bytes of the int (host byte order).
  int InsertInt(int i, int pos) {
    return InsertBytes(reinterpret_cast<byte*>(&i), pos, sizeof(i));
  }

  int InsertBytes(const byte* a, int pos, int size) {
    Reserve(size, pos);
    memcpy(&str_[pos], a, size);
    len_ += size;
    return pos + size;
  }

  int InsertString(const char* s, int pos);

  int length() { return len_; }

  // Address of the current end of the buffer (the next write position).
  Address position() { return reinterpret_cast<Address>(&str_[len_]); }

 private:
  byte* str_;  // the snapshot
  int len_;   // the current length of str_
  int max_;   // the allocated size of str_
};
818

    
819

    
820
// Grow the buffer so 'bytes' more bytes can be inserted at 'pos', and
// open a gap there when 'pos' is not at the end.  len_ is updated by the
// Insert* callers, not here.
void SnapshotWriter::Reserve(int bytes, int pos) {
  CHECK(0 <= pos && pos <= len_);
  // Double the capacity until the new data fits.
  while (len_ + bytes >= max_) {
    max_ *= 2;
    byte* old = str_;
    str_ = NewArray<byte>(max_);
    memcpy(str_, old, len_);
    DeleteArray(old);
  }
  // Inserting in the middle: copy into a fresh buffer, leaving a
  // 'bytes'-wide gap at 'pos'.
  if (pos < len_) {
    byte* old = str_;
    str_ = NewArray<byte>(max_);
    memcpy(str_, old, pos);
    memcpy(str_ + pos + bytes, old + pos, len_ - pos);
    DeleteArray(old);
  }
}
837

    
838
int SnapshotWriter::InsertString(const char* s, int pos) {
839
  int size = strlen(s);
840
  pos = InsertC('[', pos);
841
  pos = InsertInt(size, pos);
842
  pos = InsertC(']', pos);
843
  return InsertBytes(reinterpret_cast<const byte*>(s), pos, size);
844
}
845

    
846

    
847
// Visits an object's pointer and external-reference slots, recording for
// each one the slot's offset within the object together with the word
// that should replace it; Update() then writes those words at the same
// offsets from a given start address.
class ReferenceUpdater: public ObjectVisitor {
 public:
  ReferenceUpdater(HeapObject* obj, Serializer* serializer)
    : obj_address_(obj->address()),
      serializer_(serializer),
      reference_encoder_(serializer->reference_encoder_),
      offsets_(8),
      addresses_(8) {
  }

  // Heap pointers are replaced with the serializer's saved (encoded)
  // address for the referenced object; smis are left untouched.
  virtual void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; ++p) {
      if ((*p)->IsHeapObject()) {
        offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
        Address a = serializer_->GetSavedAddress(HeapObject::cast(*p));
        addresses_.Add(a);
      }
    }
  }

  // External references are replaced with their encoding; a NULL
  // reference must encode to 0 and a non-NULL one must not.
  virtual void VisitExternalReferences(Address* start, Address* end) {
    for (Address* p = start; p < end; ++p) {
      uint32_t code = reference_encoder_->Encode(*p);
      CHECK(*p == NULL ? code == 0 : code != 0);
      offsets_.Add(reinterpret_cast<Address>(p) - obj_address_);
      addresses_.Add(reinterpret_cast<Address>(code));
    }
  }

  // Runtime-entry targets in relocation info are encoded the same way.
  virtual void VisitRuntimeEntry(RelocInfo* rinfo) {
    Address target = rinfo->target_address();
    uint32_t encoding = reference_encoder_->Encode(target);
    CHECK(target == NULL ? encoding == 0 : encoding != 0);
    offsets_.Add(rinfo->target_address_address() - obj_address_);
    addresses_.Add(reinterpret_cast<Address>(encoding));
  }

  // Write the recorded replacement words at the corresponding offsets
  // from start_address.
  void Update(Address start_address) {
    for (int i = 0; i < offsets_.length(); i++) {
      memcpy(start_address + offsets_[i], &addresses_[i], sizeof(Address));
    }
  }

 private:
  Address obj_address_;  // Start address of the object being visited.
  Serializer* serializer_;
  ExternalReferenceEncoder* reference_encoder_;
  List<int> offsets_;        // Slot offsets relative to obj_address_.
  List<Address> addresses_;  // Replacement word for each recorded slot.
};
897

    
898

    
899
// Helper functions for a map of encoded heap object addresses.
900
static uint32_t HeapObjectHash(HeapObject* key) {
901
  return reinterpret_cast<uint32_t>(key) >> 2;
902
}
903

    
904

    
905
// Equality predicate for the saved-addresses hash map: two keys match
// exactly when they are the same pointer.
static bool MatchHeapObject(void* key1, void* key2) {
  return key1 == key2;
}
// Set up a serializer with an empty snapshot writer and one simulated heap
// space per allocation space; the reference encoder is created lazily in
// Serialize().
Serializer::Serializer()
  : global_handles_(4),
    saved_addresses_(MatchHeapObject) {
  root_ = true;
  roots_ = 0;
  objects_ = 0;
  reference_encoder_ = NULL;
  writer_ = new SnapshotWriter();
  for (int i = 0; i <= LAST_SPACE; i++) {
    allocator_[i] = new SimulatedHeapSpace();
  }
}
// Release the simulated spaces, the (possibly never-created) reference
// encoder, and the snapshot writer.
Serializer::~Serializer() {
  for (int i = 0; i <= LAST_SPACE; i++) {
    delete allocator_[i];
  }
  // delete on NULL is a no-op, so no guard is needed.
  delete reference_encoder_;
  delete writer_;
}
// Serialization is disabled until explicitly enabled; Serialize() CHECKs
// enabled() and calls Disable() when it finishes.
bool Serializer::serialization_enabled_ = false;
#ifdef DEBUG
// Maximum length of a synchronization tag written to / read from a debug
// snapshot (must match the buffer size in Deserializer::Synchronize).
static const int kMaxTagLength = 32;

// In debug-serialization mode, emit a tagged synchronization marker
// ('S', length, tag bytes) so the deserializer can detect stream drift.
void Serializer::Synchronize(const char* tag) {
  if (FLAG_debug_serialization) {
    int length = strlen(tag);
    ASSERT(length <= kMaxTagLength);
    writer_->PutC('S');
    writer_->PutInt(length);
    writer_->PutBytes(reinterpret_cast<const byte*>(tag), length);
  }
}
#endif
void Serializer::InitializeAllocators() {
952
  for (int i = 0; i <= LAST_SPACE; i++) {
953
    allocator_[i]->InitEmptyHeap(static_cast<AllocationSpace>(i));
954
  }
955
}
956

    
957

    
958
// True if obj has already been serialized (i.e. has a saved encoded
// address in the hash map).  The 'false' argument means: look up only,
// do not insert.
bool Serializer::IsVisited(HeapObject* obj) {
  HashMap::Entry* entry =
    saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
  return entry != NULL;
}
// Return the encoded snapshot address previously saved for obj.
// Precondition: obj has been visited (SaveAddress was called).
Address Serializer::GetSavedAddress(HeapObject* obj) {
  HashMap::Entry* entry =
    saved_addresses_.Lookup(obj, HeapObjectHash(obj), false);
  ASSERT(entry != NULL);
  return reinterpret_cast<Address>(entry->value);
}
// Record the encoded snapshot address assigned to obj; inserts a map
// entry if none exists (the 'true' argument).
void Serializer::SaveAddress(HeapObject* obj, Address addr) {
  HashMap::Entry* entry =
    saved_addresses_.Lookup(obj, HeapObjectHash(obj), true);
  entry->value = addr;
}
void Serializer::Serialize() {
981
  // No active threads.
982
  CHECK_EQ(NULL, ThreadState::FirstInUse());
983
  // No active or weak handles.
984
  CHECK(HandleScopeImplementer::instance()->Blocks()->is_empty());
985
  CHECK_EQ(0, GlobalHandles::NumberOfWeakHandles());
986
  // We need a counter function during serialization to resolve the
987
  // references to counters in the code on the heap.
988
  CHECK(StatsTable::HasCounterFunction());
989
  CHECK(enabled());
990
  InitializeAllocators();
991
  reference_encoder_ = new ExternalReferenceEncoder();
992
  PutHeader();
993
  Heap::IterateRoots(this);
994
  PutLog();
995
  PutContextStack();
996
  Disable();
997
}
998

    
999

    
1000
// Hand the accumulated snapshot bytes (and their length) to the caller.
void Serializer::Finalize(byte** str, int* len) {
  writer_->GetBytes(str, len);
}
// Serialize objects by writing them into the stream.
1006

    
1007
void Serializer::VisitPointers(Object** start, Object** end) {
1008
  bool root = root_;
1009
  root_ = false;
1010
  for (Object** p = start; p < end; ++p) {
1011
    bool serialized;
1012
    Address a = Encode(*p, &serialized);
1013
    if (root) {
1014
      roots_++;
1015
      // If the object was not just serialized,
1016
      // write its encoded address instead.
1017
      if (!serialized) PutEncodedAddress(a);
1018
    }
1019
  }
1020
  root_ = root;
1021
}
1022

    
1023

    
1024
class GlobalHandlesRetriever: public ObjectVisitor {
1025
 public:
1026
  explicit GlobalHandlesRetriever(List<Object**>* handles)
1027
  : global_handles_(handles) {}
1028

    
1029
  virtual void VisitPointers(Object** start, Object** end) {
1030
    for (; start != end; ++start) {
1031
      global_handles_->Add(start);
1032
    }
1033
  }
1034

    
1035
 private:
1036
  List<Object**>* global_handles_;
1037
};
1038

    
1039

    
1040
void Serializer::PutFlags() {
1041
  writer_->PutC('F');
1042
  List<const char*>* argv = FlagList::argv();
1043
  writer_->PutInt(argv->length());
1044
  writer_->PutC('[');
1045
  for (int i = 0; i < argv->length(); i++) {
1046
    if (i > 0) writer_->PutC('|');
1047
    writer_->PutString((*argv)[i]);
1048
    DeleteArray((*argv)[i]);
1049
  }
1050
  writer_->PutC(']');
1051
  flags_end_ = writer_->length();
1052
  delete argv;
1053
}
1054

    
1055

    
1056
void Serializer::PutHeader() {
1057
  PutFlags();
1058
  writer_->PutC('D');
1059
#ifdef DEBUG
1060
  writer_->PutC(FLAG_debug_serialization ? '1' : '0');
1061
#else
1062
  writer_->PutC('0');
1063
#endif
1064
  // Write sizes of paged memory spaces. Allocate extra space for the old
1065
  // and code spaces, because objects in new space will be promoted to them.
1066
  writer_->PutC('S');
1067
  writer_->PutC('[');
1068
  writer_->PutInt(Heap::old_pointer_space()->Size() +
1069
                  Heap::new_space()->Size());
1070
  writer_->PutC('|');
1071
  writer_->PutInt(Heap::old_data_space()->Size() + Heap::new_space()->Size());
1072
  writer_->PutC('|');
1073
  writer_->PutInt(Heap::code_space()->Size() + Heap::new_space()->Size());
1074
  writer_->PutC('|');
1075
  writer_->PutInt(Heap::map_space()->Size());
1076
  writer_->PutC(']');
1077
  // Write global handles.
1078
  writer_->PutC('G');
1079
  writer_->PutC('[');
1080
  GlobalHandlesRetriever ghr(&global_handles_);
1081
  GlobalHandles::IterateRoots(&ghr);
1082
  for (int i = 0; i < global_handles_.length(); i++) {
1083
    writer_->PutC('N');
1084
  }
1085
  writer_->PutC(']');
1086
}
1087

    
1088

    
1089
// If code logging is on, shut the logger down and splice the log file's
// contents into the snapshot immediately after the flags section, marked
// with 'L'.
void Serializer::PutLog() {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (FLAG_log_code) {
    Logger::TearDown();
    int pos = writer_->InsertC('L', flags_end_);
    bool exists;
    Vector<const char> log = ReadFile(FLAG_logfile, &exists);
    // NOTE(review): 'exists' is never checked; behavior when the log file
    // is missing depends on what ReadFile returns — confirm it yields an
    // empty vector in that case.
    writer_->InsertString(log.start(), pos);
    log.Dispose();
  }
#endif
}
static int IndexOf(const List<Object**>& list, Object** element) {
1104
  for (int i = 0; i < list.length(); i++) {
1105
    if (list[i] == element) return i;
1106
  }
1107
  return -1;
1108
}
1109

    
1110

    
1111
void Serializer::PutGlobalHandleStack(const List<Handle<Object> >& stack) {
1112
  writer_->PutC('[');
1113
  writer_->PutInt(stack.length());
1114
  for (int i = stack.length() - 1; i >= 0; i--) {
1115
    writer_->PutC('|');
1116
    int gh_index = IndexOf(global_handles_, stack[i].location());
1117
    CHECK_GE(gh_index, 0);
1118
    writer_->PutInt(gh_index);
1119
  }
1120
  writer_->PutC(']');
1121
}
1122

    
1123

    
1124
void Serializer::PutContextStack() {
1125
  List<Handle<Object> > contexts(2);
1126
  while (HandleScopeImplementer::instance()->HasSavedContexts()) {
1127
    Handle<Object> context =
1128
      HandleScopeImplementer::instance()->RestoreContext();
1129
    contexts.Add(context);
1130
  }
1131
  for (int i = contexts.length() - 1; i >= 0; i--) {
1132
    HandleScopeImplementer::instance()->SaveContext(contexts[i]);
1133
  }
1134
  PutGlobalHandleStack(contexts);
1135
}
1136

    
1137

    
1138
// Write an encoded (simulated) address as 'P' followed by its integer
// value.  Mirrored by Deserializer::GetEncodedAddress().
void Serializer::PutEncodedAddress(Address addr) {
  writer_->PutC('P');
  writer_->PutInt(reinterpret_cast<int>(addr));
}
// Map an Object* to its snapshot encoding.  Smis encode as themselves;
// a heap object seen before yields its saved encoded address; a new heap
// object is serialized now (*serialized is set accordingly).
Address Serializer::Encode(Object* o, bool* serialized) {
  *serialized = false;
  if (o->IsSmi()) {
    return reinterpret_cast<Address>(o);
  } else {
    HeapObject* obj = HeapObject::cast(o);
    if (IsVisited(obj)) {
      return GetSavedAddress(obj);
    } else {
      // First visit: serialize the object.
      *serialized = true;
      return PutObject(obj);
    }
  }
}
// Serialize one heap object: simulate its allocation to get its encoded
// snapshot address, write the prologue ('[' type size address), recurse
// into its pointers (serializing unvisited targets), write the raw bytes,
// and patch pointer/reference slots in the written copy via
// ReferenceUpdater.  Returns the encoded address.
Address Serializer::PutObject(HeapObject* obj) {
  Map* map = obj->map();
  InstanceType type = map->instance_type();
  int size = obj->SizeFromMap(map);

  // Simulate the allocation of obj to predict where it will be
  // allocated during deserialization.
  Address addr = Allocate(obj).Encode();

  // Save before recursing, so cycles through obj resolve to this address.
  SaveAddress(obj, addr);

  if (type == CODE_TYPE) {
    Code* code = Code::cast(obj);
    // Ensure Code objects contain Object pointers, not Addresses.
    code->ConvertICTargetsFromAddressToObject();
    LOG(CodeMoveEvent(code->address(), addr));
  }

  // Write out the object prologue: type, size, and simulated address of obj.
  writer_->PutC('[');
  CHECK_EQ(0, size & kObjectAlignmentMask);
  writer_->PutInt(type);
  writer_->PutInt(size >> kObjectAlignmentBits);
  PutEncodedAddress(addr);  // encodes AllocationSpace

  // Visit all the pointers in the object other than the map. This
  // will recursively serialize any as-yet-unvisited objects.
  obj->Iterate(this);

  // Mark end of recursively embedded objects, start of object body.
  writer_->PutC('|');
  // Write out the raw contents of the object. No compression, but
  // fast to deserialize.
  writer_->PutBytes(obj->address(), size);
  // Update pointers and external references in the written object.
  ReferenceUpdater updater(obj, this);
  obj->Iterate(&updater);
  updater.Update(writer_->position() - size);

#ifdef DEBUG
  if (FLAG_debug_serialization) {
    // Write out the object epilogue to catch synchronization errors.
    PutEncodedAddress(addr);
    writer_->PutC(']');
  }
#endif

  if (type == CODE_TYPE) {
    Code* code = Code::cast(obj);
    // Convert relocations from Object* to Address in Code objects.
    code->ConvertICTargetsFromObjectToAddress();
  }

  objects_++;
  return addr;
}
// Simulate allocating obj in the deserialized heap and return its
// relative (space, page, offset) address.  New-space objects are placed
// in the old space they would be promoted to.  The GC treatment tells the
// simulated space how the object behaves during collection.
RelativeAddress Serializer::Allocate(HeapObject* obj) {
  // Find out which AllocationSpace 'obj' is in.
  AllocationSpace s = FIRST_SPACE;  // initialized to silence warnings
  bool found = false;
  for (int i = FIRST_SPACE; !found && i <= LAST_SPACE; i++) {
    s = static_cast<AllocationSpace>(i);
    found = Heap::InSpace(obj, s);
  }
  CHECK(found);
  if (s == NEW_SPACE) {
    // Deserialized new-space objects land in the target old space.
    Space* space = Heap::TargetSpace(obj);
    ASSERT(space == Heap::old_pointer_space() ||
           space == Heap::old_data_space());
    s = (space == Heap::old_pointer_space()) ?
        OLD_POINTER_SPACE :
        OLD_DATA_SPACE;
  }
  int size = obj->Size();
  GCTreatment gc_treatment = DataObject;
  if (obj->IsFixedArray()) gc_treatment = PointerObject;
  else if (obj->IsCode()) gc_treatment = CodeObject;
  return allocator_[s]->Allocate(size, gc_treatment);
}
//------------------------------------------------------------------------------
// Implementation of Deserializer


// Initial capacity of the page and handle lists built while reading a
// snapshot.
static const int kInitArraySize = 32;
// Set up a deserializer over a snapshot byte buffer.  The page lists
// cache the pages of each paged space so encoded (page, offset)
// addresses can be resolved; the reference decoder is created lazily in
// Deserialize().
Deserializer::Deserializer(const byte* str, int len)
  : reader_(str, len),
    map_pages_(kInitArraySize),
    old_pointer_pages_(kInitArraySize),
    old_data_pages_(kInitArraySize),
    code_pages_(kInitArraySize),
    large_objects_(kInitArraySize),
    global_handles_(4) {
  root_ = true;
  roots_ = 0;
  objects_ = 0;
  reference_decoder_ = NULL;
#ifdef DEBUG
  expect_debug_information_ = false;
#endif
}
// Release the reference decoder if Deserialize() created one.
Deserializer::~Deserializer() {
  // delete on NULL is a no-op, so no guard is needed.
  delete reference_decoder_;
}
// Read the next encoded address from the stream and assert (debug only)
// that it matches the expected value.
void Deserializer::ExpectEncodedAddress(Address expected) {
  Address a = GetEncodedAddress();
  USE(a);  // 'a' is only read by the ASSERT below in debug builds
  ASSERT(a == expected);
}
#ifdef DEBUG
// Consume and verify a synchronization marker written by
// Serializer::Synchronize: 'S', length, then the tag bytes.
void Deserializer::Synchronize(const char* tag) {
  if (expect_debug_information_) {
    char buf[kMaxTagLength];
    reader_.ExpectC('S');
    int length = reader_.GetInt();
    ASSERT(length <= kMaxTagLength);
    reader_.GetBytes(reinterpret_cast<Address>(buf), length);
    ASSERT_EQ(strlen(tag), length);
    ASSERT(strncmp(tag, buf, length) == 0);
  }
}
#endif
void Deserializer::Deserialize() {
1297
  // No active threads.
1298
  ASSERT_EQ(NULL, ThreadState::FirstInUse());
1299
  // No active handles.
1300
  ASSERT(HandleScopeImplementer::instance()->Blocks()->is_empty());
1301
  reference_decoder_ = new ExternalReferenceDecoder();
1302
  // By setting linear allocation only, we forbid the use of free list
1303
  // allocation which is not predicted by SimulatedAddress.
1304
  GetHeader();
1305
  Heap::IterateRoots(this);
1306
  GetContextStack();
1307
}
1308

    
1309

    
1310
// Fill a range of pointer slots.  Root slots are read from the stream:
// '[' introduces a freshly embedded object, 'P' a pointer to one already
// read.  Non-root slots already hold encoded addresses copied in with the
// object's raw bytes, and are resolved in place.
void Deserializer::VisitPointers(Object** start, Object** end) {
  bool root = root_;
  root_ = false;
  for (Object** p = start; p < end; ++p) {
    if (root) {
      roots_++;
      // Read the next object or pointer from the stream
      // pointer in the stream.
      int c = reader_.GetC();
      if (c == '[') {
        *p = GetObject();  // embedded object
      } else {
        ASSERT(c == 'P');  // pointer to previously serialized object
        *p = Resolve(reinterpret_cast<Address>(reader_.GetInt()));
      }
    } else {
      // A pointer internal to a HeapObject that we've already
      // read: resolve it to a true address (or Smi).
      *p = Resolve(reinterpret_cast<Address>(*p));
    }
  }
  root_ = root;
}
// External-reference slots were serialized as encoder codes; decode each
// back into a real address for this process.
void Deserializer::VisitExternalReferences(Address* start, Address* end) {
  for (Address* p = start; p < end; ++p) {
    uint32_t code = reinterpret_cast<uint32_t>(*p);
    *p = reference_decoder_->Decode(code);
  }
}
// A runtime-entry target in relocation info was serialized as an encoder
// code stored at the target-address slot; decode it and write the real
// address back through the RelocInfo.
void Deserializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  uint32_t* pc = reinterpret_cast<uint32_t*>(rinfo->target_address_address());
  uint32_t encoding = *pc;
  Address target = reference_decoder_->Decode(encoding);
  rinfo->set_target_address(target);
}
void Deserializer::GetFlags() {
1352
  reader_.ExpectC('F');
1353
  int argc = reader_.GetInt() + 1;
1354
  char** argv = NewArray<char*>(argc);
1355
  reader_.ExpectC('[');
1356
  for (int i = 1; i < argc; i++) {
1357
    if (i > 1) reader_.ExpectC('|');
1358
    argv[i] = reader_.GetString();
1359
  }
1360
  reader_.ExpectC(']');
1361
  has_log_ = false;
1362
  for (int i = 1; i < argc; i++) {
1363
    if (strcmp("--log_code", argv[i]) == 0) {
1364
      has_log_ = true;
1365
    } else if (strcmp("--nouse_ic", argv[i]) == 0) {
1366
      FLAG_use_ic = false;
1367
    } else if (strcmp("--debug_code", argv[i]) == 0) {
1368
      FLAG_debug_code = true;
1369
    } else if (strcmp("--nolazy", argv[i]) == 0) {
1370
      FLAG_lazy = false;
1371
    }
1372
    DeleteArray(argv[i]);
1373
  }
1374

    
1375
  DeleteArray(argv);
1376
}
1377

    
1378

    
1379
void Deserializer::GetLog() {
1380
  if (has_log_) {
1381
    reader_.ExpectC('L');
1382
    char* snapshot_log = reader_.GetString();
1383
#ifdef ENABLE_LOGGING_AND_PROFILING
1384
    if (FLAG_log_code) {
1385
      LOG(Preamble(snapshot_log));
1386
    }
1387
#endif
1388
    DeleteArray(snapshot_log);
1389
  }
1390
}
1391

    
1392

    
1393
static void InitPagedSpace(PagedSpace* space,
1394
                           int capacity,
1395
                           List<Page*>* page_list) {
1396
  space->EnsureCapacity(capacity);
1397
  // TODO(1240712): PagedSpace::EnsureCapacity can return false due to
1398
  // a failure to allocate from the OS to expand the space.
1399
  PageIterator it(space, PageIterator::ALL_PAGES);
1400
  while (it.has_next()) page_list->Add(it.next());
1401
}
1402

    
1403

    
1404
void Deserializer::GetHeader() {
1405
  reader_.ExpectC('D');
1406
#ifdef DEBUG
1407
  expect_debug_information_ = reader_.GetC() == '1';
1408
#else
1409
  // In release mode, don't attempt to read a snapshot containing
1410
  // synchronization tags.
1411
  if (reader_.GetC() != '0') FATAL("Snapshot contains synchronization tags.");
1412
#endif
1413
  // Ensure sufficient capacity in paged memory spaces to avoid growth
1414
  // during deserialization.
1415
  reader_.ExpectC('S');
1416
  reader_.ExpectC('[');
1417
  InitPagedSpace(Heap::old_pointer_space(),
1418
                 reader_.GetInt(),
1419
                 &old_pointer_pages_);
1420
  reader_.ExpectC('|');
1421
  InitPagedSpace(Heap::old_data_space(), reader_.GetInt(), &old_data_pages_);
1422
  reader_.ExpectC('|');
1423
  InitPagedSpace(Heap::code_space(), reader_.GetInt(), &code_pages_);
1424
  reader_.ExpectC('|');
1425
  InitPagedSpace(Heap::map_space(), reader_.GetInt(), &map_pages_);
1426
  reader_.ExpectC(']');
1427
  // Create placeholders for global handles later to be fill during
1428
  // IterateRoots.
1429
  reader_.ExpectC('G');
1430
  reader_.ExpectC('[');
1431
  int c = reader_.GetC();
1432
  while (c != ']') {
1433
    ASSERT(c == 'N');
1434
    global_handles_.Add(GlobalHandles::Create(NULL).location());
1435
    c = reader_.GetC();
1436
  }
1437
}
1438

    
1439

    
1440
// Read a handle stack written by Serializer::PutGlobalHandleStack:
// '[' count ('|' global-handle-index)* ']', resolving each index against
// the placeholder global handles created in GetHeader.
void Deserializer::GetGlobalHandleStack(List<Handle<Object> >* stack) {
  reader_.ExpectC('[');
  int length = reader_.GetInt();
  for (int i = 0; i < length; i++) {
    reader_.ExpectC('|');
    int gh_index = reader_.GetInt();
    stack->Add(global_handles_[gh_index]);
  }
  reader_.ExpectC(']');
}
void Deserializer::GetContextStack() {
1453
  List<Handle<Object> > entered_contexts(2);
1454
  GetGlobalHandleStack(&entered_contexts);
1455
  for (int i = 0; i < entered_contexts.length(); i++) {
1456
    HandleScopeImplementer::instance()->SaveContext(entered_contexts[i]);
1457
  }
1458
}
1459

    
1460

    
1461
// Read an encoded address record ('P' + int) written by
// Serializer::PutEncodedAddress.
Address Deserializer::GetEncodedAddress() {
  reader_.ExpectC('P');
  return reinterpret_cast<Address>(reader_.GetInt());
}
// Materialize one object from the stream (the '[' has already been
// consumed by the caller): read the prologue, allocate real storage in
// the space the encoded address dictates, recurse into embedded objects,
// copy the raw bytes, then resolve the encoded pointers they contain.
Object* Deserializer::GetObject() {
  // Read the prologue: type, size and encoded address.
  InstanceType type = static_cast<InstanceType>(reader_.GetInt());
  int size = reader_.GetInt() << kObjectAlignmentBits;
  Address a = GetEncodedAddress();

  // Get a raw object of the right size in the right space.
  AllocationSpace space = GetSpace(a);
  Object* o;
  if (IsLargeExecutableObject(a)) {
    o = Heap::lo_space()->AllocateRawCode(size);
  } else if (IsLargeFixedArray(a)) {
    o = Heap::lo_space()->AllocateRawFixedArray(size);
  } else {
    AllocationSpace retry_space = (space == NEW_SPACE)
        ? Heap::TargetSpaceId(type)
        : space;
    o = Heap::AllocateRaw(size, space, retry_space);
  }
  ASSERT(!o->IsFailure());
  // Check that the simulation of heap allocation was correct.
  ASSERT(o == Resolve(a));

  // Read any recursively embedded objects.
  int c = reader_.GetC();
  while (c == '[') {
    GetObject();
    c = reader_.GetC();
  }
  ASSERT(c == '|');  // start of this object's body

  HeapObject* obj = reinterpret_cast<HeapObject*>(o);
  // Read the uninterpreted contents of the object after the map.
  reader_.GetBytes(obj->address(), size);
#ifdef DEBUG
  if (expect_debug_information_) {
    // Read in the epilogue to check that we're still synchronized.
    ExpectEncodedAddress(a);
    reader_.ExpectC(']');
  }
#endif

  // Resolve the encoded pointers we just read in.
  // Same as obj->Iterate(this), but doesn't rely on the map pointer being set.
  VisitPointer(reinterpret_cast<Object**>(obj->address()));
  obj->IterateBody(type, size, this);

  if (type == CODE_TYPE) {
    Code* code = Code::cast(obj);
    // Convert relocations from Object* to Address in Code objects.
    code->ConvertICTargetsFromObjectToAddress();
    LOG(CodeMoveEvent(a, code->address()));
  }
  objects_++;
  return o;
}
// Turn a (page index, page offset) pair into a heap object pointer using
// the cached page list for the given paged space.
static inline Object* ResolvePaged(int page_index,
                                   int page_offset,
                                   PagedSpace* space,
                                   List<Page*>* page_list) {
  ASSERT(page_index < page_list->length());
  Address address = (*page_list)[page_index]->OffsetToAddress(page_offset);
  return HeapObject::FromAddress(address);
}
template<typename T>
1536
void ConcatReversed(List<T>* target, const List<T>& source) {
1537
  for (int i = source.length() - 1; i >= 0; i--) {
1538
    target->Add(source[i]);
1539
  }
1540
}
1541

    
1542

    
1543
// Convert an encoded snapshot address into a real Object*.  Smis pass
// through unchanged; heap objects are located by space: paged spaces use
// the cached page lists, new space is a plain offset from its start, and
// large-object space is resolved through a lazily extended cache of the
// allocated large objects.
Object* Deserializer::Resolve(Address encoded) {
  Object* o = reinterpret_cast<Object*>(encoded);
  if (o->IsSmi()) return o;

  // Encoded addresses of HeapObjects always have 'HeapObject' tags.
  ASSERT(o->IsHeapObject());

  switch (GetSpace(encoded)) {
    // For Map space and Old space, we cache the known Pages in map_pages,
    // old_pointer_pages and old_data_pages. Even though MapSpace keeps a list
    // of page addresses, we don't rely on it since GetObject uses AllocateRaw,
    // and that appears not to update the page list.
    case MAP_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::map_space(), &map_pages_);
    case OLD_POINTER_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::old_pointer_space(), &old_pointer_pages_);
    case OLD_DATA_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::old_data_space(), &old_data_pages_);
    case CODE_SPACE:
      return ResolvePaged(PageIndex(encoded), PageOffset(encoded),
                          Heap::code_space(), &code_pages_);
    case NEW_SPACE:
      return HeapObject::FromAddress(Heap::NewSpaceStart() +
                                     NewSpaceOffset(encoded));
    case LO_SPACE:
      // Cache the known large_objects, allocated one per 'page'.
      int index = LargeObjectIndex(encoded);
      if (index >= large_objects_.length()) {
        // New large objects were allocated since the last lookup; the
        // iterator yields them newest-first, so collect the new ones and
        // append them reversed to keep large_objects_ in allocation order.
        int new_object_count =
          Heap::lo_space()->PageCount() - large_objects_.length();
        List<Object*> new_objects(new_object_count);
        LargeObjectIterator it(Heap::lo_space());
        for (int i = 0; i < new_object_count; i++) {
          new_objects.Add(it.next());
        }
#ifdef DEBUG
        // The remaining iterator entries must match the cached list.
        for (int i = large_objects_.length() - 1; i >= 0; i--) {
          ASSERT(it.next() == large_objects_[i]);
        }
#endif
        ConcatReversed(&large_objects_, new_objects);
        ASSERT(index < large_objects_.length());
      }
      return large_objects_[index];  // s.page_offset() is ignored.
  }
  UNREACHABLE();
  return NULL;
}
} }  // namespace v8::internal