The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.

Please select the desired protocol below to get the URL.

This URL has Read-Only access.

Statistics
| Branch: | Revision:

main_repo / deps / v8 / src / profile-generator.cc @ f230a1cf

History | View | Annotate | Download (20.6 KB)

1
// Copyright 2012 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
4
// met:
5
//
6
//     * Redistributions of source code must retain the above copyright
7
//       notice, this list of conditions and the following disclaimer.
8
//     * Redistributions in binary form must reproduce the above
9
//       copyright notice, this list of conditions and the following
10
//       disclaimer in the documentation and/or other materials provided
11
//       with the distribution.
12
//     * Neither the name of Google Inc. nor the names of its
13
//       contributors may be used to endorse or promote products derived
14
//       from this software without specific prior written permission.
15
//
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27

    
28
#include "v8.h"
29

    
30
#include "profile-generator-inl.h"
31

    
32
#include "compiler.h"
33
#include "debug.h"
34
#include "sampler.h"
35
#include "global-handles.h"
36
#include "scopeinfo.h"
37
#include "unicode.h"
38
#include "zone-inl.h"
39

    
40
namespace v8 {
41
namespace internal {
42

    
43

    
44
// HashMap match callback: two keys are equal when the C strings they
// point at have identical contents.
bool StringsStorage::StringsMatch(void* key1, void* key2) {
  const char* lhs = reinterpret_cast<char*>(key1);
  const char* rhs = reinterpret_cast<char*>(key2);
  return strcmp(lhs, rhs) == 0;
}
48

    
49

    
50
// Interned-string table keyed by string contents; the heap's hash seed
// keeps hashes consistent with StringHasher elsewhere.
StringsStorage::StringsStorage(Heap* heap)
    : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}
53

    
54

    
55
StringsStorage::~StringsStorage() {
  // Every entry's value is a heap array this table owns (see GetCopy /
  // AddOrDisposeString); release them all.
  for (HashMap::Entry* p = names_.Start();
       p != NULL;
       p = names_.Next(p)) {
    DeleteArray(reinterpret_cast<const char*>(p->value));
  }
}
62

    
63

    
64
// Returns a canonical, table-owned copy of |src|, interning it on first use.
// The returned pointer stays valid for the lifetime of this storage.
const char* StringsStorage::GetCopy(const char* src) {
  int len = static_cast<int>(strlen(src));
  HashMap::Entry* entry = GetEntry(src, len);
  if (entry->value == NULL) {
    // Not interned yet: copy the string and use the copy as both key
    // and value, so the entry owns its own key storage.
    Vector<char> dst = Vector<char>::New(len + 1);
    OS::StrNCpy(dst, src, len);
    dst[len] = '\0';
    entry->key = dst.start();
    entry->value = entry->key;
  }
  return reinterpret_cast<const char*>(entry->value);
}
76

    
77

    
78
// printf-style convenience wrapper over GetVFormatted; returns an
// interned, table-owned string.
const char* StringsStorage::GetFormatted(const char* format, ...) {
  va_list args;
  va_start(args, format);
  const char* result = GetVFormatted(format, args);
  va_end(args);
  return result;
}
85

    
86

    
87
const char* StringsStorage::AddOrDisposeString(char* str, int len) {
88
  HashMap::Entry* entry = GetEntry(str, len);
89
  if (entry->value == NULL) {
90
    // New entry added.
91
    entry->key = str;
92
    entry->value = str;
93
  } else {
94
    DeleteArray(str);
95
  }
96
  return reinterpret_cast<const char*>(entry->value);
97
}
98

    
99

    
100
// Formats into a fixed 1KB buffer and interns the result.
// NOTE(review): output longer than 1KB relies on VSNPrintF's truncation
// behavior; on formatting failure (-1) the format string itself is
// interned as a fallback.
const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
  Vector<char> str = Vector<char>::New(1024);
  int len = OS::VSNPrintF(str, format, args);
  if (len == -1) {
    DeleteArray(str.start());
    return GetCopy(format);
  }
  return AddOrDisposeString(str.start(), len);
}
109

    
110

    
111
// Returns an interned C-string form of a JS Name. Strings are flattened
// (truncated to kMaxNameSize); symbols get a fixed placeholder.
const char* StringsStorage::GetName(Name* name) {
  if (name->IsString()) {
    String* str = String::cast(name);
    int length = Min(kMaxNameSize, str->length());
    int actual_length = 0;
    SmartArrayPointer<char> data =
        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
                       &actual_length);
    // Detach() hands ownership of the char array to the table.
    return AddOrDisposeString(data.Detach(), actual_length);
  } else if (name->IsSymbol()) {
    return "<symbol>";
  }
  return "";
}
125

    
126

    
127
const char* StringsStorage::GetName(int index) {
128
  return GetFormatted("%d", index);
129
}
130

    
131

    
132
// Interned function name for a JS Name, with the empty name replaced by
// the "(anonymous function)" placeholder.
const char* StringsStorage::GetFunctionName(Name* name) {
  const char* interned = GetName(name);
  return BeautifyFunctionName(interned);
}
135

    
136

    
137
const char* StringsStorage::GetFunctionName(const char* name) {
138
  return BeautifyFunctionName(GetCopy(name));
139
}
140

    
141

    
142
const char* StringsStorage::BeautifyFunctionName(const char* name) {
143
  return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name;
144
}
145

    
146

    
147
// Approximate memory footprint: the object itself, the hash table's
// entry array, and every interned string (including NUL terminators).
size_t StringsStorage::GetUsedMemorySize() const {
  size_t size = sizeof(*this);
  size += sizeof(HashMap::Entry) * names_.capacity();
  for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
    size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
  }
  return size;
}
155

    
156

    
157
// Looks up (inserting if absent) the hash-map entry for |str|; a fresh
// entry has value == NULL, which callers use to detect first insertion.
HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
  uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
  return names_.Lookup(const_cast<char*>(str), hash, true);
}
161

    
162

    
163
// Shared empty-string defaults for optional CodeEntry fields.
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";
166

    
167

    
168
CodeEntry::~CodeEntry() {
  // Owned list of no-frame offset ranges (may be NULL; delete NULL is safe).
  delete no_frame_ranges_;
}
171

    
172

    
173
// Hash identifying this entry for call-tree aggregation. Must agree with
// IsSameAs(): entries that compare equal hash equally.
uint32_t CodeEntry::GetCallUid() const {
  uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
  if (shared_id_ != 0) {
    // Shared-function entries are identified by their shared id alone.
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
                               v8::internal::kZeroHashSeed);
  } else {
    // Name strings are interned in StringsStorage, so hashing the pointer
    // values is equivalent to hashing the string contents.
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}
192

    
193

    
194
// Equality used for call-tree aggregation; keep consistent with
// GetCallUid(). Interned strings make pointer comparison sufficient.
bool CodeEntry::IsSameAs(CodeEntry* entry) const {
  if (this == entry) return true;
  if (tag_ != entry->tag_) return false;
  if (shared_id_ != entry->shared_id_) return false;
  // A non-zero shared id fully identifies the entry.
  if (shared_id_ != 0) return true;
  return name_prefix_ == entry->name_prefix_
      && name_ == entry->name_
      && resource_name_ == entry->resource_name_
      && line_number_ == entry->line_number_;
}
204

    
205

    
206
// Reclassifies this entry as a builtin and records which one.
void CodeEntry::SetBuiltinId(Builtins::Name id) {
  tag_ = Logger::BUILTIN_TAG;
  builtin_id_ = id;
}
210

    
211

    
212
// Returns the child node for |entry|, or NULL when no such child exists
// (lookup only — nothing is inserted).
ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), false);
  if (map_entry == NULL) return NULL;
  return reinterpret_cast<ProfileNode*>(map_entry->value);
}
218

    
219

    
220
// Returns the child node for |entry|, creating and registering it first
// if necessary (kept both in the hash map and the ordered list).
ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), true);
  if (map_entry->value == NULL) {
    // New node added.
    ProfileNode* new_node = new ProfileNode(tree_, entry);
    map_entry->value = new_node;
    children_list_.Add(new_node);
  }
  return reinterpret_cast<ProfileNode*>(map_entry->value);
}
231

    
232

    
233
// Debug dump: prints this node's tick count and entry info at the given
// indentation, then recurses into all children indented two more columns.
void ProfileNode::Print(int indent) {
  OS::Print("%5u %*c %s%s %d #%d %s",
            self_ticks_,
            indent, ' ',
            entry_->name_prefix(),
            entry_->name(),
            entry_->script_id(),
            id(),
            entry_->bailout_reason());
  // Only print location when a resource (script) name is present.
  if (entry_->resource_name()[0] != '\0')
    OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  OS::Print("\n");
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}
251

    
252

    
253
// Traversal callback that deletes every node after its children have been
// visited (post-order), used by ~ProfileTree.
class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
263

    
264

    
265
// Creates the tree with a synthetic "(root)" entry; node ids start at 1
// (the root's id).
ProfileTree::ProfileTree()
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)) {
}
270

    
271

    
272
ProfileTree::~ProfileTree() {
  // Post-order traversal deletes children before their parent.
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}
276

    
277

    
278
// Adds a sample whose |path| is ordered innermost-frame-first: walk the
// vector backwards so the tree grows caller -> callee. NULL entries
// (unsymbolized frames) are skipped. Returns the leaf node, whose
// self-tick count is incremented.
ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start() + path.length() - 1;
       entry != path.start() - 1;
       --entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
  return node;
}
290

    
291

    
292
void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
293
  ProfileNode* node = root_;
294
  for (CodeEntry** entry = path.start();
295
       entry != path.start() + path.length();
296
       ++entry) {
297
    if (*entry != NULL) {
298
      node = node->FindOrAddChild(*entry);
299
    }
300
  }
301
  node->IncrementSelfTicks();
302
}
303

    
304

    
305
// Pair of corresponding nodes in two trees.
// NOTE(review): not referenced anywhere in this file — presumably kept for
// other users or left over from a removed tree-merging routine; confirm
// before removing.
struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};
311

    
312

    
313
// Cursor over one node's children, used as a stack frame by the iterative
// traversal in ProfileTree::TraverseDepthFirst.
class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  // Child currently being visited; only valid while has_current_child().
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};
329

    
330

    
331
// Non-recursive implementation of a depth-first post-order tree traversal.
// The Callback receives BeforeTraversingChild / AfterAllChildrenTraversed /
// AfterChildTraversed hooks (see DeleteNodesCallback for an example).
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  // Explicit stack of child cursors replaces the call stack.
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      // Descend into the next unvisited child.
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      // All children done: post-order visit, then notify the parent and
      // advance its cursor past this child.
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}
353

    
354

    
355
// Starts a profile: records the wall-clock start time and starts the
// elapsed timer used later to compute end_time_.
CpuProfile::CpuProfile(const char* title, unsigned uid, bool record_samples)
    : title_(title),
      uid_(uid),
      record_samples_(record_samples),
      start_time_(Time::NowFromSystemTime()) {
  timer_.Start();
}
362

    
363

    
364
void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
365
  ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
366
  if (record_samples_) samples_.Add(top_frame_node);
367
}
368

    
369

    
370
// Finalizes the profile's time range from the elapsed timer started in
// the constructor.
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = start_time_ + timer_.Elapsed();
}
373

    
374

    
375
// Debug dump of the whole top-down call tree.
void CpuProfile::Print() {
  OS::Print("[Top down]:\n");
  top_down_.Print();
}
379

    
380

    
381
// Sentinel entry marking tree nodes that store shared-function IDs
// instead of real code entries (see GetSharedId).
CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
383

    
384

    
385
// Registers a code object at [addr, addr+size), first evicting any
// previously registered ranges it overlaps.
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}
391

    
392

    
393
// Removes every registered code range that overlaps [start, end).
// Walks backwards from end-1 using greatest-less-than lookups; removals
// are deferred so the tree isn't mutated while being searched.
void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    // Standard interval-overlap test against [start, end).
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}
405

    
406

    
407
// Finds the code entry whose range contains |addr|, or NULL. When found
// and |start| is non-NULL, *start receives the range's base address.
CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
  CodeTree::Locator locator;
  // Greatest key <= addr is the only candidate range.
  if (!tree_.FindGreatestLessThan(addr, &locator)) return NULL;
  const CodeEntryInfo& info = locator.value();
  // Reject if addr lies past the candidate's end.
  if (addr >= locator.key() + info.size) return NULL;
  if (start) *start = locator.key();
  return info.entry;
}
421

    
422

    
423
// Returns a stable numeric id for the SharedFunctionInfo at |addr|,
// allocating a fresh one on first sight.
int CodeMap::GetSharedId(Address addr) {
  CodeTree::Locator locator;
  // For shared function entries, 'size' field is used to store their IDs.
  if (tree_.Find(addr, &locator)) {
    const CodeEntryInfo& entry = locator.value();
    ASSERT(entry.entry == kSharedFunctionCodeEntry);
    return entry.size;
  } else {
    tree_.Insert(addr, &locator);
    int id = next_shared_id_++;
    locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
    return id;
  }
}
437

    
438

    
439
// Relocates a registered code range from |from| to |to| (e.g. after GC
// compaction). Re-inserting via AddCode also evicts anything the new
// range overlaps. No-op if |from| isn't registered or addresses match.
void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  CodeTree::Locator locator;
  if (!tree_.Find(from, &locator)) return;
  CodeEntryInfo entry = locator.value();
  tree_.Remove(from);
  AddCode(to, entry.entry, entry.size);
}
447

    
448

    
449
void CodeMap::CodeTreePrinter::Call(
450
    const Address& key, const CodeMap::CodeEntryInfo& value) {
451
  // For shared function entries, 'size' field is used to store their IDs.
452
  if (value.entry == kSharedFunctionCodeEntry) {
453
    OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
454
  } else {
455
    OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
456
  }
457
}
458

    
459

    
460
// Debug dump of every registered code range.
void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}
464

    
465

    
466
// The binary semaphore (initial count 1) acts as a mutex guarding
// current_profiles_ against the sampling thread.
CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      current_profiles_semaphore_(1) {
}
470

    
471

    
472
// List::Iterate callback: frees one owned CodeEntry.
static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}
475

    
476

    
477
// List::Iterate callback: frees one owned CpuProfile.
static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}
480

    
481

    
482
CpuProfilesCollection::~CpuProfilesCollection() {
  // The collection owns all profiles (finished and in-progress) and all
  // code entries it handed out via NewCodeEntry.
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}
487

    
488

    
489
// Begins a new profile. Returns false when the simultaneous-profile limit
// is reached or a profile with the same title is already running.
// The semaphore serializes access against the sampling thread; every exit
// path must Signal() it.
bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
                                           bool record_samples) {
  ASSERT(uid > 0);
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start profile with the same title.
      current_profiles_semaphore_.Signal();
      return false;
    }
  }
  current_profiles_.Add(new CpuProfile(title, uid, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}
508

    
509

    
510
// Stops and finalizes the most recently started profile matching |title|
// (an empty title matches any profile). Returns the finished profile,
// which is moved to finished_profiles_, or NULL if none matched.
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  // Scan newest-first so nested profiles stop in LIFO order.
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  // Finalization happens outside the lock; the profile is no longer
  // reachable from current_profiles_.
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}
527

    
528

    
529
bool CpuProfilesCollection::IsLastProfile(const char* title) {
530
  // Called from VM thread, and only it can mutate the list,
531
  // so no locking is needed here.
532
  if (current_profiles_.length() != 1) return false;
533
  return StrLength(title) == 0
534
      || strcmp(current_profiles_[0]->title(), title) == 0;
535
}
536

    
537

    
538
// Detaches a completed profile from the collection by uid; the profile
// must be present (UNREACHABLE otherwise). Note: the caller becomes
// responsible for the profile's lifetime after removal.
void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from VM thread for a completed profile.
  unsigned uid = profile->uid();
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (uid == finished_profiles_[i]->uid()) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}
549

    
550

    
551
// Records one sampled stack into every profile currently running.
// Called on the sampling path; serialized against Start/StopProfiling
// by the semaphore.
void CpuProfilesCollection::AddPathToCurrentProfiles(
    const Vector<CodeEntry*>& path) {
  // As starting / stopping profiles is rare relatively to this
  // method, we don't bother minimizing the duration of lock holding,
  // e.g. copying contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(path);
  }
  current_profiles_semaphore_.Signal();
}
562

    
563

    
564
// Allocates a CodeEntry owned by this collection (freed in the
// destructor); callers get a borrowed pointer.
CodeEntry* CpuProfilesCollection::NewCodeEntry(
      Logger::LogEventsAndTags tag,
      const char* name,
      const char* name_prefix,
      const char* resource_name,
      int line_number,
      int column_number) {
  CodeEntry* code_entry = new CodeEntry(tag,
                                        name,
                                        name_prefix,
                                        resource_name,
                                        line_number,
                                        column_number);
  code_entries_.Add(code_entry);
  return code_entry;
}
580

    
581

    
582
// Well-known synthetic entry names shown in profiles.
const char* const ProfileGenerator::kAnonymousFunctionName =
    "(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";
592

    
593

    
594
// Pre-creates the synthetic entries used for VM states and unresolved
// frames; all are owned by |profiles| (see NewCodeEntry).
ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}
607

    
608

    
609
// Symbolizes one tick sample into a CodeEntry path (innermost frame first)
// and records it into all running profiles. Samples caught inside a
// function's no-frame-ranges are dropped entirely.
void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  // Allocate space for stack frames + pc + function + vm-state.
  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
  // As actual number of decoded code entries may vary, initialize
  // entries vector with NULL values.
  CodeEntry** entry = entries.start();
  memset(entry, 0, entries.length() * sizeof(*entry));
  if (sample.pc != NULL) {
    if (sample.has_external_callback && sample.state == EXTERNAL &&
        sample.top_frame_type == StackFrame::EXIT) {
      // Don't use PC when in external callback code, as it can point
      // inside callback's code, and we will erroneously report
      // that a callback calls itself.
      *entry++ = code_map_.FindEntry(sample.external_callback);
    } else {
      Address start;
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
      // If pc is in the function code before it set up stack frame or after the
      // frame was destroyed SafeStackFrameIterator incorrectly thinks that
      // ebp contains return address of the current function and skips caller's
      // frame. Check for this case and just skip such samples.
      if (pc_entry) {
        List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
        if (ranges) {
          Code* code = Code::cast(HeapObject::FromAddress(start));
          int pc_offset = static_cast<int>(
              sample.pc - code->instruction_start());
          for (int i = 0; i < ranges->length(); i++) {
            OffsetRange& range = ranges->at(i);
            if (range.from <= pc_offset && pc_offset < range.to) {
              // pc is inside a no-frame range: discard the whole sample.
              return;
            }
          }
        }
        *entry++ = pc_entry;

        if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
            pc_entry->builtin_id() == Builtins::kFunctionApply) {
          // When current function is FunctionCall or FunctionApply builtin the
          // top frame is either frame of the calling JS function or internal
          // frame. In the latter case we know the caller for sure but in the
          // former case we don't so we simply replace the frame with
          // 'unresolved' entry.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            *entry++ = unresolved_entry_;
          }
        }
      }
    }

    // Symbolize the remaining captured stack addresses; unknown addresses
    // stay NULL and are skipped when the path is added to the tree.
    for (const Address* stack_pos = sample.stack,
           *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end;
         ++stack_pos) {
      *entry++ = code_map_.FindEntry(*stack_pos);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (CodeEntry** e = entries.start(); e != entry; ++e) {
      if (*e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      *entry++ = EntryForVMState(sample.state);
    }
  }

  profiles_->AddPathToCurrentProfiles(entries);
}
683

    
684

    
685
// Maps a VM state tag to the synthetic profile entry that represents it;
// returns NULL for unrecognized tags.
CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return gc_entry_;
    case JS:
    case COMPILER:
    // DOM events handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return program_entry_;
    case IDLE:
      return idle_entry_;
    default: return NULL;
  }
}
702

    
703
} }  // namespace v8::internal