Revision f230a1cf deps/v8/src/optimizing-compiler-thread.h


deps/v8/src/optimizing-compiler-thread.h
@@ -40,7 +40,7 @@
 namespace internal {
 
 class HOptimizedGraphBuilder;
-class OptimizingCompiler;
+class RecompileJob;
 class SharedFunctionInfo;
 
 class OptimizingCompilerThread : public Thread {
@@ -53,38 +53,47 @@
       isolate_(isolate),
       stop_semaphore_(0),
       input_queue_semaphore_(0),
-      osr_candidates_(2),
-      ready_for_osr_(2),
+      input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
+      input_queue_length_(0),
+      input_queue_shift_(0),
+      osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
+      osr_buffer_cursor_(0),
       osr_hits_(0),
-      osr_attempts_(0) {
+      osr_attempts_(0),
+      blocked_jobs_(0) {
     NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
-    NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+    input_queue_ = NewArray<RecompileJob*>(input_queue_capacity_);
+    if (FLAG_concurrent_osr) {
+      // Allocate and mark OSR buffer slots as empty.
+      osr_buffer_ = NewArray<RecompileJob*>(osr_buffer_capacity_);
+      for (int i = 0; i < osr_buffer_capacity_; i++) osr_buffer_[i] = NULL;
+    }
   }
-  ~OptimizingCompilerThread() {}
+
+  ~OptimizingCompilerThread();
 
   void Run();
   void Stop();
   void Flush();
-  void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
+  void QueueForOptimization(RecompileJob* optimizing_compiler);
+  void Unblock();
   void InstallOptimizedFunctions();
-  OptimizingCompiler* FindReadyOSRCandidate(Handle<JSFunction> function,
-                                            uint32_t osr_pc_offset);
+  RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
+                                      uint32_t osr_pc_offset);
   bool IsQueuedForOSR(Handle<JSFunction> function, uint32_t osr_pc_offset);
 
   bool IsQueuedForOSR(JSFunction* function);
 
   inline bool IsQueueAvailable() {
-    // We don't need a barrier since we have a data dependency right
-    // after.
-    Atomic32 current_length = NoBarrier_Load(&queue_length_);
-
-    // This can be queried only from the execution thread.
-    ASSERT(!IsOptimizerThread());
-    // Since only the execution thread increments queue_length_ and
-    // only one thread can run inside an Isolate at one time, a direct
-    // doesn't introduce a race -- queue_length_ may decreased in
-    // meantime, but not increased.
-    return (current_length < FLAG_concurrent_recompilation_queue_length);
+    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+    return input_queue_length_ < input_queue_capacity_;
+  }
+
+  inline void AgeBufferedOsrJobs() {
+    // Advance cursor of the cyclic buffer to next empty slot or stale OSR job.
+    // Dispose said OSR job in the latter case.  Calling this on every GC
+    // should make sure that we do not hold onto stale jobs indefinitely.
+    AddToOsrBuffer(NULL);
   }
 
 #ifdef DEBUG
@@ -94,13 +103,22 @@
  private:
   enum StopFlag { CONTINUE, STOP, FLUSH };
 
-  // Remove the oldest OSR candidates that are ready so that we
-  // only have |limit| left waiting.
-  void RemoveStaleOSRCandidates(int limit = kReadyForOSRLimit);
-
   void FlushInputQueue(bool restore_function_code);
   void FlushOutputQueue(bool restore_function_code);
+  void FlushOsrBuffer(bool restore_function_code);
   void CompileNext();
+  RecompileJob* NextInput();
+
+  // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
+  // Tasks evicted from the cyclic buffer are discarded.
+  void AddToOsrBuffer(RecompileJob* compiler);
+
+  inline int InputQueueIndex(int i) {
+    int result = (i + input_queue_shift_) % input_queue_capacity_;
+    ASSERT_LE(0, result);
+    ASSERT_LT(result, input_queue_capacity_);
+    return result;
+  }
 
 #ifdef DEBUG
   int thread_id_;
@@ -111,25 +129,29 @@
   Semaphore stop_semaphore_;
   Semaphore input_queue_semaphore_;
 
-  // Queue of incoming recompilation tasks (including OSR).
-  UnboundQueue<OptimizingCompiler*> input_queue_;
+  // Circular queue of incoming recompilation tasks (including OSR).
+  RecompileJob** input_queue_;
+  int input_queue_capacity_;
+  int input_queue_length_;
+  int input_queue_shift_;
+  Mutex input_queue_mutex_;
+
   // Queue of recompilation tasks ready to be installed (excluding OSR).
-  UnboundQueue<OptimizingCompiler*> output_queue_;
-  // List of all OSR related recompilation tasks (both incoming and ready ones).
-  List<OptimizingCompiler*> osr_candidates_;
-  // List of recompilation tasks ready for OSR.
-  List<OptimizingCompiler*> ready_for_osr_;
+  UnboundQueue<RecompileJob*> output_queue_;
+
+  // Cyclic buffer of recompilation tasks for OSR.
+  RecompileJob** osr_buffer_;
+  int osr_buffer_capacity_;
+  int osr_buffer_cursor_;
 
   volatile AtomicWord stop_thread_;
-  volatile Atomic32 queue_length_;
   TimeDelta time_spent_compiling_;
   TimeDelta time_spent_total_;
 
-  Mutex osr_list_mutex_;
   int osr_hits_;
   int osr_attempts_;
 
-  static const int kReadyForOSRLimit = 4;
+  int blocked_jobs_;
 };
 
 } }  // namespace v8::internal
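The change replaces the unbounded input queue with a fixed-capacity circular buffer guarded by input_queue_mutex_: IsQueueAvailable() compares input_queue_length_ against input_queue_capacity_, and InputQueueIndex() maps a logical position to a physical slot using input_queue_shift_. Below is a minimal standalone sketch of that indexing scheme, assuming a plain enqueue-at-back/dequeue-at-front policy; the class, member, and method names are illustrative only, since the actual queue management lives in optimizing-compiler-thread.cc and is not part of this header.

#include <cassert>

// Hypothetical illustration of a shift-based circular queue; not V8 code.
template <typename T, int kCapacity>
class CircularJobQueue {
 public:
  CircularJobQueue() : shift_(0), length_(0) {}

  // Mirrors IsQueueAvailable(): is there room for one more job?
  bool IsAvailable() const { return length_ < kCapacity; }

  // Mirrors InputQueueIndex(): logical position i -> physical slot.
  int Index(int i) const {
    int result = (i + shift_) % kCapacity;
    assert(0 <= result && result < kCapacity);
    return result;
  }

  void Enqueue(T job) {
    assert(IsAvailable());
    buffer_[Index(length_)] = job;  // slot just past the logical back
    length_++;
  }

  T Dequeue() {
    assert(length_ > 0);
    T job = buffer_[Index(0)];      // logical front
    shift_ = Index(1);              // advance the front, wrapping around
    length_--;
    return job;
  }

 private:
  T buffer_[kCapacity];
  int shift_;   // physical slot of the logical front
  int length_;  // number of queued jobs
};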
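The OSR candidate lists are likewise replaced by a cyclic buffer: AgeBufferedOsrJobs(), called on every GC, simply stores NULL through AddToOsrBuffer(), which advances the cursor to the next empty slot or stale job and disposes of the stale job it lands on. The following is a hedged sketch of that eviction loop, assuming a hypothetical Job type with IsWaitingForInstall() and Dispose() hooks; the real implementation is in optimizing-compiler-thread.cc.

#include <cstddef>

// Hypothetical stand-in for a queued recompilation job; not V8 code.
struct Job {
  bool IsWaitingForInstall() const { return waiting; }  // stale if still waiting
  void Dispose() { /* release whatever the job owns */ }
  bool waiting = true;
};

class OsrBuffer {
 public:
  // The header sizes the buffer at queue length + 4, so an empty or stale
  // slot is always reachable and the eviction loop below terminates.
  explicit OsrBuffer(int capacity)
      : capacity_(capacity), cursor_(0), slots_(new Job*[capacity]) {
    for (int i = 0; i < capacity_; i++) slots_[i] = NULL;
  }
  ~OsrBuffer() { delete[] slots_; }

  // Advance the cursor to the next empty slot or stale job, disposing the
  // stale job in the latter case, then store |job|. Storing NULL merely
  // ages the buffer, which is what AgeBufferedOsrJobs() does on every GC.
  void AddToOsrBuffer(Job* job) {
    while (true) {
      Job* current = slots_[cursor_];
      if (current == NULL) break;            // empty slot found
      if (current->IsWaitingForInstall()) {  // stale job: evict it
        current->Dispose();
        break;
      }
      cursor_ = (cursor_ + 1) % capacity_;
    }
    slots_[cursor_] = job;
    cursor_ = (cursor_ + 1) % capacity_;
  }

  void AgeBufferedOsrJobs() { AddToOsrBuffer(NULL); }

 private:
  int capacity_;
  int cursor_;
  Job** slots_;
};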
