deps/v8/src/v8threads.cc @ 40c0f755

// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "api.h"
#include "bootstrapper.h"
#include "debug.h"
#include "execution.h"
#include "v8threads.h"
#include "regexp-stack.h"

namespace v8 {

static internal::Thread::LocalStorageKey thread_state_key =
    internal::Thread::CreateThreadLocalKey();
static internal::Thread::LocalStorageKey thread_id_key =
    internal::Thread::CreateThreadLocalKey();


// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
bool Locker::active_ = false;


// Constructor for the Locker object.  Once the Locker is constructed the
// current thread will be guaranteed to have the big V8 lock.
Locker::Locker() : has_lock_(false), top_level_(true) {
  // Record that the Locker has been used at least once.
  active_ = true;
  // Get the big lock if necessary.
  if (!internal::ThreadManager::IsLockedByCurrentThread()) {
    internal::ThreadManager::Lock();
    has_lock_ = true;
    // This may be a locker within an unlocker in which case we have to
    // get the saved state for this thread and restore it.
    if (internal::ThreadManager::RestoreThread()) {
      top_level_ = false;
    }
  }
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());

  // Make sure this thread is assigned a thread id.
  internal::ThreadManager::AssignId();
}


bool Locker::IsLocked() {
  return internal::ThreadManager::IsLockedByCurrentThread();
}


Locker::~Locker() {
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
  if (has_lock_) {
    if (!top_level_) {
      internal::ThreadManager::ArchiveThread();
    }
    internal::ThreadManager::Unlock();
  }
}


Unlocker::Unlocker() {
  ASSERT(internal::ThreadManager::IsLockedByCurrentThread());
  internal::ThreadManager::ArchiveThread();
  internal::ThreadManager::Unlock();
}


Unlocker::~Unlocker() {
  ASSERT(!internal::ThreadManager::IsLockedByCurrentThread());
  internal::ThreadManager::Lock();
  internal::ThreadManager::RestoreThread();
}
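

// Usage sketch (illustrative, not part of the original file): the
// Locker/Unlocker pattern an embedder would use to share V8 between threads,
// assuming the pre-isolate API of this revision where both objects are
// default-constructed.
//
//   {
//     v8::Locker locker;        // Blocks until this thread holds the big lock.
//     // ... call into V8 ...
//     {
//       v8::Unlocker unlocker;  // Archives this thread's V8 state and
//                               // releases the lock for blocking work.
//       // ... long-running work that does not touch V8 ...
//     }                         // Re-acquires the lock and restores the state.
//   }                           // Releases the lock (archiving if nested).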


void Locker::StartPreemption(int every_n_ms) {
  v8::internal::ContextSwitcher::StartPreemption(every_n_ms);
}


void Locker::StopPreemption() {
  v8::internal::ContextSwitcher::StopPreemption();
}
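

// Usage sketch (illustrative): enabling preemptive switching between threads
// that share V8. The 50 ms interval is an arbitrary example value.
//
//   v8::Locker locker;                // Preemption requires holding the lock.
//   v8::Locker::StartPreemption(50);  // Preempt the running thread every 50 ms.
//   // ... several threads run JavaScript, each entering via v8::Locker ...
//   v8::Locker::StopPreemption();     // Back to purely cooperative scheduling.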


namespace internal {


bool ThreadManager::RestoreThread() {
  // First check whether the current thread has been 'lazily archived', i.e.
  // not archived at all.  If that is the case we put the state storage we
  // had prepared back in the free list, since we didn't need it after all.
  if (lazily_archived_thread_.IsSelf()) {
    lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
    ASSERT(Thread::GetThreadLocal(thread_state_key) ==
           lazily_archived_thread_state_);
    lazily_archived_thread_state_->set_id(kInvalidId);
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
    lazily_archived_thread_state_ = NULL;
    Thread::SetThreadLocal(thread_state_key, NULL);
    return true;
  }

  // Make sure that the preemption thread cannot modify the thread state while
  // it is being archived or restored.
  ExecutionAccess access;

  // If there is another thread that was lazily archived then we have to really
  // archive it now.
  if (lazily_archived_thread_.IsValid()) {
    EagerlyArchiveThread();
  }
  ThreadState* state =
      reinterpret_cast<ThreadState*>(Thread::GetThreadLocal(thread_state_key));
  if (state == NULL) {
    return false;
  }
  char* from = state->data();
  from = HandleScopeImplementer::RestoreThread(from);
  from = Top::RestoreThread(from);
  from = Debug::RestoreDebug(from);
  from = StackGuard::RestoreStackGuard(from);
  from = RegExpStack::RestoreStack(from);
  from = Bootstrapper::RestoreState(from);
  Thread::SetThreadLocal(thread_state_key, NULL);
  state->set_id(kInvalidId);
  state->Unlink();
  state->LinkInto(ThreadState::FREE_LIST);
  return true;
}
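

// Reading aid for the lazy-archiving protocol spread across RestoreThread(),
// ArchiveThread() and EagerlyArchiveThread() below:
//
//   1. Unlocker construction calls ArchiveThread(), which only *reserves* a
//      ThreadState, stores it in TLS and marks this thread as lazily
//      archived; no state is copied yet.
//   2. If the same thread takes the lock back first, the IsSelf() branch
//      above simply returns the unused ThreadState to the free list.
//   3. If a different thread takes the lock first, its RestoreThread() call
//      sees lazily_archived_thread_.IsValid() and runs
//      EagerlyArchiveThread() to copy the other thread's state out for real.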


void ThreadManager::Lock() {
  mutex_->Lock();
  mutex_owner_.Initialize(ThreadHandle::SELF);
  ASSERT(IsLockedByCurrentThread());
}


void ThreadManager::Unlock() {
  mutex_owner_.Initialize(ThreadHandle::INVALID);
  mutex_->Unlock();
}


static int ArchiveSpacePerThread() {
  return HandleScopeImplementer::ArchiveSpacePerThread() +
                            Top::ArchiveSpacePerThread() +
                          Debug::ArchiveSpacePerThread() +
                     StackGuard::ArchiveSpacePerThread() +
                    RegExpStack::ArchiveSpacePerThread() +
                   Bootstrapper::ArchiveSpacePerThread();
}
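

// Layout note: each archived thread gets one contiguous buffer of the size
// computed above. Every subsystem owns a fixed-size region within it, and the
// Archive*/Restore* chains in EagerlyArchiveThread() and RestoreThread() walk
// the regions in this same order, each call returning a pointer just past the
// region it consumed.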


ThreadState* ThreadState::free_anchor_ = new ThreadState();
ThreadState* ThreadState::in_use_anchor_ = new ThreadState();


ThreadState::ThreadState() : id_(ThreadManager::kInvalidId),
                             next_(this), previous_(this) {
}


void ThreadState::AllocateSpace() {
  data_ = NewArray<char>(ArchiveSpacePerThread());
}


void ThreadState::Unlink() {
  next_->previous_ = previous_;
  previous_->next_ = next_;
}


void ThreadState::LinkInto(List list) {
  ThreadState* flying_anchor =
      list == FREE_LIST ? free_anchor_ : in_use_anchor_;
  next_ = flying_anchor->next_;
  previous_ = flying_anchor;
  flying_anchor->next_ = this;
  next_->previous_ = this;
}
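

// List invariant: ThreadState objects live on circular doubly-linked lists
// around the free_anchor_/in_use_anchor_ sentinels. A newly constructed state
// points at itself, an empty list is an anchor pointing at itself, so
// Unlink() and LinkInto() never need NULL checks.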


ThreadState* ThreadState::GetFree() {
  ThreadState* gotten = free_anchor_->next_;
  if (gotten == free_anchor_) {
    ThreadState* new_thread_state = new ThreadState();
    new_thread_state->AllocateSpace();
    return new_thread_state;
  }
  return gotten;
}


// Gets the first in the list of archived threads.
ThreadState* ThreadState::FirstInUse() {
  return in_use_anchor_->Next();
}


ThreadState* ThreadState::Next() {
  if (next_ == in_use_anchor_) return NULL;
  return next_;
}


int ThreadManager::next_id_ = 0;
Mutex* ThreadManager::mutex_ = OS::CreateMutex();
ThreadHandle ThreadManager::mutex_owner_(ThreadHandle::INVALID);
ThreadHandle ThreadManager::lazily_archived_thread_(ThreadHandle::INVALID);
ThreadState* ThreadManager::lazily_archived_thread_state_ = NULL;


void ThreadManager::ArchiveThread() {
  ASSERT(!lazily_archived_thread_.IsValid());
  ASSERT(Thread::GetThreadLocal(thread_state_key) == NULL);
  ThreadState* state = ThreadState::GetFree();
  state->Unlink();
  Thread::SetThreadLocal(thread_state_key, reinterpret_cast<void*>(state));
  lazily_archived_thread_.Initialize(ThreadHandle::SELF);
  lazily_archived_thread_state_ = state;
  ASSERT(state->id() == kInvalidId);
  state->set_id(CurrentId());
  ASSERT(state->id() != kInvalidId);
}


void ThreadManager::EagerlyArchiveThread() {
  ThreadState* state = lazily_archived_thread_state_;
  state->LinkInto(ThreadState::IN_USE_LIST);
  char* to = state->data();
  to = HandleScopeImplementer::ArchiveThread(to);
  to = Top::ArchiveThread(to);
  to = Debug::ArchiveDebug(to);
  to = StackGuard::ArchiveStackGuard(to);
  to = RegExpStack::ArchiveStack(to);
  to = Bootstrapper::ArchiveState(to);
  lazily_archived_thread_.Initialize(ThreadHandle::INVALID);
  lazily_archived_thread_state_ = NULL;
}


void ThreadManager::Iterate(ObjectVisitor* v) {
  // Expecting no threads during serialization/deserialization
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data = HandleScopeImplementer::Iterate(v, data);
    data = Top::Iterate(v, data);
  }
}


void ThreadManager::MarkCompactPrologue(bool is_compacting) {
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    Top::MarkCompactPrologue(is_compacting, data);
  }
}


void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
  for (ThreadState* state = ThreadState::FirstInUse();
       state != NULL;
       state = state->Next()) {
    char* data = state->data();
    data += HandleScopeImplementer::ArchiveSpacePerThread();
    Top::MarkCompactEpilogue(is_compacting, data);
  }
}


int ThreadManager::CurrentId() {
  return bit_cast<int, void*>(Thread::GetThreadLocal(thread_id_key));
}


void ThreadManager::AssignId() {
  if (Thread::GetThreadLocal(thread_id_key) == NULL) {
    Thread::SetThreadLocal(thread_id_key, bit_cast<void*, int>(next_id_++));
  }
}
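

// Sketch of the id-storage idiom above (assuming, as its use here implies,
// that bit_cast in this revision permits int <-> void* conversion): the
// integer id is stored directly in the TLS slot by reinterpreting its bits,
// so no per-thread heap allocation is needed.
//
//   void* slot = bit_cast<void*, int>(7);  // store: id 7 packed into a pointer
//   int id = bit_cast<int, void*>(slot);   // load: id == 7 again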


// This is the ContextSwitcher singleton. There is at most a single thread
// running which delivers preemption events to V8 threads.
ContextSwitcher* ContextSwitcher::singleton_ = NULL;


ContextSwitcher::ContextSwitcher(int every_n_ms)
  : keep_going_(true),
    sleep_ms_(every_n_ms) {
}


// Set the scheduling interval of V8 threads. This function starts the
// ContextSwitcher thread if needed.
void ContextSwitcher::StartPreemption(int every_n_ms) {
  ASSERT(Locker::IsLocked());
  if (singleton_ == NULL) {
    // If the ContextSwitcher thread is not running at the moment, start it now.
    singleton_ = new ContextSwitcher(every_n_ms);
    singleton_->Start();
  } else {
    // The ContextSwitcher thread is already running, so we just change the
    // scheduling interval.
    singleton_->sleep_ms_ = every_n_ms;
  }
}


// Disable preemption of V8 threads. If multiple threads want to use V8 they
// must cooperatively schedule amongst themselves from this point on.
void ContextSwitcher::StopPreemption() {
  ASSERT(Locker::IsLocked());
  if (singleton_ != NULL) {
    // The ContextSwitcher thread is running. We need to stop it and release
    // its resources.
    singleton_->keep_going_ = false;
    singleton_->Join();  // Wait for the ContextSwitcher thread to exit.
    // The thread has exited, so now we can delete it.
    delete singleton_;
    singleton_ = NULL;
  }
}


// Main loop of the ContextSwitcher thread: Preempt the currently running V8
// thread at regular intervals.
void ContextSwitcher::Run() {
  while (keep_going_) {
    OS::Sleep(sleep_ms_);
    StackGuard::Preempt();
  }
}
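

// Note on delivery (a reading aid; the mechanism lives outside this file):
// preemption is cooperative. StackGuard::Preempt() only flags an interrupt on
// the stack guard; the thread currently running JavaScript is expected to
// observe the flag at its next stack check and yield the big lock there
// (see PreemptionReceived() below).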


// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
  ASSERT(Locker::IsLocked());
  // There is currently no accounting being done for this, but there could be
  // in the future, which is why we leave this in.
}


}  // namespace internal
}  // namespace v8