The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.

Please select the desired protocol below to get the URL.

This URL provides read-only access.

Statistics
| Branch: | Revision:

main_repo / deps / v8 / src / v8threads.cc @ f230a1cf

History | View | Annotate | Download (14.9 KB)

1
// Copyright 2012 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
4
// met:
5
//
6
//     * Redistributions of source code must retain the above copyright
7
//       notice, this list of conditions and the following disclaimer.
8
//     * Redistributions in binary form must reproduce the above
9
//       copyright notice, this list of conditions and the following
10
//       disclaimer in the documentation and/or other materials provided
11
//       with the distribution.
12
//     * Neither the name of Google Inc. nor the names of its
13
//       contributors may be used to endorse or promote products derived
14
//       from this software without specific prior written permission.
15
//
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27

    
28
#include "v8.h"
29

    
30
#include "api.h"
31
#include "bootstrapper.h"
32
#include "debug.h"
33
#include "execution.h"
34
#include "v8threads.h"
35
#include "regexp-stack.h"
36

    
37
namespace v8 {
38

    
39

    
40
// Track whether this V8 instance has ever called v8::Locker. This allows the
// API code to verify that the lock is always held when V8 is being entered.
// NOTE(review): written in Locker::Initialize() and read in IsActive() with
// no synchronization -- presumably benign for a monotonic flag; confirm.
bool Locker::active_ = false;
43

    
44

    
45
// Once the Locker is initialized, the current thread will be guaranteed to have
// the lock for a given isolate.
void Locker::Initialize(v8::Isolate* isolate) {
  ASSERT(isolate != NULL);
  // Start pessimistic: this thread may already hold the lock via an
  // enclosing Locker, in which case we must not take (or release) it again.
  has_lock_= false;
  top_level_ = true;
  isolate_ = reinterpret_cast<i::Isolate*>(isolate);
  // Record that the Locker has been used at least once.
  active_ = true;
  // Get the big lock if necessary.
  if (!isolate_->thread_manager()->IsLockedByCurrentThread()) {
    isolate_->thread_manager()->Lock();
    has_lock_ = true;

    // Make sure that V8 is initialized.  Archiving of threads interferes
    // with deserialization by adding additional root pointers, so we must
    // initialize here, before anyone can call ~Locker() or Unlocker().
    if (!isolate_->IsInitialized()) {
      isolate_->Enter();
      V8::Initialize();
      isolate_->Exit();
    }

    // This may be a locker within an unlocker in which case we have to
    // get the saved state for this thread and restore it.
    if (isolate_->thread_manager()->RestoreThread()) {
      top_level_ = false;
    } else {
      // Nothing to restore: this is a fresh thread for the isolate, so
      // (re)initialize its stack guard under the execution lock.
      internal::ExecutionAccess access(isolate_);
      isolate_->stack_guard()->ClearThread(access);
      isolate_->stack_guard()->InitThread(access);
    }
    if (isolate_->IsDefaultIsolate()) {
      // This only enters if not yet entered.
      internal::Isolate::EnterDefaultIsolate();
    }
  }
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
}
84

    
85

    
86
// Returns true when the calling thread currently holds the big lock
// for |isolate|.
bool Locker::IsLocked(v8::Isolate* isolate) {
  ASSERT(isolate != NULL);
  return reinterpret_cast<i::Isolate*>(isolate)
      ->thread_manager()->IsLockedByCurrentThread();
}
91

    
92

    
93
// Returns whether v8::Locker has ever been initialized in this process
// (see the active_ flag); used by API code to decide if lock checks apply.
bool Locker::IsActive() {
  return active_;
}
96

    
97

    
98
Locker::~Locker() {
  // The lock must still be held here: either we acquired it in Initialize()
  // (has_lock_) or an enclosing Locker on this thread holds it.
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
  if (has_lock_) {
    if (isolate_->IsDefaultIsolate()) {
      // Undo the EnterDefaultIsolate() performed in Initialize().
      isolate_->Exit();
    }
    if (top_level_) {
      // Outermost Locker on this thread: drop all per-thread V8 state.
      isolate_->thread_manager()->FreeThreadResources();
    } else {
      // Locker nested inside an Unlocker: save state so the enclosing
      // Locker can restore it later.
      isolate_->thread_manager()->ArchiveThread();
    }
    isolate_->thread_manager()->Unlock();
  }
}
112

    
113

    
114
// Temporarily gives up the isolate's big lock held by the calling thread,
// archiving this thread's V8 state first so another thread can run.
void Unlocker::Initialize(v8::Isolate* isolate) {
  ASSERT(isolate != NULL);
  isolate_ = reinterpret_cast<i::Isolate*>(isolate);
  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
  if (isolate_->IsDefaultIsolate()) {
    isolate_->Exit();
  }
  // Save this thread's V8 state before releasing the lock.
  isolate_->thread_manager()->ArchiveThread();
  isolate_->thread_manager()->Unlock();
}
124

    
125

    
126
Unlocker::~Unlocker() {
  // Re-acquire the lock released in Initialize() and restore the V8 state
  // that was archived there.
  ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
  isolate_->thread_manager()->Lock();
  isolate_->thread_manager()->RestoreThread();
  if (isolate_->IsDefaultIsolate()) {
    // Mirror of the Exit() done in Initialize().
    isolate_->Enter();
  }
}
134

    
135

    
136
// Thin public wrapper: forwards to the internal ContextSwitcher after
// converting the external isolate handle to its internal representation.
void Locker::StartPreemption(v8::Isolate* isolate, int every_n_ms) {
  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
  v8::internal::ContextSwitcher::StartPreemption(internal_isolate, every_n_ms);
}
140

    
141

    
142
// Thin public wrapper for ContextSwitcher::StopPreemption().
void Locker::StopPreemption(v8::Isolate* isolate) {
  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
  v8::internal::ContextSwitcher::StopPreemption(internal_isolate);
}
146

    
147

    
148
namespace internal {
149

    
150

    
151
// Restores the calling thread's archived V8 state, if any. Returns true when
// state was restored (or the thread was only lazily archived and the archive
// could simply be discarded); returns false for a brand-new thread.
bool ThreadManager::RestoreThread() {
  ASSERT(IsLockedByCurrentThread());
  // First check whether the current thread has been 'lazily archived', i.e.
  // not archived at all.  If that is the case we put the state storage we
  // had prepared back in the free list, since we didn't need it after all.
  if (lazily_archived_thread_.Equals(ThreadId::Current())) {
    lazily_archived_thread_ = ThreadId::Invalid();
    Isolate::PerIsolateThreadData* per_thread =
        isolate_->FindPerThreadDataForThisThread();
    ASSERT(per_thread != NULL);
    ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
    lazily_archived_thread_state_->set_id(ThreadId::Invalid());
    lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
    lazily_archived_thread_state_ = NULL;
    per_thread->set_thread_state(NULL);
    return true;
  }

  // Make sure that the preemption thread cannot modify the thread state while
  // it is being archived or restored.
  ExecutionAccess access(isolate_);

  // If there is another thread that was lazily archived then we have to really
  // archive it now.
  if (lazily_archived_thread_.IsValid()) {
    EagerlyArchiveThread();
  }
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindPerThreadDataForThisThread();
  if (per_thread == NULL || per_thread->thread_state() == NULL) {
    // This is a new thread.
    isolate_->stack_guard()->InitThread(access);
    return false;
  }
  ThreadState* state = per_thread->thread_state();
  char* from = state->data();
  // Deserialize each component in the same order it was serialized by
  // EagerlyArchiveThread(); each call consumes its slice of the buffer.
  from = isolate_->handle_scope_implementer()->RestoreThread(from);
  from = isolate_->RestoreThread(from);
  from = Relocatable::RestoreState(isolate_, from);
#ifdef ENABLE_DEBUGGER_SUPPORT
  from = isolate_->debug()->RestoreDebug(from);
#endif
  from = isolate_->stack_guard()->RestoreStackGuard(from);
  from = isolate_->regexp_stack()->RestoreStack(from);
  from = isolate_->bootstrapper()->RestoreState(from);
  per_thread->set_thread_state(NULL);
  if (state->terminate_on_restore()) {
    // A termination request was queued while this thread was archived
    // (see TerminateExecution()); deliver it now.
    isolate_->stack_guard()->TerminateExecution();
    state->set_terminate_on_restore(false);
  }
  // Recycle the now-consumed state object into the free list.
  state->set_id(ThreadId::Invalid());
  state->Unlink();
  state->LinkInto(ThreadState::FREE_LIST);
  return true;
}
206

    
207

    
208
// Acquires the big per-isolate lock; mutex_owner_ is set only after the
// mutex is held, so IsLockedByCurrentThread() stays accurate.
void ThreadManager::Lock() {
  mutex_.Lock();
  mutex_owner_ = ThreadId::Current();
  ASSERT(IsLockedByCurrentThread());
}
213

    
214

    
215
// Clears the recorded owner (while still holding the mutex) and releases it.
void ThreadManager::Unlock() {
  mutex_owner_ = ThreadId::Invalid();
  mutex_.Unlock();
}
219

    
220

    
221
static int ArchiveSpacePerThread() {
222
  return HandleScopeImplementer::ArchiveSpacePerThread() +
223
                        Isolate::ArchiveSpacePerThread() +
224
#ifdef ENABLE_DEBUGGER_SUPPORT
225
                          Debug::ArchiveSpacePerThread() +
226
#endif
227
                     StackGuard::ArchiveSpacePerThread() +
228
                    RegExpStack::ArchiveSpacePerThread() +
229
                   Bootstrapper::ArchiveSpacePerThread() +
230
                    Relocatable::ArchiveSpacePerThread();
231
}
232

    
233

    
234
// A freshly constructed ThreadState is a one-element circular list (it links
// to itself) until LinkInto() places it on the manager's free or in-use list.
ThreadState::ThreadState(ThreadManager* thread_manager)
    : id_(ThreadId::Invalid()),
      terminate_on_restore_(false),
      data_(NULL),
      next_(this),
      previous_(this),
      thread_manager_(thread_manager) {}
242

    
243

    
244
// Frees the archive buffer allocated by AllocateSpace(), if any.
ThreadState::~ThreadState() { DeleteArray<char>(data_); }
247

    
248

    
249
void ThreadState::AllocateSpace() {
250
  data_ = NewArray<char>(ArchiveSpacePerThread());
251
}
252

    
253

    
254
void ThreadState::Unlink() {
255
  next_->previous_ = previous_;
256
  previous_->next_ = next_;
257
}
258

    
259

    
260
// Inserts this node immediately after the anchor of the requested list.
void ThreadState::LinkInto(List list) {
  ThreadState* anchor = (list == FREE_LIST)
                            ? thread_manager_->free_anchor_
                            : thread_manager_->in_use_anchor_;
  previous_ = anchor;
  next_ = anchor->next_;
  anchor->next_ = this;
  next_->previous_ = this;
}
269

    
270

    
271
// Returns a ThreadState from the free list, or a newly allocated one (with
// its archive buffer) when the free list is empty. A state taken from the
// free list is returned still linked; the caller is expected to Unlink() it.
ThreadState* ThreadManager::GetFreeThreadState() {
  ThreadState* first_free = free_anchor_->next_;
  if (first_free != free_anchor_) return first_free;
  // Free list is empty -- make a fresh state.
  ThreadState* fresh = new ThreadState(this);
  fresh->AllocateSpace();
  return fresh;
}
280

    
281

    
282
// Gets the first in the list of archived threads.
// Returns NULL when the in-use list is empty (see ThreadState::Next()).
ThreadState* ThreadManager::FirstThreadStateInUse() {
  return in_use_anchor_->Next();
}
286

    
287

    
288
// Successor in the in-use list, or NULL once the anchor is reached.
ThreadState* ThreadState::Next() {
  ThreadState* candidate = next_;
  if (candidate == thread_manager_->in_use_anchor_) return NULL;
  return candidate;
}
292

    
293

    
294
// Thread ids must start with 1, because in TLS having thread id 0 can't
// be distinguished from not having a thread id at all (since NULL is
// defined as 0.)
ThreadManager::ThreadManager()
    : mutex_owner_(ThreadId::Invalid()),
      lazily_archived_thread_(ThreadId::Invalid()),
      lazily_archived_thread_state_(NULL),
      free_anchor_(NULL),
      in_use_anchor_(NULL) {
  // The anchors are sentinel nodes for the two circular ThreadState lists;
  // a new ThreadState starts out linked to itself, i.e. an empty list.
  free_anchor_ = new ThreadState(this);
  in_use_anchor_ = new ThreadState(this);
}
306

    
307

    
308
// Tears down both ThreadState lists; each anchor owns its whole circle.
ThreadManager::~ThreadManager() {
  DeleteThreadStateList(in_use_anchor_);
  DeleteThreadStateList(free_anchor_);
}
312

    
313

    
314
// Deletes every node of a circular list, the anchor included.
// The list starts and ends with the anchor.
void ThreadManager::DeleteThreadStateList(ThreadState* anchor) {
  ThreadState* current = anchor->next_;
  while (current != anchor) {
    ThreadState* doomed = current;
    current = current->next_;
    delete doomed;
  }
  delete anchor;
}
323

    
324

    
325
// Lazily archives the calling thread: reserves a ThreadState and records
// this thread as "lazily archived". The actual copying of state is deferred
// to EagerlyArchiveThread(), which RestoreThread() triggers only if a
// different thread acquires the lock first.
void ThreadManager::ArchiveThread() {
  ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
  ASSERT(!IsArchived());
  ASSERT(IsLockedByCurrentThread());
  ThreadState* state = GetFreeThreadState();
  state->Unlink();
  Isolate::PerIsolateThreadData* per_thread =
      isolate_->FindOrAllocatePerThreadDataForThisThread();
  per_thread->set_thread_state(state);
  lazily_archived_thread_ = ThreadId::Current();
  lazily_archived_thread_state_ = state;
  ASSERT(state->id().Equals(ThreadId::Invalid()));
  state->set_id(CurrentId());
  ASSERT(!state->id().Equals(ThreadId::Invalid()));
}
340

    
341

    
342
// Actually copies the lazily-archived thread's V8 state into its ThreadState
// buffer and moves that state onto the in-use list, clearing the
// lazily-archived bookkeeping.
void ThreadManager::EagerlyArchiveThread() {
  ASSERT(IsLockedByCurrentThread());
  ThreadState* state = lazily_archived_thread_state_;
  state->LinkInto(ThreadState::IN_USE_LIST);
  char* to = state->data();
  // Ensure that data containing GC roots are archived first, and handle them
  // in ThreadManager::Iterate(ObjectVisitor*).
  to = isolate_->handle_scope_implementer()->ArchiveThread(to);
  to = isolate_->ArchiveThread(to);
  to = Relocatable::ArchiveState(isolate_, to);
#ifdef ENABLE_DEBUGGER_SUPPORT
  to = isolate_->debug()->ArchiveDebug(to);
#endif
  to = isolate_->stack_guard()->ArchiveStackGuard(to);
  to = isolate_->regexp_stack()->ArchiveStack(to);
  to = isolate_->bootstrapper()->ArchiveState(to);
  // No thread is lazily archived any more.
  lazily_archived_thread_ = ThreadId::Invalid();
  lazily_archived_thread_state_ = NULL;
}
361

    
362

    
363
// Releases all of the calling thread's per-thread V8 resources; called when
// a top-level Locker is destroyed (see Locker::~Locker()).
void ThreadManager::FreeThreadResources() {
  isolate_->handle_scope_implementer()->FreeThreadResources();
  isolate_->FreeThreadResources();
#ifdef ENABLE_DEBUGGER_SUPPORT
  isolate_->debug()->FreeThreadResources();
#endif
  isolate_->stack_guard()->FreeThreadResources();
  isolate_->regexp_stack()->FreeThreadResources();
  isolate_->bootstrapper()->FreeThreadResources();
}
373

    
374

    
375
bool ThreadManager::IsArchived() {
376
  Isolate::PerIsolateThreadData* data =
377
      isolate_->FindPerThreadDataForThisThread();
378
  return data != NULL && data->thread_state() != NULL;
379
}
380

    
381

    
382
// Visits the GC roots stored in every archived thread's state buffer. The
// components are visited in the same order they were written by
// EagerlyArchiveThread() (GC-root-containing data comes first there).
void ThreadManager::Iterate(ObjectVisitor* v) {
  // Expecting no threads during serialization/deserialization.
  ThreadState* state = FirstThreadStateInUse();
  while (state != NULL) {
    char* data = state->data();
    data = HandleScopeImplementer::Iterate(v, data);
    data = isolate_->Iterate(v, data);
    data = Relocatable::Iterate(v, data);
    state = state->Next();
  }
}
393

    
394

    
395
// Invokes |v| on each archived thread's isolate-level thread data.
void ThreadManager::IterateArchivedThreads(ThreadVisitor* v) {
  ThreadState* state = FirstThreadStateInUse();
  for (; state != NULL; state = state->Next()) {
    // Skip the handle-scope section at the start of the archive buffer to
    // reach the isolate's per-thread data.
    char* data =
        state->data() + HandleScopeImplementer::ArchiveSpacePerThread();
    isolate_->IterateThread(v, data);
  }
}
404

    
405

    
406
// Returns the ThreadId of the calling thread.
ThreadId ThreadManager::CurrentId() {
  return ThreadId::Current();
}
409

    
410

    
411
// Flags the archived thread with |thread_id| (if any) so that a termination
// request is delivered when its state is restored (see RestoreThread()).
void ThreadManager::TerminateExecution(ThreadId thread_id) {
  for (ThreadState* state = FirstThreadStateInUse(); state != NULL;
       state = state->Next()) {
    if (!thread_id.Equals(state->id())) continue;
    state->set_terminate_on_restore(true);
  }
}
420

    
421

    
422
// Background thread that preempts the running V8 thread every
// |every_n_ms| milliseconds (see Run()).
ContextSwitcher::ContextSwitcher(Isolate* isolate, int every_n_ms)
  : Thread("v8:CtxtSwitcher"),
    keep_going_(true),
    sleep_ms_(every_n_ms),
    isolate_(isolate) {
}
428

    
429

    
430
// Set the scheduling interval of V8 threads. This function starts the
431
// ContextSwitcher thread if needed.
432
void ContextSwitcher::StartPreemption(Isolate* isolate, int every_n_ms) {
433
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
434
  if (isolate->context_switcher() == NULL) {
435
    // If the ContextSwitcher thread is not running at the moment start it now.
436
    isolate->set_context_switcher(new ContextSwitcher(isolate, every_n_ms));
437
    isolate->context_switcher()->Start();
438
  } else {
439
    // ContextSwitcher thread is already running, so we just change the
440
    // scheduling interval.
441
    isolate->context_switcher()->sleep_ms_ = every_n_ms;
442
  }
443
}
444

    
445

    
446
// Disable preemption of V8 threads. If multiple threads want to use V8 they
447
// must cooperatively schedule amongst them from this point on.
448
void ContextSwitcher::StopPreemption(Isolate* isolate) {
449
  ASSERT(Locker::IsLocked(reinterpret_cast<v8::Isolate*>(isolate)));
450
  if (isolate->context_switcher() != NULL) {
451
    // The ContextSwitcher thread is running. We need to stop it and release
452
    // its resources.
453
    isolate->context_switcher()->keep_going_ = false;
454
    // Wait for the ContextSwitcher thread to exit.
455
    isolate->context_switcher()->Join();
456
    // Thread has exited, now we can delete it.
457
    delete(isolate->context_switcher());
458
    isolate->set_context_switcher(NULL);
459
  }
460
}
461

    
462

    
463
// Main loop of the ContextSwitcher thread: Preempt the currently running V8
464
// thread at regular intervals.
465
void ContextSwitcher::Run() {
466
  while (keep_going_) {
467
    OS::Sleep(sleep_ms_);
468
    isolate()->stack_guard()->Preempt();
469
  }
470
}
471

    
472

    
473
// Acknowledge the preemption by the receiving thread.
void ContextSwitcher::PreemptionReceived() {
  // Intentionally empty.
  // There is currently no accounting being done for this. But could be in the
  // future, which is why we leave this in.
}
478

    
479

    
480
}  // namespace internal
481
}  // namespace v8