deps/v8/src/incremental-marking.cc @ f230a1cf
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}

void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned, so we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}

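// Write barrier entry points called from generated code. In addition to
// forwarding to RecordWrite, they top the page's write barrier counter back
// up and accumulate how many barrier invocations have happened since the
// last incremental step, which Step() uses to scale its marking work.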
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object** slot,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  ASSERT(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}

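// Forces an object grey without pushing it on the marking deque. If the
// object was already black, its live-byte count is reverted here; the bytes
// are counted again when the object is marked black in a later pass (see
// Hurry()).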
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit,
                                       int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  ASSERT(Marking::IsBlack(mark_bit));
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit,
                                        int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  ASSERT(Marking::IsBlack(mark_bit));
}

class IncrementalMarkingMarkingVisitor |
215 |
: public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
|
216 |
public:
|
217 |
static void Initialize() { |
218 |
StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize(); |
219 |
table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental); |
220 |
table_.Register(kVisitNativeContext, &VisitNativeContextIncremental); |
221 |
table_.Register(kVisitJSRegExp, &VisitJSRegExp); |
222 |
} |
223 |
|
224 |
static const int kProgressBarScanningChunk = 32 * 1024; |
225 |
|
226 |
static void VisitFixedArrayIncremental(Map* map, HeapObject* object) { |
227 |
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); |
228 |
// TODO(mstarzinger): Move setting of the flag to the allocation site of
|
229 |
// the array. The visitor should just check the flag.
|
230 |
if (FLAG_use_marking_progress_bar &&
|
231 |
chunk->owner()->identity() == LO_SPACE) { |
232 |
chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR); |
233 |
} |
234 |
if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
|
235 |
Heap* heap = map->GetHeap(); |
236 |
// When using a progress bar for large fixed arrays, scan only a chunk of
|
237 |
// the array and try to push it onto the marking deque again until it is
|
238 |
// fully scanned. Fall back to scanning it through to the end in case this
|
239 |
// fails because of a full deque.
|
240 |
int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
|
241 |
int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
|
242 |
chunk->progress_bar()); |
243 |
int end_offset = Min(object_size,
|
244 |
start_offset + kProgressBarScanningChunk); |
245 |
int already_scanned_offset = start_offset;
|
246 |
bool scan_until_end = false; |
247 |
do {
|
248 |
VisitPointersWithAnchor(heap, |
249 |
HeapObject::RawField(object, 0),
|
250 |
HeapObject::RawField(object, start_offset), |
251 |
HeapObject::RawField(object, end_offset)); |
252 |
start_offset = end_offset; |
253 |
end_offset = Min(object_size, end_offset + kProgressBarScanningChunk); |
254 |
scan_until_end = heap->incremental_marking()->marking_deque()->IsFull(); |
255 |
} while (scan_until_end && start_offset < object_size);
|
256 |
chunk->set_progress_bar(start_offset); |
257 |
if (start_offset < object_size) {
|
258 |
heap->incremental_marking()->marking_deque()->UnshiftGrey(object); |
259 |
heap->incremental_marking()->NotifyIncompleteScanOfObject( |
260 |
object_size - (start_offset - already_scanned_offset)); |
261 |
} |
262 |
} else {
|
263 |
FixedArrayVisitor::Visit(map, object); |
264 |
} |
265 |
} |
266 |
|
267 |
static void VisitNativeContextIncremental(Map* map, HeapObject* object) { |
268 |
Context* context = Context::cast(object); |
269 |
|
270 |
// We will mark cache black with a separate pass
|
271 |
// when we finish marking.
|
272 |
MarkObjectGreyDoNotEnqueue(context->normalized_map_cache()); |
273 |
VisitNativeContext(map, context); |
274 |
} |
275 |
|
276 |
static void VisitWeakCollection(Map* map, HeapObject* object) { |
277 |
Heap* heap = map->GetHeap(); |
278 |
VisitPointers(heap, |
279 |
HeapObject::RawField(object, |
280 |
JSWeakCollection::kPropertiesOffset), |
281 |
HeapObject::RawField(object, JSWeakCollection::kSize)); |
282 |
} |
283 |
|
284 |
static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {} |
285 |
|
286 |
INLINE(static void VisitPointer(Heap* heap, Object** p)) { |
287 |
Object* obj = *p; |
288 |
if (obj->NonFailureIsHeapObject()) {
|
289 |
heap->mark_compact_collector()->RecordSlot(p, p, obj); |
290 |
MarkObject(heap, obj); |
291 |
} |
292 |
} |
293 |
|
294 |
INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { |
295 |
for (Object** p = start; p < end; p++) {
|
296 |
Object* obj = *p; |
297 |
if (obj->NonFailureIsHeapObject()) {
|
298 |
heap->mark_compact_collector()->RecordSlot(start, p, obj); |
299 |
MarkObject(heap, obj); |
300 |
} |
301 |
} |
302 |
} |
303 |
|
304 |
INLINE(static void VisitPointersWithAnchor(Heap* heap, |
305 |
Object** anchor, |
306 |
Object** start, |
307 |
Object** end)) { |
308 |
for (Object** p = start; p < end; p++) {
|
309 |
Object* obj = *p; |
310 |
if (obj->NonFailureIsHeapObject()) {
|
311 |
heap->mark_compact_collector()->RecordSlot(anchor, p, obj); |
312 |
MarkObject(heap, obj); |
313 |
} |
314 |
} |
315 |
} |
316 |
|
317 |
// Marks the object grey and pushes it on the marking stack.
|
318 |
INLINE(static void MarkObject(Heap* heap, Object* obj)) { |
319 |
HeapObject* heap_object = HeapObject::cast(obj); |
320 |
MarkBit mark_bit = Marking::MarkBitFrom(heap_object); |
321 |
if (mark_bit.data_only()) {
|
322 |
MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size()); |
323 |
} else if (Marking::IsWhite(mark_bit)) { |
324 |
heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit); |
325 |
} |
326 |
} |
327 |
|
328 |
// Marks the object black without pushing it on the marking stack.
|
329 |
// Returns true if object needed marking and false otherwise.
|
330 |
INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) { |
331 |
HeapObject* heap_object = HeapObject::cast(obj); |
332 |
MarkBit mark_bit = Marking::MarkBitFrom(heap_object); |
333 |
if (Marking::IsWhite(mark_bit)) {
|
334 |
mark_bit.Set(); |
335 |
MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), |
336 |
heap_object->Size()); |
337 |
return true; |
338 |
} |
339 |
return false; |
340 |
} |
341 |
}; |
342 |
|
343 |
|
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}

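// Adjusts the incremental-marking write barrier flags of an old-space page.
// While marking, pointers both to and from the page are interesting; large
// object pages are additionally flagged for rescanning on evacuation when
// compacting, since slots recorded for them are hard to filter out.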
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}

bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when expose GC is
  // deactivated, 2) when incremental marking is turned on, 3) when we are
  // currently not in a GC, and 4) when we are currently not serializing
  // or deserializing the heap.
  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      FLAG_incremental_marking_steps &&
      heap_->gc_state() == Heap::NOT_IN_GC &&
      !Serializer::enabled() &&
      heap_->isolate()->IsInitialized() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}

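// Starts incremental marking. If sweeping from the previous collection has
// not finished yet, only the SWEEPING state is entered here and the actual
// start of marking is deferred to a later Step().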
void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(FLAG_incremental_marking_steps);
  ASSERT(state_ == STOPPED);
  ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
  ASSERT(!Serializer::enabled());
  ASSERT(heap_->isolate()->IsInitialized());

  ResetStepCounters();

  if (heap_->IsSweepingComplete()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

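// Compacts the marking deque in place after a scavenge: new-space entries
// are replaced by their forwarding address (or dropped if the object did not
// survive), and stale one-word filler entries are skipped.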
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  longest_step_ = 0.0;
}

void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_ASSERTS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_to_process -= (size - unscanned_bytes_of_large_object_);
  }
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}

void IncrementalMarking::Step(intptr_t allocated_bytes,
|
897 |
CompletionAction action) { |
898 |
if (heap_->gc_state() != Heap::NOT_IN_GC ||
|
899 |
!FLAG_incremental_marking || |
900 |
!FLAG_incremental_marking_steps || |
901 |
(state_ != SWEEPING && state_ != MARKING)) { |
902 |
return;
|
903 |
} |
904 |
|
905 |
allocated_ += allocated_bytes; |
906 |
|
907 |
if (allocated_ < kAllocatedThreshold &&
|
908 |
write_barriers_invoked_since_last_step_ < |
909 |
kWriteBarriersInvokedThreshold) { |
910 |
return;
|
911 |
} |
912 |
|
913 |
if (state_ == MARKING && no_marking_scope_depth_ > 0) return; |
914 |
|
915 |
// The marking speed is driven either by the allocation rate or by the rate
|
916 |
// at which we are having to check the color of objects in the write barrier.
|
917 |
// It is possible for a tight non-allocating loop to run a lot of write
|
918 |
// barriers before we get here and check them (marking can only take place on
|
919 |
// allocation), so to reduce the lumpiness we don't use the write barriers
|
920 |
// invoked since last step directly to determine the amount of work to do.
|
921 |
intptr_t bytes_to_process = |
922 |
marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_); |
923 |
allocated_ = 0;
|
924 |
write_barriers_invoked_since_last_step_ = 0;
|
925 |
|
926 |
bytes_scanned_ += bytes_to_process; |
927 |
|
928 |
double start = 0; |
929 |
|
930 |
if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
|
931 |
FLAG_print_cumulative_gc_stat) { |
932 |
start = OS::TimeCurrentMillis(); |
933 |
} |
934 |
|
935 |
if (state_ == SWEEPING) {
|
936 |
if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) { |
937 |
bytes_scanned_ = 0;
|
938 |
StartMarking(PREVENT_COMPACTION); |
939 |
} |
940 |
} else if (state_ == MARKING) { |
941 |
ProcessMarkingDeque(bytes_to_process); |
942 |
if (marking_deque_.IsEmpty()) MarkingComplete(action);
|
943 |
} |
944 |
|
945 |
steps_count_++; |
946 |
steps_count_since_last_gc_++; |
947 |
|
948 |
bool speed_up = false; |
949 |
|
950 |
if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) { |
951 |
if (FLAG_trace_gc) {
|
952 |
PrintPID("Speed up marking after %d steps\n",
|
953 |
static_cast<int>(kMarkingSpeedAccellerationInterval)); |
954 |
} |
955 |
speed_up = true;
|
956 |
} |
957 |
|
958 |
bool space_left_is_very_small =
|
959 |
(old_generation_space_available_at_start_of_incremental_ < 10 * MB);
|
960 |
|
961 |
bool only_1_nth_of_space_that_was_available_still_left =
|
962 |
(SpaceLeftInOldSpace() * (marking_speed_ + 1) <
|
963 |
old_generation_space_available_at_start_of_incremental_); |
964 |
|
965 |
if (space_left_is_very_small ||
|
966 |
only_1_nth_of_space_that_was_available_still_left) { |
967 |
if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n"); |
968 |
speed_up = true;
|
969 |
} |
970 |
|
971 |
bool size_of_old_space_multiplied_by_n_during_marking =
|
972 |
(heap_->PromotedTotalSize() > |
973 |
(marking_speed_ + 1) *
|
974 |
old_generation_space_used_at_start_of_incremental_); |
975 |
if (size_of_old_space_multiplied_by_n_during_marking) {
|
976 |
speed_up = true;
|
977 |
if (FLAG_trace_gc) {
|
978 |
PrintPID("Speed up marking because of heap size increase\n");
|
979 |
} |
980 |
} |
981 |
|
982 |
int64_t promoted_during_marking = heap_->PromotedTotalSize() |
983 |
- old_generation_space_used_at_start_of_incremental_; |
984 |
intptr_t delay = marking_speed_ * MB; |
985 |
intptr_t scavenge_slack = heap_->MaxSemiSpaceSize(); |
986 |
|
987 |
// We try to scan at at least twice the speed that we are allocating.
|
988 |
if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) { |
989 |
if (FLAG_trace_gc) {
|
990 |
PrintPID("Speed up marking because marker was not keeping up\n");
|
991 |
} |
992 |
speed_up = true;
|
993 |
} |
994 |
|
995 |
if (speed_up) {
|
996 |
if (state_ != MARKING) {
|
997 |
if (FLAG_trace_gc) {
|
998 |
PrintPID("Postponing speeding up marking until marking starts\n");
|
999 |
} |
1000 |
} else {
|
1001 |
marking_speed_ += kMarkingSpeedAccelleration; |
1002 |
marking_speed_ = static_cast<int>( |
1003 |
Min(kMaxMarkingSpeed, |
1004 |
static_cast<intptr_t>(marking_speed_ * 1.3))); |
1005 |
if (FLAG_trace_gc) {
|
1006 |
PrintPID("Marking speed increased to %d\n", marking_speed_);
|
1007 |
} |
1008 |
} |
1009 |
} |
1010 |
|
1011 |
if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
|
1012 |
FLAG_print_cumulative_gc_stat) { |
1013 |
double end = OS::TimeCurrentMillis();
|
1014 |
double delta = (end - start);
|
1015 |
longest_step_ = Max(longest_step_, delta); |
1016 |
steps_took_ += delta; |
1017 |
steps_took_since_last_gc_ += delta; |
1018 |
heap_->AddMarkingTime(delta); |
1019 |
} |
1020 |
} |
1021 |
|
1022 |
|
void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal