Revision f230a1cf deps/v8/src/spaces.cc

--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -29,6 +29,7 @@
 
 #include "macro-assembler.h"
 #include "mark-compact.h"
+#include "msan.h"
 #include "platform.h"
 
 namespace v8 {
@@ -717,5 +718,6 @@
                                                  executable,
                                                  owner);
   result->set_reserved_memory(&reservation);
+  MSAN_MEMORY_IS_INITIALIZED(base, chunk_size);
   return result;
 }
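
Note: the msan.h include and the MSAN_MEMORY_IS_INITIALIZED(base, chunk_size) call added above tell MemorySanitizer to treat the freshly committed chunk memory as initialized, so later reads of this VM-managed memory are not flagged. A minimal sketch of what such a macro plausibly expands to (the real definition lives in the msan.h added by this revision; the no-op fallback shown here is an assumption):

#ifdef MEMORY_SANITIZER
# include <sanitizer/msan_interface.h>  // Provides __msan_unpoison().
# define MSAN_MEMORY_IS_INITIALIZED(start, size) \
    __msan_unpoison(reinterpret_cast<const void*>(start), (size))
#else
# define MSAN_MEMORY_IS_INITIALIZED(start, size)  // No-op in regular builds.
#endif
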
@@ -958,8 +960,8 @@
       * AreaSize();
   accounting_stats_.Clear();
 
-  allocation_info_.top = NULL;
-  allocation_info_.limit = NULL;
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
 
   anchor_.InitializeAsAnchor(this);
 }
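
The pattern repeated throughout this revision -- allocation_info_.top and allocation_info_.limit turning into top()/set_top() and limit()/set_limit() calls -- reflects AllocationInfo being encapsulated as a class with private fields and accessors. A minimal sketch of the shape spaces.cc now assumes (illustrative only, not the actual spaces.h definition):

#include <cstddef>
#include <cstdint>

typedef uint8_t* Address;  // V8's Address is a byte pointer (globals.h).

class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {}

  // Accessors used by spaces.cc after this change.
  Address top() const { return top_; }
  void set_top(Address top) { top_ = top; }
  Address limit() const { return limit_; }
  void set_limit(Address limit) { limit_ = limit; }

 private:
  Address top_;    // Current allocation pointer.
  Address limit_;  // Address past which allocation must take the slow path.
};
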
@@ -988,7 +990,7 @@
 
 size_t PagedSpace::CommittedPhysicalMemory() {
   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = 0;
   PageIterator it(this);
   while (it.has_next()) {
@@ -1056,7 +1058,7 @@
   int size = 0;
   switch (identity()) {
     case OLD_POINTER_SPACE:
-      size = 64 * kPointerSize * KB;
+      size = 72 * kPointerSize * KB;
       break;
     case OLD_DATA_SPACE:
       size = 192 * KB;
@@ -1077,7 +1079,12 @@
         // upgraded to handle small pages.
         size = AreaSize();
       } else {
-        size = 384 * KB;
+#if V8_TARGET_ARCH_MIPS
+        // TODO(plind): Investigate larger code stubs size on MIPS.
+        size = 480 * KB;
+#else
+        size = 416 * KB;
+#endif
       }
       break;
     default:
@@ -1135,8 +1142,9 @@
     DecreaseUnsweptFreeBytes(page);
   }
 
-  if (Page::FromAllocationTop(allocation_info_.top) == page) {
-    allocation_info_.top = allocation_info_.limit = NULL;
+  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
   }
 
   if (unlink) {
@@ -1163,12 +1171,12 @@
   if (was_swept_conservatively_) return;
 
   bool allocation_pointer_found_in_space =
-      (allocation_info_.top == allocation_info_.limit);
+      (allocation_info_.top() == allocation_info_.limit());
   PageIterator page_iterator(this);
   while (page_iterator.has_next()) {
     Page* page = page_iterator.next();
     CHECK(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top)) {
+    if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
     CHECK(page->WasSweptPrecisely());
@@ -1279,8 +1287,8 @@
   }
 
   start_ = NULL;
-  allocation_info_.top = NULL;
-  allocation_info_.limit = NULL;
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
 
   to_space_.TearDown();
   from_space_.TearDown();
@@ -1337,22 +1345,22 @@
       }
     }
   }
-  allocation_info_.limit = to_space_.page_high();
+  allocation_info_.set_limit(to_space_.page_high());
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
 
 
 void NewSpace::UpdateAllocationInfo() {
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
-  allocation_info_.top = to_space_.page_low();
-  allocation_info_.limit = to_space_.page_high();
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  allocation_info_.set_top(to_space_.page_low());
+  allocation_info_.set_limit(to_space_.page_high());
 
   // Lower limit during incremental marking.
   if (heap()->incremental_marking()->IsMarking() &&
       inline_allocation_limit_step() != 0) {
     Address new_limit =
-        allocation_info_.top + inline_allocation_limit_step();
-    allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+        allocation_info_.top() + inline_allocation_limit_step();
+    allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
   }
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 }
@@ -1371,7 +1379,7 @@
 
 
 bool NewSpace::AddFreshPage() {
-  Address top = allocation_info_.top;
+  Address top = allocation_info_.top();
   if (NewSpacePage::IsAtStart(top)) {
     // The current page is already empty. Don't try to make another.
 
@@ -1403,15 +1411,16 @@
 
 
 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top;
+  Address old_top = allocation_info_.top();
   Address new_top = old_top + size_in_bytes;
   Address high = to_space_.page_high();
-  if (allocation_info_.limit < high) {
+  if (allocation_info_.limit() < high) {
     // Incremental marking has lowered the limit to get a
     // chance to do a step.
-    allocation_info_.limit = Min(
-        allocation_info_.limit + inline_allocation_limit_step_,
+    Address new_limit = Min(
+        allocation_info_.limit() + inline_allocation_limit_step_,
         high);
+    allocation_info_.set_limit(new_limit);
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
     heap()->incremental_marking()->Step(
         bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
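
For context on the hunk above, where the limit sits below the real page end (high) and is raised by at most inline_allocation_limit_step_ at a time: the new-space fast path is a bump-pointer allocation that merely compares top against limit, so keeping the limit low forces allocation into SlowAllocateRaw() periodically, which is where incremental marking gets to do a Step(). A simplified, self-contained sketch of that interplay (not the actual V8 fast path):

#include <cstddef>
#include <cstdint>

typedef uint8_t* Address;

struct AllocationInfoSketch {
  Address top;    // Next free byte.
  Address limit;  // May be held below the real end of the current page.
};

// Bump-pointer fast path: succeeds only while top stays below limit.
Address AllocateRawSketch(AllocationInfoSketch* info, int size_in_bytes) {
  if (info->limit - info->top < size_in_bytes) {
    return NULL;  // Caller falls back to the slow path (SlowAllocateRaw).
  }
  Address result = info->top;
  info->top += size_in_bytes;
  return result;
}
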
@@ -1520,20 +1529,18 @@
 bool SemiSpace::Commit() {
   ASSERT(!is_committed());
   int pages = capacity_ / Page::kPageSize;
-  Address end = start_ + maximum_capacity_;
-  Address start = end - pages * Page::kPageSize;
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
                                                           capacity_,
                                                           executable())) {
     return false;
   }
 
-  NewSpacePage* page = anchor();
-  for (int i = 1; i <= pages; i++) {
+  NewSpacePage* current = anchor();
+  for (int i = 0; i < pages; i++) {
     NewSpacePage* new_page =
-      NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
-    new_page->InsertAfter(page);
-    page = new_page;
+      NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+    new_page->InsertAfter(current);
+    current = new_page;
   }
 
   committed_ = true;
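
SemiSpace::Commit() (and GrowTo()/ShrinkTo() below) now lay pages out upward from start_ instead of downward from start_ + maximum_capacity_. A standalone illustration of the two address sequences, using made-up values for the page size and reservation start and assuming capacity_ == maximum_capacity_ for simplicity (this is not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kPageSize = 0x100000;  // Assumed 1 MB pages.
  const uintptr_t start = 0x40000000;    // Hypothetical start_ of the semispace.
  const int pages = 4;
  const uintptr_t end = start + pages * kPageSize;

  // Old scheme: page addresses computed downward from the end of the reservation.
  for (int i = 1; i <= pages; i++)
    std::printf("old page %d at %#llx\n", i,
                static_cast<unsigned long long>(end - i * kPageSize));

  // New scheme: page addresses computed upward from start_.
  for (int i = 0; i < pages; i++)
    std::printf("new page %d at %#llx\n", i,
                static_cast<unsigned long long>(start + i * kPageSize));
  return 0;
}
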
@@ -1577,20 +1584,18 @@
   int pages_before = capacity_ / Page::kPageSize;
   int pages_after = new_capacity / Page::kPageSize;
 
-  Address end = start_ + maximum_capacity_;
-  Address start = end - new_capacity;
   size_t delta = new_capacity - capacity_;
 
   ASSERT(IsAligned(delta, OS::AllocateAlignment()));
   if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      start, delta, executable())) {
+      start_ + capacity_, delta, executable())) {
     return false;
   }
   capacity_ = new_capacity;
   NewSpacePage* last_page = anchor()->prev_page();
   ASSERT(last_page != anchor());
-  for (int i = pages_before + 1; i <= pages_after; i++) {
-    Address page_address = end - i * Page::kPageSize;
+  for (int i = pages_before; i < pages_after; i++) {
+    Address page_address = start_ + i * Page::kPageSize;
     NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
                                                       page_address,
                                                       this);
@@ -1610,25 +1615,20 @@
   ASSERT(new_capacity >= initial_capacity_);
   ASSERT(new_capacity < capacity_);
   if (is_committed()) {
-    // Semispaces grow backwards from the end of their allocated capacity,
-    // so we find the before and after start addresses relative to the
-    // end of the space.
-    Address space_end = start_ + maximum_capacity_;
-    Address old_start = space_end - capacity_;
     size_t delta = capacity_ - new_capacity;
     ASSERT(IsAligned(delta, OS::AllocateAlignment()));
 
     MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
-    if (!allocator->UncommitBlock(old_start, delta)) {
+    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
       return false;
     }
 
     int pages_after = new_capacity / Page::kPageSize;
     NewSpacePage* new_last_page =
-        NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
     new_last_page->set_next_page(anchor());
     anchor()->set_prev_page(new_last_page);
-    ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+    ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
   }
 
   capacity_ = new_capacity;
@@ -1975,7 +1975,7 @@
 
 size_t NewSpace::CommittedPhysicalMemory() {
   if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
   size_t size = to_space_.CommittedPhysicalMemory();
   if (from_space_.is_committed()) {
     size += from_space_.CommittedPhysicalMemory();
@@ -2501,9 +2501,9 @@
   Object* object = NULL;
   if (!maybe->ToObject(&object)) return false;
   HeapObject* allocation = HeapObject::cast(object);
-  Address top = allocation_info_.top;
+  Address top = allocation_info_.top();
   if ((top - bytes) == allocation->address()) {
-    allocation_info_.top = allocation->address();
+    allocation_info_.set_top(allocation->address());
     return true;
   }
   // There may be a borderline case here where the allocation succeeded, but
@@ -2549,9 +2549,9 @@
 bool PagedSpace::ReserveSpace(int size_in_bytes) {
   ASSERT(size_in_bytes <= AreaSize());
   ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
-  Address current_top = allocation_info_.top;
+  Address current_top = allocation_info_.top();
   Address new_top = current_top + size_in_bytes;
-  if (new_top <= allocation_info_.limit) return true;
+  if (new_top <= allocation_info_.limit()) return true;
 
   HeapObject* new_area = free_list_.Allocate(size_in_bytes);
   if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
@@ -2626,16 +2626,17 @@
 
 
 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
-  if (allocation_info_.top >= allocation_info_.limit) return;
+  if (allocation_info_.top() >= allocation_info_.limit()) return;
 
-  if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+  if (Page::FromAllocationTop(allocation_info_.top())->
+      IsEvacuationCandidate()) {
     // Create filler object to keep page iterable if it was iterable.
     int remaining =
-        static_cast<int>(allocation_info_.limit - allocation_info_.top);
-    heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
 
-    allocation_info_.top = NULL;
-    allocation_info_.limit = NULL;
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
   }
 }
 
@@ -2685,6 +2686,7 @@
 
   // Try to expand the space and allocate in the new next page.
   if (Expand()) {
+    ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
     return free_list_.Allocate(size_in_bytes);
   }
 
