Revision f230a1cf deps/v8/src/mips/lithium-codegen-mips.cc

--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -98,24 +98,6 @@
 }
 
 
-void LCodeGen::Comment(const char* format, ...) {
-  if (!FLAG_code_comments) return;
-  char buffer[4 * KB];
-  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
-  va_list arguments;
-  va_start(arguments, format);
-  builder.AddFormattedList(format, arguments);
-  va_end(arguments);
-
-  // Copy the string before recording it in the assembler to avoid
-  // issues when the stack allocated buffer goes out of scope.
-  size_t length = builder.position();
-  Vector<char> copy = Vector<char>::New(length + 1);
-  OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
-  masm()->RecordComment(copy.start());
-}
-
-
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -151,21 +133,7 @@
 
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    if (info()->IsStub()) {
-      __ Push(ra, fp, cp);
-      __ Push(Smi::FromInt(StackFrame::STUB));
-      // Adjust FP to point to saved FP.
-      __ Addu(fp, sp, Operand(2 * kPointerSize));
-    } else {
-      // The following three instructions must remain together and unmodified
-      // for code aging to work properly.
-      __ Push(ra, fp, cp, a1);
-      // Add unused nop to ensure prologue sequence is identical for
-      // full-codegen and lithium-codegen.
-      __ nop(Assembler::CODE_AGE_SEQUENCE_NOP);
-      // Adj. FP to point to saved FP.
-      __ Addu(fp, sp, Operand(2 * kPointerSize));
-    }
+    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
     frame_is_built_ = true;
     info_->AddNoFrameRange(0, masm_->pc_offset());
   }
@@ -242,6 +210,8 @@
 
   // Trace the call.
   if (FLAG_trace && info()->IsOptimizing()) {
+    // We have not executed any compiled code yet, so cp still holds the
+    // incoming context.
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   return !is_aborted();
@@ -263,45 +233,15 @@
 }
 
 
-bool LCodeGen::GenerateBody() {
-  ASSERT(is_generating());
-  bool emit_instructions = true;
-  for (current_instruction_ = 0;
-       !is_aborted() && current_instruction_ < instructions_->length();
-       current_instruction_++) {
-    LInstruction* instr = instructions_->at(current_instruction_);
-
-    // Don't emit code for basic blocks with a replacement.
-    if (instr->IsLabel()) {
-      emit_instructions = !LLabel::cast(instr)->HasReplacement();
-    }
-    if (!emit_instructions) continue;
-
-    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
-      Comment(";;; <@%d,#%d> %s",
-              current_instruction_,
-              instr->hydrogen_value()->id(),
-              instr->Mnemonic());
-    }
-
-    RecordAndUpdatePosition(instr->position());
-
-    instr->CompileToNative(this);
-  }
-  EnsureSpaceForLazyDeopt();
-  last_lazy_deopt_pc_ = masm()->pc_offset();
-  return !is_aborted();
-}
-
-
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
 
-      int pos = instructions_->at(code->instruction_index())->position();
-      RecordAndUpdatePosition(pos);
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -701,10 +641,8 @@
                                RelocInfo::Mode mode,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
-  EnsureSpaceForLazyDeopt();
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   ASSERT(instr != NULL);
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
   __ Call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 }
@@ -712,20 +650,36 @@
 
 void LCodeGen::CallRuntime(const Runtime::Function* function,
                            int num_arguments,
-                           LInstruction* instr) {
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
   ASSERT(instr != NULL);
-  LPointerMap* pointers = instr->pointer_map();
-  ASSERT(pointers != NULL);
-  RecordPosition(pointers->position());
 
-  __ CallRuntime(function, num_arguments);
+  __ CallRuntime(function, num_arguments, save_doubles);
+
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ lw(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ LoadObject(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                        int argc,
-                                       LInstruction* instr) {
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
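
Note: the new LoadContextFromDeferred helper centralizes how deferred code restores the cp register before a runtime call: the context may live in a register, in a stack slot, or be a compile-time constant, and exactly one case must apply. A minimal standalone C++ sketch of that three-way dispatch (OperandKind, Operand and LoadContext are illustrative stand-ins, not the real Lithium classes):

    #include <cassert>
    #include <cstdio>

    // Hypothetical stand-in for LOperand: where a value lives right now.
    enum class OperandKind { kRegister, kStackSlot, kConstant };

    struct Operand {
      OperandKind kind;
      int index;  // register number, stack slot index, or constant index
    };

    // Mirrors the shape of LCodeGen::LoadContextFromDeferred: one of three
    // cases applies; anything else would be a code-generation bug.
    void LoadContext(const Operand& context) {
      switch (context.kind) {
        case OperandKind::kRegister:
          std::printf("move cp, r%d\n", context.index);
          break;
        case OperandKind::kStackSlot:
          std::printf("lw cp, [fp + slot %d]\n", context.index);
          break;
        case OperandKind::kConstant:
          std::printf("li cp, constant #%d\n", context.index);
          break;
        default:
          assert(false);  // UNREACHABLE() in the real code
      }
    }

    int main() {
      LoadContext({OperandKind::kStackSlot, 3});  // e.g. spilled context
      return 0;
    }
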
@@ -835,26 +789,31 @@
 
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
+  ZoneList<Handle<JSObject> > objects(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode mode = it.rinfo()->rmode();
-    if (mode == RelocInfo::EMBEDDED_OBJECT &&
-        it.rinfo()->target_object()->IsMap()) {
-      Handle<Map> map(Map::cast(it.rinfo()->target_object()));
-      if (map->CanTransition()) {
+    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+      if (it.rinfo()->target_object()->IsMap()) {
+        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
         maps.Add(map, zone());
+      } else if (it.rinfo()->target_object()->IsJSObject()) {
+        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+        objects.Add(object, zone());
       }
     }
   }
 #ifdef VERIFY_HEAP
-  // This disables verification of weak embedded maps after full GC.
+  // This disables verification of weak embedded objects after full GC.
   // AddDependentCode can cause a GC, which would observe the state where
   // this code is not yet in the depended code lists of the embedded maps.
-  NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
 #endif
   for (int i = 0; i < maps.length(); i++) {
     maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
   }
+  for (int i = 0; i < objects.length(); i++) {
+    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+  }
 }
 
 
@@ -950,10 +909,6 @@
       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
     }
   }
-  if (kind & Safepoint::kWithRegisters) {
-    // Register cp always contains a pointer to the context.
-    safepoint.DefinePointerRegister(cp, zone());
-  }
 }
 
 
@@ -964,7 +919,7 @@
 
 
 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
-  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+  LPointerMap empty_pointers(zone());
   RecordSafepoint(&empty_pointers, deopt_mode);
 }
 
@@ -986,17 +941,10 @@
 }
 
 
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
-  if (position >= 0 && position != old_position_) {
-    masm()->positions_recorder()->RecordPosition(position);
-    old_position_ = position;
-  }
+  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -1046,6 +994,7 @@
 
 
 void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(v0));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpConstructResult: {
@@ -1063,11 +1012,6 @@
       CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
       break;
     }
-    case CodeStub::NumberToString: {
-      NumberToStringStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
     case CodeStub::StringCompare: {
       StringCompareStub stub;
       CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1408,11 +1352,11 @@
   Register left = ToRegister(instr->left());
   LOperand* right_op = instr->right();
 
-  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   bool bailout_on_minus_zero =
     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
 
-  if (right_op->IsConstantOperand() && !can_overflow) {
+  if (right_op->IsConstantOperand()) {
     int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
 
     if (bailout_on_minus_zero && (constant < 0)) {
@@ -1423,7 +1367,12 @@
 
     switch (constant) {
       case -1:
-        __ Subu(result, zero_reg, left);
+        if (overflow) {
+          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
+          DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+        } else {
+          __ Subu(result, zero_reg, left);
+        }
         break;
       case 0:
         if (bailout_on_minus_zero) {
@@ -1444,27 +1393,23 @@
         int32_t mask = constant >> 31;
         uint32_t constant_abs = (constant + mask) ^ mask;
 
-        if (IsPowerOf2(constant_abs) ||
-            IsPowerOf2(constant_abs - 1) ||
-            IsPowerOf2(constant_abs + 1)) {
-          if (IsPowerOf2(constant_abs)) {
-            int32_t shift = WhichPowerOf2(constant_abs);
-            __ sll(result, left, shift);
-          } else if (IsPowerOf2(constant_abs - 1)) {
-            int32_t shift = WhichPowerOf2(constant_abs - 1);
-            __ sll(scratch, left, shift);
-            __ Addu(result, scratch, left);
-          } else if (IsPowerOf2(constant_abs + 1)) {
-            int32_t shift = WhichPowerOf2(constant_abs + 1);
-            __ sll(scratch, left, shift);
-            __ Subu(result, scratch, left);
-          }
-
-          // Correct the sign of the result is the constant is negative.
-          if (constant < 0)  {
-            __ Subu(result, zero_reg, result);
-          }
-
+        if (IsPowerOf2(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ sll(result, left, shift);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0)  __ Subu(result, zero_reg, result);
+        } else if (IsPowerOf2(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ sll(scratch, left, shift);
+          __ Addu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0)  __ Subu(result, zero_reg, result);
+        } else if (IsPowerOf2(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ sll(scratch, left, shift);
+          __ Subu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0)  __ Subu(result, zero_reg, result);
        } else {
           // Generate standard code.
           __ li(at, constant);
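
Note: both sides of this hunk implement the same multiply-by-constant strength reduction; the rewrite just moves the sign fix into each branch. A self-contained C++ sketch verifying the identities the generated code relies on — branch-free absolute value via (c + mask) ^ mask, and x*c as shifts plus one add/sub when |c| is 2^k, 2^k + 1, or 2^k - 1 (MulByConstant and Shl are illustrative helpers, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <initializer_list>

    static bool IsPowerOf2(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

    static int WhichPowerOf2(uint32_t x) {
      int shift = 0;
      while (x > 1) { x >>= 1; ++shift; }
      return shift;
    }

    // Well-defined left shift of a possibly negative 32-bit value.
    static int32_t Shl(int32_t v, int shift) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) << shift);
    }

    int32_t MulByConstant(int32_t left, int32_t constant) {
      int32_t mask = constant >> 31;                     // 0 or -1
      uint32_t constant_abs = (constant + mask) ^ mask;  // |constant|
      int32_t result;
      if (IsPowerOf2(constant_abs)) {
        result = Shl(left, WhichPowerOf2(constant_abs));
      } else if (IsPowerOf2(constant_abs - 1)) {
        result = Shl(left, WhichPowerOf2(constant_abs - 1)) + left;
      } else if (IsPowerOf2(constant_abs + 1)) {
        result = Shl(left, WhichPowerOf2(constant_abs + 1)) - left;
      } else {
        return left * constant;  // the "generate standard code" path
      }
      // Correct the sign of the result if the constant is negative.
      return constant < 0 ? -result : result;
    }

    int main() {
      for (int32_t c : {-9, -8, -7, -2, -1, 2, 3, 4, 5, 7, 8, 9, 17, 31, 33})
        for (int32_t x : {-5, -1, 0, 1, 42})
          assert(MulByConstant(x, c) == x * c);
      return 0;
    }
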
@@ -1473,12 +1418,10 @@
     }
 
   } else {
-    Register right = EmitLoadRegister(right_op, scratch);
-    if (bailout_on_minus_zero) {
-      __ Or(ToRegister(instr->temp()), left, right);
-    }
+    ASSERT(right_op->IsRegister());
+    Register right = ToRegister(right_op);
 
-    if (can_overflow) {
+    if (overflow) {
       // hi:lo = left * right.
       if (instr->hydrogen()->representation().IsSmi()) {
         __ SmiUntag(result, left);
@@ -1502,12 +1445,13 @@
     }
 
     if (bailout_on_minus_zero) {
-      // Bail out if the result is supposed to be negative zero.
       Label done;
-      __ Branch(&done, ne, result, Operand(zero_reg));
-      DeoptimizeIf(lt,
+      __ Xor(at, left, right);
+      __ Branch(&done, ge, at, Operand(zero_reg));
+      // Bail out if the result is minus zero.
+      DeoptimizeIf(eq,
                    instr->environment(),
-                   ToRegister(instr->temp()),
+                   result,
                    Operand(zero_reg));
       __ bind(&done);
     }
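
Note: the new minus-zero bailout no longer needs a temp register: it first tests whether the operand signs differ (left ^ right has its sign bit set), and only then deopts on a zero result, since an integer product of zero stands for the double -0.0 exactly when one factor was negative. A small C++ sketch of that reasoning (WouldBeMinusZero is an illustrative name):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Signs differ iff the XOR of the two values is negative; a zero
    // product with differing signs is the only case that maps to -0.0.
    static bool WouldBeMinusZero(int32_t left, int32_t right) {
      return (left * right) == 0 && ((left ^ right) < 0);
    }

    int main() {
      assert(WouldBeMinusZero(-5, 0));    // -5 * 0 -> -0.0, must deopt
      assert(!WouldBeMinusZero(5, 0));    //  5 * 0 -> +0.0, fine
      assert(!WouldBeMinusZero(-5, 3));   // non-zero result, fine
      assert(std::signbit(-5.0 * 0.0));   // the IEEE double analogue
      assert(!std::signbit(5.0 * 0.0));
      return 0;
    }
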
@@ -1789,33 +1733,43 @@
 
 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
+  LOperand* index_op = instr->index();
   Register value = ToRegister(instr->value());
   Register scratch = scratch0();
   String::Encoding encoding = instr->encoding();
 
   if (FLAG_debug_code) {
-    __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
-    __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
 
-    __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
+    __ And(scratch, scratch,
+           Operand(kStringRepresentationMask | kStringEncodingMask));
     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
+    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                 ? one_byte_seq_type : two_byte_seq_type));
     __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
   }
 
-  __ Addu(scratch,
-          string,
-          Operand(SeqString::kHeaderSize - kHeapObjectTag));
-  if (encoding == String::ONE_BYTE_ENCODING) {
-    __ Addu(at, scratch, index);
-    __ sb(value, MemOperand(at));
+  if (index_op->IsConstantOperand()) {
+    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ sb(value,
+          FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+    } else {
+      __ sh(value,
+          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+    }
   } else {
-    __ sll(at, index, 1);
-    __ Addu(at, scratch, at);
-    __ sh(value, MemOperand(at));
+    Register index = ToRegister(index_op);
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ Addu(scratch, string, Operand(index));
+      __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+    } else {
+      __ sll(scratch, index, 1);
+      __ Addu(scratch, string, scratch);
+      __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+    }
   }
 }
 
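
Note: the new constant-index fast path folds the whole store address into one fixed field offset, so no address arithmetic is emitted at all. A tiny C++ sketch of the offset computation, under stated assumptions — the header-size and tag constants below are illustrative stand-ins for the real SeqString layout, not values taken from this diff:

    #include <cassert>
    #include <cstdint>

    const int kHeapObjectTag = 1;         // tagged pointers are off by one
    const int kSeqStringHeaderSize = 12;  // assumed header size, for demo only

    // Offset of character `index` from the (tagged) string pointer.
    int32_t CharFieldOffset(int index, bool one_byte_encoding) {
      int char_size = one_byte_encoding ? 1 : 2;  // sb vs. sh store width
      return kSeqStringHeaderSize + index * char_size - kHeapObjectTag;
    }

    int main() {
      assert(CharFieldOffset(3, true) == 12 + 3 - 1);   // one-byte string
      assert(CharFieldOffset(3, false) == 12 + 6 - 1);  // two-byte string
      return 0;
    }
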

  
@@ -1823,6 +1777,7 @@
 void LCodeGen::DoThrow(LThrow* instr) {
   Register input_reg = EmitLoadRegister(instr->value(), at);
   __ push(input_reg);
+  ASSERT(ToRegister(instr->context()).is(cp));
   CallRuntime(Runtime::kThrow, 1, instr);
 
   if (FLAG_debug_code) {
@@ -1974,6 +1929,7 @@
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->left()).is(a1));
   ASSERT(ToRegister(instr->right()).is(a0));
   ASSERT(ToRegister(instr->result()).is(v0));
@@ -1986,13 +1942,6 @@
 }
 
 
-int LCodeGen::GetNextEmittedBlock() const {
-  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
-    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
-  }
-  return -1;
-}
-
 template<class InstrType>
 void LCodeGen::EmitBranch(InstrType instr,
                           Condition condition,
@@ -2057,25 +2006,6 @@
 }
 
 
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
-  Representation r = instr->hydrogen()->value()->representation();
-  if (r.IsSmiOrInteger32() || r.IsDouble()) {
-    EmitBranch(instr, al, zero_reg, Operand(zero_reg));
-  } else {
-    ASSERT(r.IsTagged());
-    Register reg = ToRegister(instr->value());
-    HType type = instr->hydrogen()->value()->type();
-    if (type.IsTaggedNumber()) {
-      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
-    }
-    __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-    __ lw(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    EmitBranch(instr, eq, scratch0(), Operand(at));
-  }
-}
-
-
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32() || r.IsSmi()) {
@@ -2223,6 +2153,10 @@
     case Token::EQ_STRICT:
       cond = eq;
       break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
     case Token::LT:
       cond = is_unsigned ? lo : lt;
       break;
@@ -2439,6 +2373,7 @@
 
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2598,6 +2533,7 @@
 
 
 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Label true_label, done;
   ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
   ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
@@ -2708,6 +2644,7 @@
   InstanceofStub stub(flags);
 
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  LoadContextFromDeferred(instr->context());
 
   // Get the temp register reserved by the instruction. This needs to be t0 as
   // its slot of the pushing of safepoint registers is used to communicate the
@@ -2736,15 +2673,8 @@
 }
 
 
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
-  Register object = ToRegister(instr->object());
-  Register result = ToRegister(instr->result());
-  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2768,8 +2698,11 @@
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace && info()->IsOptimizing()) {
     // Push the return value on the stack as the parameter.
-    // Runtime::TraceExit returns its parameter in v0.
+    // Runtime::TraceExit returns its parameter in v0. We're leaving the code
+    // managed by the register allocator and tearing down the frame, it's
+    // safe to write to the context register.
     __ push(v0);
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
@@ -2814,7 +2747,7 @@
 
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
-  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
   __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2824,6 +2757,7 @@
 
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->global_object()).is(a0));
   ASSERT(ToRegister(instr->result()).is(v0));
 
@@ -2840,7 +2774,7 @@
   Register cell = scratch0();
 
   // Load the cell.
-  __ li(cell, Operand(instr->hydrogen()->cell()));
+  __ li(cell, Operand(instr->hydrogen()->cell().handle()));
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
@@ -2861,6 +2795,7 @@
 
 
 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->global_object()).is(a1));
   ASSERT(ToRegister(instr->value()).is(a0));
 
@@ -2937,7 +2872,12 @@
 
   if (access.IsExternalMemory()) {
     Register result = ToRegister(instr->result());
-    __ lw(result, MemOperand(object, offset));
+    MemOperand operand = MemOperand(object, offset);
+    if (access.representation().IsByte()) {
+      __ lb(result, operand);
+    } else {
+      __ lw(result, operand);
+    }
     return;
   }
 
@@ -2948,16 +2888,21 @@
   }
 
   Register result = ToRegister(instr->result());
-  if (access.IsInobject()) {
-    __ lw(result, FieldMemOperand(object, offset));
-  } else {
+  if (!access.IsInobject()) {
     __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    __ lw(result, FieldMemOperand(result, offset));
+    object = result;
+  }
+  MemOperand operand = FieldMemOperand(object, offset);
+  if (access.representation().IsByte()) {
+    __ lb(result, operand);
+  } else {
+    __ lw(result, operand);
   }
 }
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(a0));
   ASSERT(ToRegister(instr->result()).is(v0));
 
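
Note: the field load now switches on the field's representation width — a Byte field uses a sign-extending one-byte load (lb on MIPS), everything else a full-word load (lw). A plain C++ analogue of that dispatch, assuming a little-endian host as most MIPS boards running this code would be (LoadField is an illustrative helper):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int32_t LoadField(const uint8_t* object, int offset, bool is_byte) {
      if (is_byte) {
        int8_t b;
        std::memcpy(&b, object + offset, 1);  // lb: sign-extending byte load
        return b;
      }
      int32_t w;
      std::memcpy(&w, object + offset, 4);    // lw: 32-bit word load
      return w;
    }

    int main() {
      uint8_t obj[8] = {0};
      obj[4] = 0xFF;
      assert(LoadField(obj, 4, true) == -1);     // byte field, sign-extended
      assert(LoadField(obj, 4, false) == 0xFF);  // word field (little-endian)
      return 0;
    }
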

  
@@ -3011,6 +2956,12 @@
 }
 
 
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
 void LCodeGen::DoLoadExternalArrayPointer(
     LLoadExternalArrayPointer* instr) {
   Register to_reg = ToRegister(instr->result());
@@ -3132,28 +3083,31 @@
   Register scratch = scratch0();
 
   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
-      ? (element_size_shift - kSmiTagSize) : element_size_shift;
-  int constant_key = 0;
+
+  int base_offset =
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      (instr->additional_index() << element_size_shift);
   if (key_is_constant) {
-    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     if (constant_key & 0xF0000000) {
       Abort(kArrayIndexConstantValueTooBig);
     }
-  } else {
-    key = ToRegister(instr->key());
+    base_offset += constant_key << element_size_shift;
   }
+  __ Addu(scratch, elements, Operand(base_offset));
 
-  int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
-      ((constant_key + instr->additional_index()) << element_size_shift);
   if (!key_is_constant) {
-    __ sll(scratch, key, shift_size);
-    __ Addu(elements, elements, scratch);
+    key = ToRegister(instr->key());
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - kSmiTagSize) : element_size_shift;
+    __ sll(at, key, shift_size);
+    __ Addu(scratch, scratch, at);
   }
-  __ Addu(elements, elements, Operand(base_offset));
-  __ ldc1(result, MemOperand(elements));
+
+  __ ldc1(result, MemOperand(scratch));
+
   if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+    __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
     DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
  }
 }
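
Note: the rewritten load folds everything known at compile time — array header, tag correction, and the instruction's additional_index (plus the key, when constant) — into a single base offset, so at most one shift/add pair is emitted for a variable key. A standalone C++ sketch of the offset arithmetic; the constants are illustrative stand-ins for the real FixedDoubleArray layout values:

    #include <cassert>

    const int kHeapObjectTag = 1;         // tagged-pointer correction
    const int kDoubleArrayHeaderSize = 8; // assumed header size, demo only
    const int kElementSizeShift = 3;      // 8-byte doubles: index << 3

    int EffectiveOffset(int additional_index, int constant_key) {
      int base_offset = kDoubleArrayHeaderSize - kHeapObjectTag +
                        (additional_index << kElementSizeShift);
      base_offset += constant_key << kElementSizeShift;  // constant-key case
      return base_offset;
    }

    int main() {
      // additional_index 2, constant key 5 -> header + (2 + 5) * 8 - tag.
      assert(EffectiveOffset(2, 5) == 8 - 1 + 7 * 8);
      return 0;
    }
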
@@ -3172,7 +3126,7 @@
                                            instr->additional_index());
     store_base = elements;
   } else {
-    Register key = EmitLoadRegister(instr->key(), scratch0());
+    Register key = ToRegister(instr->key());
     // Even though the HLoadKeyed instruction forces the input
     // representation for the key to be an integer, the input gets replaced
     // during bound check elimination with the index argument to the bounds
3257 3211

  
3258 3212

  
3259 3213
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3214
  ASSERT(ToRegister(instr->context()).is(cp));
3260 3215
  ASSERT(ToRegister(instr->object()).is(a1));
3261 3216
  ASSERT(ToRegister(instr->key()).is(a0));
3262 3217

  
@@ -3394,7 +3349,6 @@
   __ bind(&invoke);
   ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is a0, as expected
@@ -3402,7 +3356,6 @@
   ParameterCount actual(receiver);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3431,11 +3384,11 @@
 void LCodeGen::DoContext(LContext* instr) {
   // If there is a non-return use, the context must be moved to a register.
   Register result = ToRegister(instr->result());
-  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
-    if (!it.value()->IsReturn()) {
-      __ mov(result, cp);
-      return;
-    }
+  if (info()->IsOptimizing()) {
+    __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    ASSERT(result.is(cp));
   }
 }
 
@@ -3449,6 +3402,7 @@
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   // The context is the first argument.
@@ -3458,8 +3412,9 @@
 
 
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ lw(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
 }
 
 
@@ -3482,7 +3437,6 @@
       dont_adapt_arguments || formal_parameter_count == arity;
 
   LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
     if (a1_state == A1_UNINITIALIZED) {
@@ -3512,9 +3466,6 @@
     __ InvokeFunction(
         function, expected, count, CALL_FUNCTION, generator, call_kind);
   }
-
-  // Restore context.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3531,6 +3482,8 @@
 
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  ASSERT(instr->context() != NULL);
+  ASSERT(ToRegister(instr->context()).is(cp));
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
@@ -3572,7 +3525,8 @@
     // Slow case: Call the runtime system to do the number allocation.
     __ bind(&slow);
 
-    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
     // Set the pointer to the new heap number in tmp.
     if (!tmp1.is(v0))
       __ mov(tmp1, v0);
@@ -3890,6 +3844,9 @@
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, zero_reg);
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3898,6 +3855,9 @@
 
 void LCodeGen::DoMathTan(LMathTan* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, zero_reg);
   TranscendentalCacheStub stub(TranscendentalCache::TAN,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3906,6 +3866,9 @@
 
 void LCodeGen::DoMathCos(LMathCos* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, zero_reg);
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3914,6 +3877,9 @@
 
 void LCodeGen::DoMathSin(LMathSin* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(f4));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, zero_reg);
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3921,17 +3887,16 @@
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->function()).is(a1));
   ASSERT(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
-    RecordPosition(pointers->position());
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
     ParameterCount count(instr->arity());
     __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
     CallKnownFunction(known_function,
                       instr->hydrogen()->formal_parameter_count(),
@@ -3944,17 +3909,18 @@
 
 
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
@@ -3963,23 +3929,22 @@
       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ li(a2, Operand(instr->name()));
   CallCode(ic, mode, instr);
-  // Restore context register.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->function()).is(a1));
   ASSERT(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
@@ -3988,7 +3953,6 @@
       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ li(a2, Operand(instr->name()));
   CallCode(ic, mode, instr);
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -4004,6 +3968,7 @@
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->constructor()).is(a1));
   ASSERT(ToRegister(instr->result()).is(v0));
 
@@ -4017,6 +3982,7 @@
 
 
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->constructor()).is(a1));
   ASSERT(ToRegister(instr->result()).is(v0));
 
@@ -4091,7 +4057,12 @@
 
   if (access.IsExternalMemory()) {
     Register value = ToRegister(instr->value());
-    __ sw(value, MemOperand(object, offset));
+    MemOperand operand = MemOperand(object, offset);
+    if (representation.IsByte()) {
+      __ sb(value, operand);
+    } else {
+      __ sw(value, operand);
+    }
     return;
   }
 
@@ -4136,7 +4107,12 @@
       instr->hydrogen()->value()->IsHeapObject()
           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   if (access.IsInobject()) {
-    __ sw(value, FieldMemOperand(object, offset));
+    MemOperand operand = FieldMemOperand(object, offset);
+    if (representation.IsByte()) {
+      __ sb(value, operand);
+    } else {
+      __ sw(value, operand);
+    }
     if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the object for in-object properties.
       __ RecordWriteField(object,
@@ -4150,7 +4126,12 @@
     }
   } else {
     __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    __ sw(value, FieldMemOperand(scratch, offset));
+    MemOperand operand = FieldMemOperand(scratch, offset);
+    if (representation.IsByte()) {
+      __ sb(value, operand);
+    } else {
+      __ sw(value, operand);
+    }
     if (instr->hydrogen()->NeedsWriteBarrier()) {
       // Update the write barrier for the properties array.
       // object is used as a scratch register.
@@ -4168,6 +4149,7 @@
 
 
 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(a1));
   ASSERT(ToRegister(instr->value()).is(a0));
 
@@ -4241,20 +4223,25 @@
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+    Register address = scratch0();
     FPURegister value(ToDoubleRegister(instr->value()));
     if (key_is_constant) {
-      __ Addu(scratch0(), external_pointer, constant_key <<
-          element_size_shift);
+      if (constant_key != 0) {
+        __ Addu(address, external_pointer,
+                Operand(constant_key << element_size_shift));
+      } else {
+        address = external_pointer;
+      }
     } else {
-      __ sll(scratch0(), key, shift_size);
-      __ Addu(scratch0(), scratch0(), external_pointer);
+      __ sll(address, key, shift_size);
+      __ Addu(address, external_pointer, address);
     }
 
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
-      __ swc1(double_scratch0(), MemOperand(scratch0(), additional_offset));
+      __ swc1(double_scratch0(), MemOperand(address, additional_offset));
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ sdc1(value, MemOperand(scratch0(), additional_offset));
+      __ sdc1(value, MemOperand(address, additional_offset));
    }
  } else {
    Register value(ToRegister(instr->value()));
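
Note: the store path now names its scratch register "address" and skips the add entirely when the constant key is zero, simply reusing external_pointer as the store base. A sketch of that small peephole in plain C++ (ElementAddress is an illustrative helper):

    #include <cassert>
    #include <cstdint>

    uintptr_t ElementAddress(uintptr_t external_pointer, int constant_key,
                             int element_size_shift) {
      if (constant_key != 0) {
        return external_pointer +
               (static_cast<uintptr_t>(constant_key) << element_size_shift);
      }
      return external_pointer;  // no Addu emitted at all
    }

    int main() {
      assert(ElementAddress(0x1000, 0, 3) == 0x1000);       // key 0: base reused
      assert(ElementAddress(0x1000, 4, 3) == 0x1000 + 32);  // key 4, 8-byte elems
      return 0;
    }
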
@@ -4296,33 +4283,29 @@
 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   DoubleRegister value = ToDoubleRegister(instr->value());
   Register elements = ToRegister(instr->elements());
-  Register key = no_reg;
   Register scratch = scratch0();
+  DoubleRegister double_scratch = double_scratch0();
   bool key_is_constant = instr->key()->IsConstantOperand();
-  int constant_key = 0;
-  Label not_nan;
+  Label not_nan, done;
 
   // Calculate the effective address of the slot in the array to store the
   // double value.
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   if (key_is_constant) {
-    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     if (constant_key & 0xF0000000) {
       Abort(kArrayIndexConstantValueTooBig);
     }
+    __ Addu(scratch, elements,
+            Operand((constant_key << element_size_shift) +
+                    FixedDoubleArray::kHeaderSize - kHeapObjectTag));
   } else {
-    key = ToRegister(instr->key());
-  }
-  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
-      ? (element_size_shift - kSmiTagSize) : element_size_shift;
-  if (key_is_constant) {
-    __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
-            FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-  } else {
-    __ sll(scratch, key, shift_size);
-    __ Addu(scratch, elements, Operand(scratch));
-    __ Addu(scratch, scratch,
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - kSmiTagSize) : element_size_shift;
+    __ Addu(scratch, elements,
             Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+    __ sll(at, ToRegister(instr->key()), shift_size);
+    __ Addu(scratch, scratch, at);
   }
 
   if (instr->NeedsCanonicalization()) {
@@ -4333,12 +4316,17 @@
 
     // Only load canonical NaN if the comparison above set the overflow.
     __ bind(&is_nan);
-    __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+    __ Move(double_scratch,
+            FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+    __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
+        element_size_shift));
+    __ Branch(&done);
   }
 
   __ bind(&not_nan);
   __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
       element_size_shift));
+  __ bind(&done);
 }
 
 
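
Note: the canonicalization path now stores from a dedicated double scratch register instead of clobbering the input value. The reason the guard exists at all: an arbitrary NaN payload could collide with the bit pattern the runtime uses as the "hole" marker in double arrays, so every NaN is rewritten to one fixed payload before the sdc1 store. A hedged C++ sketch of that idea (CanonicalizeForStore is an illustrative helper; the forged payload below is just a demo value):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    double CanonicalizeForStore(double value) {
      if (std::isnan(value))
        return std::numeric_limits<double>::quiet_NaN();  // one fixed payload
      return value;
    }

    int main() {
      uint64_t bits = 0x7FF0000000000001ULL;  // NaN with nonstandard payload
      double weird_nan;
      std::memcpy(&weird_nan, &bits, sizeof bits);

      double stored = CanonicalizeForStore(weird_nan);
      uint64_t stored_bits;
      std::memcpy(&stored_bits, &stored, sizeof stored);

      assert(std::isnan(stored));
      assert(stored_bits != bits);              // payload was rewritten
      assert(CanonicalizeForStore(1.5) == 1.5); // ordinary values untouched
      return 0;
    }
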

  
@@ -4404,6 +4392,7 @@
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(a2));
   ASSERT(ToRegister(instr->key()).is(a1));
   ASSERT(ToRegister(instr->value()).is(a0));
@@ -4436,6 +4425,7 @@
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, GetRAState(), kDontSaveFPRegs);
   } else {
+    ASSERT(ToRegister(instr->context()).is(cp));
     PushSafepointRegistersScope scope(
         this, Safepoint::kWithRegistersAndDoubles);
     __ mov(a0, object_reg);
@@ -4452,14 +4442,16 @@
 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
-  Label fail;
-  __ TestJSArrayForAllocationMemento(object, temp, ne, &fail);
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+                                     ne, &no_memento_found);
   DeoptimizeIf(al, instr->environment());
-  __ bind(&fail);
+  __ bind(&no_memento_found);
 }
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
   StringAddStub stub(instr->hydrogen()->flags());
@@ -4514,7 +4506,8 @@
     __ SmiTag(index);
     __ push(index);
   }
-  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+                          instr->context());
   __ AssertSmi(v0);
   __ SmiUntag(v0);
   __ StoreToSafepointRegisterSlot(v0, result);
@@ -4567,7 +4560,7 @@
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ SmiTag(char_code);
   __ push(char_code);
-  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   __ StoreToSafepointRegisterSlot(v0, result);
 }
 
@@ -4707,7 +4700,15 @@
   // register is stored, as this register is in the pointer map, but contains an
   // integer value.
   __ StoreToSafepointRegisterSlot(zero_reg, dst);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ Move(dst, v0);
   __ Subu(dst, dst, kHeapObjectTag);
 
@@ -4763,7 +4764,15 @@
   __ mov(reg, zero_reg);
 
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ Subu(v0, v0, kHeapObjectTag);
   __ StoreToSafepointRegisterSlot(v0, reg);
 }
@@ -4798,34 +4807,19 @@
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
   Register scratch = scratch0();
-
-  Label load_smi, heap_number, done;
-
+  Label convert, load_smi, done;
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
     // Heap number map check.
     __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    if (!can_convert_undefined_to_nan) {
-      DeoptimizeIf(ne, env, scratch, Operand(at));
+    if (can_convert_undefined_to_nan) {
+      __ Branch(&convert, ne, scratch, Operand(at));
     } else {
-      Label heap_number, convert;
-      __ Branch(&heap_number, eq, scratch, Operand(at));
-
-      // Convert undefined (and hole) to NaN.
-      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-      DeoptimizeIf(ne, env, input_reg, Operand(at));
-
-      __ bind(&convert);
-      __ LoadRoot(at, Heap::kNanValueRootIndex);
-      __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset));
-      __ Branch(&done);
-
-      __ bind(&heap_number);
+      DeoptimizeIf(ne, env, scratch, Operand(at));
     }
-    // Heap number to double register conversion.
+    // Load heap number.
     __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
     if (deoptimize_on_minus_zero) {
       __ mfc1(at, result_reg.low());
@@ -4834,11 +4828,19 @@
       DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
     }
     __ Branch(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, env, input_reg, Operand(at));
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ Branch(&done);
+    }
   } else {
     __ SmiUntag(scratch, input_reg);
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
-
   // Smi to double register conversion
   __ bind(&load_smi);
   // scratch: untagged value of input_reg
@@ -4870,19 +4872,32 @@
   if (instr->truncating()) {
     // Performs a truncating conversion of a floating point number as used by
     // the JS bitwise operations.
-    Label heap_number;
-    __ Branch(&heap_number, eq, scratch1, Operand(at));  // HeapNumber map?
-    // Check for undefined. Undefined is converted to zero for truncating
-    // conversions.
+    Label no_heap_number, check_bools, check_false;
+    __ Branch(&no_heap_number, ne, scratch1, Operand(at));  // HeapNumber map?
+    __ mov(scratch2, input_reg);
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ Branch(&done);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-    DeoptimizeIf(ne, instr->environment(), input_reg, Operand(at));
+    __ Branch(&check_bools, ne, input_reg, Operand(at));
     ASSERT(ToRegister(instr->result()).is(input_reg));
-    __ mov(input_reg, zero_reg);
-    __ Branch(&done);
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
 
-    __ bind(&heap_number);
-    __ mov(scratch2, input_reg);
-    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ bind(&check_bools);
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(&check_false, ne, scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ li(input_reg, Operand(1));  // In delay slot.
+
+    __ bind(&check_false);
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
   } else {
     // Deoptimize if we don't have a heap number.
     DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
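
Note: the truncating path now accepts oddballs instead of deoptimizing — undefined and false truncate to 0, true to 1; anything else still bails out. A stand-in C++ model of that decision tree (the Tagged enum and TruncateToI are illustrative, not V8 types):

    #include <cassert>
    #include <cstdint>

    enum class Tagged { kHeapNumber, kUndefined, kTrue, kFalse, kOther };

    // Returns false where the generated code would DeoptimizeIf.
    bool TruncateToI(Tagged input, double number_value, int32_t* out) {
      switch (input) {
        case Tagged::kHeapNumber:
          *out = static_cast<int32_t>(number_value);  // TruncateHeapNumberToI
          return true;
        case Tagged::kUndefined:
        case Tagged::kFalse:
          *out = 0;  // new behavior: no deopt
          return true;
        case Tagged::kTrue:
          *out = 1;  // new behavior: no deopt
          return true;
        case Tagged::kOther:
          return false;  // still deoptimizes
      }
      return false;
    }

    int main() {
      int32_t v;
      assert(TruncateToI(Tagged::kTrue, 0, &v) && v == 1);
      assert(TruncateToI(Tagged::kUndefined, 0, &v) && v == 0);
      assert(TruncateToI(Tagged::kHeapNumber, 42.7, &v) && v == 42);
      assert(!TruncateToI(Tagged::kOther, 0, &v));
      return 0;
    }
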
@@ -4934,14 +4949,18 @@
 
   Register input_reg = ToRegister(input);
 
-  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
 
-  // Let the deferred code handle the HeapObject case.
-  __ JumpIfNotSmi(input_reg, deferred->entry());
+    // Let the deferred code handle the HeapObject case.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
 
-  // Smi to int32 conversion.
-  __ SmiUntag(input_reg);
-  __ bind(deferred->exit());
+    // Smi to int32 conversion.
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
 }
 
 
@@ -5091,7 +5110,7 @@
 
 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   Register reg = ToRegister(instr->value());
-  Handle<HeapObject> object = instr->hydrogen()->object();
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
   AllowDeferredHandleDereference smi_check;
   if (isolate()->heap()->InNewSpace(*object)) {
     Register reg = ToRegister(instr->value());
@@ -5111,7 +5130,10 @@
   {
     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
     __ push(object);
-    CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+    __ mov(cp, zero_reg);
+    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(v0, scratch0());
   }
   __ And(at, scratch0(), Operand(kSmiTagMask));
@@ -5142,7 +5164,6 @@
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
-  SmallMapList* map_set = instr->hydrogen()->map_set();
   __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
 
   DeferredCheckMaps* deferred = NULL;
@@ -5151,12 +5172,13 @@
     __ bind(deferred->check_maps());
   }
 
+  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
   Label success;
-  for (int i = 0; i < map_set->length() - 1; i++) {
-    Handle<Map> map = map_set->at(i);
+  for (int i = 0; i < map_set.size() - 1; i++) {
+    Handle<Map> map = map_set.at(i).handle();
     __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
   }
-  Handle<Map> map = map_set->last();
+  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
   // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
   if (instr->hydrogen()->has_migration_target()) {
     __ Branch(deferred->entry(), ne, map_reg, Operand(map));
@@ -5309,12 +5331,15 @@
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
+                            instr->context());
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
+                            instr->context());
   } else {
-    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
+                            instr->context());
   }
   __ StoreToSafepointRegisterSlot(v0, result);
 }
@@ -5329,6 +5354,7 @@
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Label materialized;
   // Registers will be used as follows:
   // t3 = literals array.
5381 5407

  
5382 5408

  
5383 5409
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5410
  ASSERT(ToRegister(instr->context()).is(cp));
5384 5411
  // Use the fast case closure allocation code that allocates in new
5385 5412
  // space for nested functions that don't need literals cloning.
5386 5413
  bool pretenure = instr->hydrogen()->pretenure();
@@ -5563,14 +5590,13 @@
 }
 
 
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
   if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
-  int patch_size = Deoptimizer::patch_size();
-  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
-    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
     ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
     while (padding_size > 0) {
       __ nop();
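
Note: EnsureSpaceForLazyDeopt now takes the required gap as a parameter instead of always reading Deoptimizer::patch_size() itself; all callers in this patch pass Deoptimizer::patch_size() explicitly. The padding logic is unchanged: emit nops until the current pc is at least space_needed bytes past the previous lazy-deopt point. A self-contained sketch of that arithmetic, assuming the 4-byte MIPS instruction size:

    #include <cassert>

    const int kInstrSize = 4;  // Assembler::kInstrSize on MIPS

    int PaddingNops(int current_pc, int last_lazy_deopt_pc, int space_needed) {
      if (current_pc >= last_lazy_deopt_pc + space_needed) return 0;
      int padding_size = last_lazy_deopt_pc + space_needed - current_pc;
      assert(padding_size % kInstrSize == 0);  // ASSERT_EQ in the real code
      return padding_size / kInstrSize;
    }

    int main() {
      // 8 bytes emitted since the last deopt point, 16 required: pad 2 nops.
      assert(PaddingNops(108, 100, 16) == 2);
      assert(PaddingNops(140, 100, 16) == 0);  // already far enough along
      return 0;
    }
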
@@ -5581,7 +5607,7 @@
 
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  EnsureSpaceForLazyDeopt();
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   last_lazy_deopt_pc_ = masm()->pc_offset();
   ASSERT(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
@@ -5612,6 +5638,7 @@
 
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  LoadContextFromDeferred(instr->context());
   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5643,10 +5670,12 @@
     Label done;
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(&done, hs, sp, Operand(at));
+    ASSERT(instr->context()->IsRegister());
+    ASSERT(ToRegister(instr->context()).is(cp));
     CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
-    EnsureSpaceForLazyDeopt();
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
     last_lazy_deopt_pc_ = masm()->pc_offset();
     __ bind(&done);
     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -5658,7 +5687,7 @@
         new(zone()) DeferredStackCheck(this, instr);
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
-    EnsureSpaceForLazyDeopt();
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
     last_lazy_deopt_pc_ = masm()->pc_offset();
     __ bind(instr->done_label());
     deferred_stack_check->SetExit(instr->done_label());
