Revision f230a1cf deps/v8/src/arm/lithium-codegen-arm.cc

--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -98 +98 @@
 }
 
 
-void LCodeGen::Comment(const char* format, ...) {
-  if (!FLAG_code_comments) return;
-  char buffer[4 * KB];
-  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
-  va_list arguments;
-  va_start(arguments, format);
-  builder.AddFormattedList(format, arguments);
-  va_end(arguments);
-
-  // Copy the string before recording it in the assembler to avoid
-  // issues when the stack allocated buffer goes out of scope.
-  size_t length = builder.position();
-  Vector<char> copy = Vector<char>::New(length + 1);
-  OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
-  masm()->RecordComment(copy.start());
-}
-
-
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());
 
@@ -139 +121 @@
     // receiver object). r5 is zero for method calls and non-zero for
     // function calls.
     if (!info_->is_classic_mode() || info_->is_native()) {
-      Label ok;
       __ cmp(r5, Operand::Zero());
-      __ b(eq, &ok);
       int receiver_offset = scope()->num_parameters() * kPointerSize;
       __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-      __ str(r2, MemOperand(sp, receiver_offset));
-      __ bind(&ok);
+      __ str(r2, MemOperand(sp, receiver_offset), ne);
     }
   }
 
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    if (info()->IsStub()) {
-      __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
-      __ Push(Smi::FromInt(StackFrame::STUB));
-      // Adjust FP to point to saved FP.
-      __ add(fp, sp, Operand(2 * kPointerSize));
-    } else {
-      PredictableCodeSizeScope predictible_code_size_scope(
-          masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
-      // The following three instructions must remain together and unmodified
-      // for code aging to work properly.
-      __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
-      __ nop(ip.code());
-      // Adjust FP to point to saved FP.
-      __ add(fp, sp, Operand(2 * kPointerSize));
-    }
+    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
     frame_is_built_ = true;
     info_->AddNoFrameRange(0, masm_->pc_offset());
   }
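
Note: the receiver rewrite above leans on ARM's conditional execution. The
two sequences are equivalent; the new form folds the branch into the store's
condition code. A minimal sketch in the same MacroAssembler notation as the
hunk, with the same registers:

    // Before: branch around the store when r5 == 0 (method call).
    Label ok;
    __ cmp(r5, Operand::Zero());
    __ b(eq, &ok);
    __ str(r2, MemOperand(sp, receiver_offset));
    __ bind(&ok);

    // After: predicate the store itself; it only takes effect when the
    // preceding cmp left "not equal" (r5 != 0, i.e. a function call).
    __ cmp(r5, Operand::Zero());
    __ str(r2, MemOperand(sp, receiver_offset), ne);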
@@ -248 +213 @@
 
   // Trace the call.
   if (FLAG_trace && info()->IsOptimizing()) {
+    // We have not executed any compiled code yet, so cp still holds the
+    // incoming context.
     __ CallRuntime(Runtime::kTraceEnter, 0);
   }
   return !is_aborted();
@@ -269 +236 @@
 }
 
 
-bool LCodeGen::GenerateBody() {
-  ASSERT(is_generating());
-  bool emit_instructions = true;
-  for (current_instruction_ = 0;
-       !is_aborted() && current_instruction_ < instructions_->length();
-       current_instruction_++) {
-    LInstruction* instr = instructions_->at(current_instruction_);
-
-    // Don't emit code for basic blocks with a replacement.
-    if (instr->IsLabel()) {
-      emit_instructions = !LLabel::cast(instr)->HasReplacement();
-    }
-    if (!emit_instructions) continue;
-
-    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
-      Comment(";;; <@%d,#%d> %s",
-              current_instruction_,
-              instr->hydrogen_value()->id(),
-              instr->Mnemonic());
-    }
-
-    RecordAndUpdatePosition(instr->position());
-
-    instr->CompileToNative(this);
-  }
-  EnsureSpaceForLazyDeopt();
-  last_lazy_deopt_pc_ = masm()->pc_offset();
-  return !is_aborted();
-}
-
-
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
 
-      int pos = instructions_->at(code->instruction_index())->position();
-      RecordAndUpdatePosition(pos);
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
@@ -448 +385 @@
       Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
     } else {
       ASSERT(r.IsSmiOrTagged());
-      __ LoadObject(scratch, literal);
+      __ Move(scratch, literal);
     }
     return scratch;
   } else if (op->IsStackSlot() || op->IsArgument()) {
@@ -727 +664 @@
                                LInstruction* instr,
                                SafepointMode safepoint_mode,
                                TargetAddressStorageMode storage_mode) {
-  EnsureSpaceForLazyDeopt();
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   ASSERT(instr != NULL);
   // Block literal pool emission to ensure nop indicating no inlined smi code
   // is in the correct position.
   Assembler::BlockConstPoolScope block_const_pool(masm());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
   __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 
@@ -748 +683 @@
 
 void LCodeGen::CallRuntime(const Runtime::Function* function,
                            int num_arguments,
-                           LInstruction* instr) {
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
   ASSERT(instr != NULL);
-  LPointerMap* pointers = instr->pointer_map();
-  ASSERT(pointers != NULL);
-  RecordPosition(pointers->position());
 
-  __ CallRuntime(function, num_arguments);
+  __ CallRuntime(function, num_arguments, save_doubles);
+
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 }
 
 
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ ldr(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                        int argc,
-                                       LInstruction* instr) {
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
   __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
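
Note: the new CallRuntimeFromDeferred contract makes deferred code
responsible for putting the context into cp before calling out, via the new
LoadContextFromDeferred, instead of assuming cp survived whatever the fast
path did. The context operand may live in a register, a stack slot, or a
constant, as the branches above show. At a call site the change looks like
this (mirroring the DoDeferredAllocate and DoDeferredStackCheck hunks later
in this diff):

    // Before: cp is implicitly trusted to hold the context.
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);

    // After: the instruction carries an explicit context operand.
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
                            instr->context());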
@@ -862 +813 @@
 
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
+  ZoneList<Handle<JSObject> > objects(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode mode = it.rinfo()->rmode();
-    if (mode == RelocInfo::EMBEDDED_OBJECT &&
-        it.rinfo()->target_object()->IsMap()) {
-      Handle<Map> map(Map::cast(it.rinfo()->target_object()));
-      if (map->CanTransition()) {
+    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+      if (it.rinfo()->target_object()->IsMap()) {
+        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
         maps.Add(map, zone());
+      } else if (it.rinfo()->target_object()->IsJSObject()) {
+        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+        objects.Add(object, zone());
       }
     }
   }
 #ifdef VERIFY_HEAP
-  // This disables verification of weak embedded maps after full GC.
+  // This disables verification of weak embedded objects after full GC.
   // AddDependentCode can cause a GC, which would observe the state where
   // this code is not yet in the depended code lists of the embedded maps.
-  NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
 #endif
   for (int i = 0; i < maps.length(); i++) {
     maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
   }
+  for (int i = 0; i < objects.length(); i++) {
+    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+  }
 }
 
 
@@ -977 +933 @@
       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
     }
   }
-  if (kind & Safepoint::kWithRegisters) {
-    // Register cp always contains a pointer to the context.
-    safepoint.DefinePointerRegister(cp, zone());
-  }
 }
 
 
@@ -991 +943 @@
 
 
 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
-  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+  LPointerMap empty_pointers(zone());
   RecordSafepoint(&empty_pointers, deopt_mode);
 }
 
@@ -1013 +965 @@
 }
 
 
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
-  if (position >= 0 && position != old_position_) {
-    masm()->positions_recorder()->RecordPosition(position);
-    old_position_ = position;
-  }
+  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
@@ -1073 +1018 @@
 
 
 void LCodeGen::DoCallStub(LCallStub* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(r0));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpConstructResult: {
@@ -1090 +1036 @@
       CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
       break;
     }
-    case CodeStub::NumberToString: {
-      NumberToStringStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
     case CodeStub::StringCompare: {
       StringCompareStub stub;
       CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -1383 +1324 @@
 
 void LCodeGen::DoDivI(LDivI* instr) {
   if (instr->hydrogen()->HasPowerOf2Divisor()) {
-    Register dividend = ToRegister(instr->left());
+    const Register dividend = ToRegister(instr->left());
+    const Register result = ToRegister(instr->result());
     int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
     int32_t test_value = 0;
     int32_t power = 0;
@@ -1394 +1336 @@
     } else {
       // Check for (0 / -x) that will produce negative zero.
       if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-        __ tst(dividend, Operand(dividend));
+        __ cmp(dividend, Operand::Zero());
         DeoptimizeIf(eq, instr->environment());
       }
       // Check for (kMinInt / -1).
@@ -1409 +1351 @@
     if (test_value != 0) {
       if (instr->hydrogen()->CheckFlag(
           HInstruction::kAllUsesTruncatingToInt32)) {
-        __ cmp(dividend, Operand(0));
-        __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
-        __ mov(dividend, Operand(dividend, ASR, power));
-        if (divisor > 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, lt);
-        if (divisor < 0) __ rsb(dividend, dividend, Operand(0), LeaveCC, gt);
+        __ sub(result, dividend, Operand::Zero(), SetCC);
+        __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+        __ mov(result, Operand(result, ASR, power));
+        if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
+        if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
         return;  // Don't fall through to "__ rsb" below.
       } else {
         // Deoptimize if remainder is not 0.
         __ tst(dividend, Operand(test_value));
         DeoptimizeIf(ne, instr->environment());
-        __ mov(dividend, Operand(dividend, ASR, power));
+        __ mov(result, Operand(dividend, ASR, power));
+        if (divisor < 0) __ rsb(result, result, Operand(0));
+      }
+    } else {
+      if (divisor < 0) {
+        __ rsb(result, dividend, Operand(0));
+      } else {
+        __ Move(result, dividend);
       }
     }
-    if (divisor < 0) __ rsb(dividend, dividend, Operand(0));
 
     return;
   }
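
Note: the shift-based path above exists because an arithmetic shift right
rounds toward negative infinity while JS truncating division rounds toward
zero, so negative dividends are negated around the shift. A standalone C++
model of the sequence emitted for the all-uses-truncating case (kMinInt is
left out of this sketch; the surrounding code deoptimizes on kMinInt / -1):

    #include <cstdint>

    // Equivalent of the sub/rsb(lt), ASR power, rsb(lt)/rsb(gt) sequence.
    int32_t DivideByPowerOfTwoTruncating(int32_t dividend, int32_t divisor,
                                         int power) {
      int32_t result = dividend < 0 ? -dividend : dividend;  // |dividend|
      result >>= power;                                      // ASR
      // Negate when the exact quotient would be negative.
      if (divisor > 0 && dividend < 0) result = -result;
      if (divisor < 0 && dividend > 0) result = -result;
      return result;
    }

For example, DivideByPowerOfTwoTruncating(-7, 2, 1) yields -3, matching
(-7 / 2) | 0 in JS, whereas -7 >> 1 would give -4.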
@@ -1439 +1387 @@
 
   // Check for (0 / -x) that will produce negative zero.
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    Label left_not_zero;
+    Label positive;
+    if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+      // Do the test only if it hadn't be done above.
+      __ cmp(right, Operand::Zero());
+    }
+    __ b(pl, &positive);
     __ cmp(left, Operand::Zero());
-    __ b(ne, &left_not_zero);
-    __ cmp(right, Operand::Zero());
-    DeoptimizeIf(mi, instr->environment());
-    __ bind(&left_not_zero);
+    DeoptimizeIf(eq, instr->environment());
+    __ bind(&positive);
   }
 
   // Check for (kMinInt / -1).
@@ -1886 +1837 @@
 void LCodeGen::DoConstantT(LConstantT* instr) {
   Handle<Object> value = instr->value(isolate());
   AllowDeferredHandleDereference smi_check;
-  __ LoadObject(ToRegister(instr->result()), value);
+  __ Move(ToRegister(instr->result()), value);
 }
 
 
@@ -1975 +1926 @@
 
 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   Register string = ToRegister(instr->string());
-  Register index = ToRegister(instr->index());
+  LOperand* index_op = instr->index();
   Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
   String::Encoding encoding = instr->encoding();
 
   if (FLAG_debug_code) {
-    __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
-    __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+    __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
 
-    __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+    __ and_(scratch, scratch,
+            Operand(kStringRepresentationMask | kStringEncodingMask));
     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
-    __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
-                           ? one_byte_seq_type : two_byte_seq_type));
+    __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+                            ? one_byte_seq_type : two_byte_seq_type));
     __ Check(eq, kUnexpectedStringType);
   }
 
-  __ add(ip,
-         string,
-         Operand(SeqString::kHeaderSize - kHeapObjectTag));
-  if (encoding == String::ONE_BYTE_ENCODING) {
-    __ strb(value, MemOperand(ip, index));
+  if (index_op->IsConstantOperand()) {
+    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ strb(value,
+              FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
+    } else {
+      __ strh(value,
+          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
+    }
   } else {
-    // MemOperand with ip as the base register is not allowed for strh, so
-    // we do the address calculation explicitly.
-    __ add(ip, ip, Operand(index, LSL, 1));
-    __ strh(value, MemOperand(ip));
+    Register index = ToRegister(index_op);
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ add(scratch, string, Operand(index));
+      __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+    } else {
+      __ add(scratch, string, Operand(index, LSL, 1));
+      __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
+    }
   }
 }
 
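Note: both branches above compute the same element address; only the
strength of the addressing mode differs (an immediate offset for a constant
index, a register offset with an optional LSL #1 otherwise). The layout rule
being encoded, as a standalone C++ sketch:

    #include <cstddef>

    // Character offset inside a sequential string, relative to the start
    // of the object (SeqString::kHeaderSize in the diff above).
    size_t SeqStringCharOffset(size_t header_size, bool one_byte_encoding,
                               size_t index) {
      // One-byte strings store one char per byte; two-byte strings store
      // UTF-16 code units, hence index * 2 (the LSL #1 above).
      return header_size + (one_byte_encoding ? index : index * 2);
    }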
@@ -2008 +1969 @@
 void LCodeGen::DoThrow(LThrow* instr) {
   Register input_reg = EmitLoadRegister(instr->value(), ip);
   __ push(input_reg);
+  ASSERT(ToRegister(instr->context()).is(cp));
   CallRuntime(Runtime::kThrow, 1, instr);
 
   if (FLAG_debug_code) {
@@ -2145 +2107 @@
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->left()).is(r1));
   ASSERT(ToRegister(instr->right()).is(r0));
   ASSERT(ToRegister(instr->result()).is(r0));
@@ -2158 +2121 @@
 }
 
 
-int LCodeGen::GetNextEmittedBlock() const {
-  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
-    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
-  }
-  return -1;
-}
-
 template<class InstrType>
 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
   int left_block = instr->TrueDestination(chunk_);
@@ -2197 +2153 @@
 }
 
 
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
-  Representation r = instr->hydrogen()->value()->representation();
-  if (r.IsSmiOrInteger32() || r.IsDouble()) {
-    EmitBranch(instr, al);
-  } else {
-    ASSERT(r.IsTagged());
-    Register reg = ToRegister(instr->value());
-    HType type = instr->hydrogen()->value()->type();
-    if (type.IsTaggedNumber()) {
-      EmitBranch(instr, al);
-    }
-    __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-    __ ldr(scratch0(), FieldMemOperand(reg, HeapObject::kMapOffset));
-    __ CompareRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
-    EmitBranch(instr, eq);
-  }
-}
-
-
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32() || r.IsSmi()) {
@@ -2371 +2308 @@
     case Token::EQ_STRICT:
       cond = eq;
       break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
     case Token::LT:
       cond = is_unsigned ? lo : lt;
       break;
@@ -2575 +2516 @@
 
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2735 +2677 @@
 
 
 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->left()).is(r0));  // Object is in r0.
   ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.
 
@@ -2844 +2787 @@
   InstanceofStub stub(flags);
 
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  LoadContextFromDeferred(instr->context());
 
   // Get the temp register reserved by the instruction. This needs to be r4 as
   // its slot of the pushing of safepoint registers is used to communicate the
   // offset to the location of the map check.
   Register temp = ToRegister(instr->temp());
   ASSERT(temp.is(r4));
-  __ LoadHeapObject(InstanceofStub::right(), instr->function());
+  __ Move(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 5;
   // Make sure that code size is predicable, since we use specific constants
   // offsets in the code to find embedded values..
@@ -2879 +2823 @@
 }
 
 
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
-  Register object = ToRegister(instr->object());
-  Register result = ToRegister(instr->result());
-  __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
-  __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset));
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
@@ -2908 +2845 @@
 void LCodeGen::DoReturn(LReturn* instr) {
   if (FLAG_trace && info()->IsOptimizing()) {
     // Push the return value on the stack as the parameter.
-    // Runtime::TraceExit returns its parameter in r0.
+    // Runtime::TraceExit returns its parameter in r0.  We're leaving the code
+    // managed by the register allocator and tearing down the frame, it's
+    // safe to write to the context register.
     __ push(r0);
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
@@ -2953 +2893 @@
 
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
-  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
+  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
   __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -2964 +2904 @@
 
 
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->global_object()).is(r0));
   ASSERT(ToRegister(instr->result()).is(r0));
 
@@ -2980 +2921 @@
   Register cell = scratch0();
 
   // Load the cell.
-  __ mov(cell, Operand(instr->hydrogen()->cell()));
+  __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
@@ -3001 +2942 @@
 
 
 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->global_object()).is(r1));
   ASSERT(ToRegister(instr->value()).is(r0));
 
@@ -3073 +3015 @@
 
   if (access.IsExternalMemory()) {
     Register result = ToRegister(instr->result());
-    __ ldr(result, MemOperand(object, offset));
+    MemOperand operand = MemOperand(object, offset);
+    if (access.representation().IsByte()) {
+      __ ldrb(result, operand);
+    } else {
+      __ ldr(result, operand);
+    }
     return;
   }
 
@@ -3084 +3031 @@
   }
 
   Register result = ToRegister(instr->result());
-  if (access.IsInobject()) {
-    __ ldr(result, FieldMemOperand(object, offset));
-  } else {
+  if (!access.IsInobject()) {
     __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
-    __ ldr(result, FieldMemOperand(result, offset));
+    object = result;
+  }
+  MemOperand operand = FieldMemOperand(object, offset);
+  if (access.representation().IsByte()) {
+    __ ldrb(result, operand);
+  } else {
+    __ ldr(result, operand);
   }
 }
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(r0));
   ASSERT(ToRegister(instr->result()).is(r0));
 
@@ -3148 +3100 @@
 }
 
 
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
 void LCodeGen::DoLoadExternalArrayPointer(
     LLoadExternalArrayPointer* instr) {
   Register to_reg = ToRegister(instr->result());
@@ -3265 +3223 @@
   Register scratch = scratch0();
 
   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
-      ? (element_size_shift - kSmiTagSize) : element_size_shift;
-  int constant_key = 0;
+
+  int base_offset =
+      FixedDoubleArray::kHeaderSize - kHeapObjectTag +
+      (instr->additional_index() << element_size_shift);
   if (key_is_constant) {
-    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     if (constant_key & 0xF0000000) {
       Abort(kArrayIndexConstantValueTooBig);
     }
-  } else {
-    key = ToRegister(instr->key());
+    base_offset += constant_key << element_size_shift;
   }
+  __ add(scratch, elements, Operand(base_offset));
 
-  int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
-      ((constant_key + instr->additional_index()) << element_size_shift);
   if (!key_is_constant) {
-    __ add(elements, elements, Operand(key, LSL, shift_size));
+    key = ToRegister(instr->key());
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - kSmiTagSize) : element_size_shift;
+    __ add(scratch, scratch, Operand(key, LSL, shift_size));
   }
-  __ add(elements, elements, Operand(base_offset));
-  __ vldr(result, elements, 0);
+
+  __ vldr(result, scratch, 0);
+
   if (instr->hydrogen()->RequiresHoleCheck()) {
-    __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+    __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
     __ cmp(scratch, Operand(kHoleNanUpper32));
     DeoptimizeIf(eq, instr->environment());
   }
@@ -3305 +3266 @@
                                            instr->additional_index());
     store_base = elements;
   } else {
-    Register key = EmitLoadRegister(instr->key(), scratch0());
+    Register key = ToRegister(instr->key());
     // Even though the HLoadKeyed instruction forces the input
     // representation for the key to be an integer, the input gets replaced
     // during bound check elimination with the index argument to the bounds
@@ -3381 +3342 @@
 
 
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(r1));
   ASSERT(ToRegister(instr->key()).is(r0));
 
@@ -3517 +3479 @@
   __ bind(&invoke);
   ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in receiver which is r0, as expected
@@ -3525 +3486 @@
   ParameterCount actual(receiver);
   __ InvokeFunction(function, actual, CALL_FUNCTION,
                     safepoint_generator, CALL_AS_METHOD);
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3554 +3514 @@
 void LCodeGen::DoContext(LContext* instr) {
   // If there is a non-return use, the context must be moved to a register.
   Register result = ToRegister(instr->result());
-  for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
-    if (!it.value()->IsReturn()) {
-      __ mov(result, cp);
-      return;
-    }
+  if (info()->IsOptimizing()) {
+    __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    ASSERT(result.is(cp));
   }
 }
 
@@ -3572 +3532 @@
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   __ push(cp);  // The context is the first argument.
-  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
+  __ Move(scratch0(), instr->hydrogen()->pairs());
   __ push(scratch0());
   __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   __ push(scratch0());
@@ -3582 +3543 @@
 
 
 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
+  Register context = ToRegister(instr->context());
   Register result = ToRegister(instr->result());
-  __ ldr(result, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
 }
 
 
@@ -3606 +3568 @@
       dont_adapt_arguments || formal_parameter_count == arity;
 
   LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
 
   if (can_invoke_directly) {
     if (r1_state == R1_UNINITIALIZED) {
-      __ LoadHeapObject(r1, function);
+      __ Move(r1, function);
     }
 
     // Change context.
@@ -3636 +3597 @@
     __ InvokeFunction(
         function, expected, count, CALL_FUNCTION, generator, call_kind);
   }
-
-  // Restore context.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -3654 +3612 @@
 
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  ASSERT(instr->context() != NULL);
+  ASSERT(ToRegister(instr->context()).is(cp));
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
@@ -3697 +3657 @@
     // Slow case: Call the runtime system to do the number allocation.
     __ bind(&slow);
 
-    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
     // Set the pointer to the new heap number in tmp.
     if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
     // Restore input_reg after call to runtime.
@@ -3872 +3833 @@
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
     __ JumpIfSmi(r2, &no_deopt);
-    __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
-    __ cmp(r7, Operand(ip));
+    __ cmp(r6, Operand(ip));
     DeoptimizeIf(ne, instr->environment());
     __ bind(&no_deopt);
     MathPowStub stub(MathPowStub::TAGGED);
@@ -3968 +3929 @@
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(d2));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, Operand::Zero());
   TranscendentalCacheStub stub(TranscendentalCache::LOG,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3976 +3940 @@
 
 void LCodeGen::DoMathTan(LMathTan* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(d2));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, Operand::Zero());
   TranscendentalCacheStub stub(TranscendentalCache::TAN,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3984 +3951 @@
 
 void LCodeGen::DoMathCos(LMathCos* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(d2));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, Operand::Zero());
   TranscendentalCacheStub stub(TranscendentalCache::COS,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3992 +3962 @@
 
 void LCodeGen::DoMathSin(LMathSin* instr) {
   ASSERT(ToDoubleRegister(instr->result()).is(d2));
+  // Set the context register to a GC-safe fake value. Clobbering it is
+  // OK because this instruction is marked as a call.
+  __ mov(cp, Operand::Zero());
   TranscendentalCacheStub stub(TranscendentalCache::SIN,
                                TranscendentalCacheStub::UNTAGGED);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
@@ -3999 +3972 @@
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
-    RecordPosition(pointers->position());
     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
     ParameterCount count(instr->arity());
     __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
     CallKnownFunction(known_function,
                       instr->hydrogen()->formal_parameter_count(),
@@ -4022 +3994 @@
 
 
 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
   Handle<Code> ic =
       isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallNamed(LCallNamed* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
@@ -4041 +4014 @@
       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ mov(r2, Operand(instr->name()));
   CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
-  // Restore context register.
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->function()).is(r1));
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
@@ -4066 +4038 @@
       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   __ mov(r2, Operand(instr->name()));
   CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
-  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
 }
 
 
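Note: the pattern repeated through these call hunks is the core of the
change. Previously every IC/stub call was followed by reloading cp from the
frame slot; now the context is an explicit operand that is expected to
arrive in cp (and is asserted to), and calls are expected to leave it
intact, so the reload disappears. Schematically, in the diff's own notation:

    // Before: trust nothing, reload the context after the call.
    CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));

    // After: the invariant is asserted on entry instead of re-established
    // on exit.
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);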
4082 4053

  
4083 4054

  
4084 4055
void LCodeGen::DoCallNew(LCallNew* instr) {
4056
  ASSERT(ToRegister(instr->context()).is(cp));
4085 4057
  ASSERT(ToRegister(instr->constructor()).is(r1));
4086 4058
  ASSERT(ToRegister(instr->result()).is(r0));
4087 4059

  
......
4095 4067

  
4096 4068

  
4097 4069
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
4070
  ASSERT(ToRegister(instr->context()).is(cp));
4098 4071
  ASSERT(ToRegister(instr->constructor()).is(r1));
4099 4072
  ASSERT(ToRegister(instr->result()).is(r0));
4100 4073

  
......
4169 4142

  
4170 4143
  if (access.IsExternalMemory()) {
4171 4144
    Register value = ToRegister(instr->value());
4172
    __ str(value, MemOperand(object, offset));
4145
    MemOperand operand = MemOperand(object, offset);
4146
    if (representation.IsByte()) {
4147
      __ strb(value, operand);
4148
    } else {
4149
      __ str(value, operand);
4150
    }
4173 4151
    return;
4174 4152
  }
4175 4153

  
......
4214 4192
      instr->hydrogen()->value()->IsHeapObject()
4215 4193
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4216 4194
  if (access.IsInobject()) {
4217
    __ str(value, FieldMemOperand(object, offset));
4195
    MemOperand operand = FieldMemOperand(object, offset);
4196
    if (representation.IsByte()) {
4197
      __ strb(value, operand);
4198
    } else {
4199
      __ str(value, operand);
4200
    }
4218 4201
    if (instr->hydrogen()->NeedsWriteBarrier()) {
4219 4202
      // Update the write barrier for the object for in-object properties.
4220 4203
      __ RecordWriteField(object,
......
4228 4211
    }
4229 4212
  } else {
4230 4213
    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4231
    __ str(value, FieldMemOperand(scratch, offset));
4214
    MemOperand operand = FieldMemOperand(scratch, offset);
4215
    if (representation.IsByte()) {
4216
      __ strb(value, operand);
4217
    } else {
4218
      __ str(value, operand);
4219
    }
4232 4220
    if (instr->hydrogen()->NeedsWriteBarrier()) {
4233 4221
      // Update the write barrier for the properties array.
4234 4222
      // object is used as a scratch register.
......
4246 4234

  
4247 4235

  
4248 4236
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4237
  ASSERT(ToRegister(instr->context()).is(cp));
4249 4238
  ASSERT(ToRegister(instr->object()).is(r1));
4250 4239
  ASSERT(ToRegister(instr->value()).is(r0));
4251 4240

  
......
4311 4300

  
4312 4301
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
4313 4302
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
4303
    Register address = scratch0();
4314 4304
    DwVfpRegister value(ToDoubleRegister(instr->value()));
4315
    Operand operand(key_is_constant
4316
                    ? Operand(constant_key << element_size_shift)
4317
                    : Operand(key, LSL, shift_size));
4318
    __ add(scratch0(), external_pointer, operand);
4305
    if (key_is_constant) {
4306
      if (constant_key != 0) {
4307
        __ add(address, external_pointer,
4308
               Operand(constant_key << element_size_shift));
4309
      } else {
4310
        address = external_pointer;
4311
      }
4312
    } else {
4313
      __ add(address, external_pointer, Operand(key, LSL, shift_size));
4314
    }
4319 4315
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
4320 4316
      __ vcvt_f32_f64(double_scratch0().low(), value);
4321
      __ vstr(double_scratch0().low(), scratch0(), additional_offset);
4317
      __ vstr(double_scratch0().low(), address, additional_offset);
4322 4318
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
4323
      __ vstr(value, scratch0(), additional_offset);
4319
      __ vstr(value, address, additional_offset);
4324 4320
    }
4325 4321
  } else {
4326 4322
    Register value(ToRegister(instr->value()));
......
4362 4358
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4363 4359
  DwVfpRegister value = ToDoubleRegister(instr->value());
4364 4360
  Register elements = ToRegister(instr->elements());
4365
  Register key = no_reg;
4366 4361
  Register scratch = scratch0();
4362
  DwVfpRegister double_scratch = double_scratch0();
4367 4363
  bool key_is_constant = instr->key()->IsConstantOperand();
4368
  int constant_key = 0;
4369 4364

  
4370 4365
  // Calculate the effective address of the slot in the array to store the
4371 4366
  // double value.
4367
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4372 4368
  if (key_is_constant) {
4373
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4369
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4374 4370
    if (constant_key & 0xF0000000) {
4375 4371
      Abort(kArrayIndexConstantValueTooBig);
4376 4372
    }
4373
    __ add(scratch, elements,
4374
           Operand((constant_key << element_size_shift) +
4375
                   FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4377 4376
  } else {
4378
    key = ToRegister(instr->key());
4379
  }
4380
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
4381
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4382
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
4383
  Operand operand = key_is_constant
4384
      ? Operand((constant_key << element_size_shift) +
4385
                FixedDoubleArray::kHeaderSize - kHeapObjectTag)
4386
      : Operand(key, LSL, shift_size);
4387
  __ add(scratch, elements, operand);
4388
  if (!key_is_constant) {
4389
    __ add(scratch, scratch,
4377
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
4378
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
4379
    __ add(scratch, elements,
4390 4380
           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
4381
    __ add(scratch, scratch,
4382
           Operand(ToRegister(instr->key()), LSL, shift_size));
4391 4383
  }
4392 4384

  
4393 4385
  if (instr->NeedsCanonicalization()) {
......
4397 4389
      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
4398 4390
      __ Assert(ne, kDefaultNaNModeNotSet);
4399 4391
    }
4400
    __ VFPCanonicalizeNaN(value);
4392
    __ VFPCanonicalizeNaN(double_scratch, value);
4393
    __ vstr(double_scratch, scratch,
4394
            instr->additional_index() << element_size_shift);
4395
  } else {
4396
    __ vstr(value, scratch, instr->additional_index() << element_size_shift);
4401 4397
  }
4402
  __ vstr(value, scratch, instr->additional_index() << element_size_shift);
4403 4398
}
4404 4399

  
4405 4400

  
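Note: the canonicalization change writes the canonical NaN into a scratch
VFP register instead of rewriting `value` in place, presumably so the input
double register is left unclobbered for any later uses. Stores into double
arrays canonicalize NaNs at all because FixedDoubleArray reserves one
specific NaN bit pattern (the hole NaN checked via kHoleNanUpper32 earlier
in this diff) to mean "no element", so user-visible NaNs must collapse to
the one canonical quiet NaN. A standalone C++ model of the semantics, not
of the bit patterns:

    #include <limits>

    // Any NaN payload becomes the canonical quiet NaN before it is stored;
    // ordinary numbers pass through unchanged.
    double CanonicalizeNaN(double value) {
      return value != value ? std::numeric_limits<double>::quiet_NaN()
                            : value;
    }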
@@ -4463 +4458 @@
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   ASSERT(ToRegister(instr->object()).is(r2));
   ASSERT(ToRegister(instr->key()).is(r1));
   ASSERT(ToRegister(instr->value()).is(r0));
@@ -4496 +4492 @@
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, GetLinkRegisterState(), kDontSaveFPRegs);
   } else {
+    ASSERT(ToRegister(instr->context()).is(cp));
     PushSafepointRegistersScope scope(
         this, Safepoint::kWithRegistersAndDoubles);
     __ Move(r0, object_reg);
@@ -4512 +4509 @@
 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
-  __ TestJSArrayForAllocationMemento(object, temp);
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   DeoptimizeIf(eq, instr->environment());
+  __ bind(&no_memento_found);
 }
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   __ push(ToRegister(instr->left()));
   __ push(ToRegister(instr->right()));
   StringAddStub stub(instr->hydrogen()->flags());
@@ -4573 +4573 @@
     __ SmiTag(index);
     __ push(index);
   }
-  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
+                          instr->context());
   __ AssertSmi(r0);
   __ SmiUntag(r0);
   __ StoreToSafepointRegisterSlot(r0, result);
@@ -4625 +4626 @@
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   __ SmiTag(char_code);
   __ push(char_code);
-  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   __ StoreToSafepointRegisterSlot(r0, result);
 }
 
@@ -4649 +4650 @@
 
 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
   LOperand* output = instr->result();
-  ASSERT(output->IsRegister());
   __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
   if (!instr->hydrogen()->value()->HasRange() ||
       !instr->hydrogen()->value()->range()->IsInSmiRange()) {
@@ -4670 +4669 @@
 }
 
 
+void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+  if (!instr->hydrogen()->value()->HasRange() ||
+      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
+    __ tst(ToRegister(input), Operand(0xc0000000));
+    DeoptimizeIf(ne, instr->environment());
+  }
+  __ SmiTag(ToRegister(output), ToRegister(input));
+}
+
+
 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   class DeferredNumberTagI V8_FINAL : public LDeferredCode {
    public:
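
Note on the 0xc0000000 mask in the new DoUint32ToSmi: on 32-bit V8 a smi
payload is a signed 31-bit integer, so an unsigned value is representable
only when it is below 2^30, i.e. when bits 31 and 30 are both clear. One
tst therefore covers both "too wide for 31 bits" and "would turn negative
after tagging". The predicate as a standalone C++ statement:

    #include <cstdint>

    // True iff a uint32 survives the SmiTag above unchanged.
    bool Uint32FitsInSmi(uint32_t value) {
      return (value & 0xc0000000u) == 0;  // equivalent to value < 2^30
    }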
@@ -4764 +4775 @@
   // integer value.
   __ mov(ip, Operand::Zero());
   __ StoreToSafepointRegisterSlot(ip, dst);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ Move(dst, r0);
   __ sub(dst, dst, Operand(kHeapObjectTag));
 
@@ -4820 +4839 @@
   __ mov(reg, Operand::Zero());
 
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ sub(r0, r0, Operand(kHeapObjectTag));
   __ StoreToSafepointRegisterSlot(r0, reg);
 }
@@ -4855 +4882 @@
   Register scratch = scratch0();
   SwVfpRegister flt_scratch = double_scratch0().low();
   ASSERT(!result_reg.is(double_scratch0()));
-
-  Label load_smi, heap_number, done;
-
+  Label convert, load_smi, done;
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
-
     // Heap number map check.
     __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
     __ cmp(scratch, Operand(ip));
-    if (!can_convert_undefined_to_nan) {
-      DeoptimizeIf(ne, env);
+    if (can_convert_undefined_to_nan) {
+      __ b(ne, &convert);
     } else {
-      Label heap_number, convert;
-      __ b(eq, &heap_number);
-
-      // Convert undefined (and hole) to NaN.
-      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-      __ cmp(input_reg, Operand(ip));
       DeoptimizeIf(ne, env);
-
-      __ bind(&convert);
-      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
-      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
-      __ jmp(&done);
-
-      __ bind(&heap_number);
     }
-    // Heap number to double register conversion.
+    // load heap number
     __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
     if (deoptimize_on_minus_zero) {
       __ VmovLow(scratch, result_reg);
@@ -4895 +4906 @@
       DeoptimizeIf(eq, env);
     }
     __ jmp(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(input_reg, Operand(ip));
+      DeoptimizeIf(ne, env);
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
+      __ jmp(&done);
+    }
   } else {
     __ SmiUntag(scratch, input_reg);
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
-
   // Smi to double register conversion
   __ bind(&load_smi);
   // scratch: untagged value of input_reg
@@ -4935 +4955 @@
   if (instr->truncating()) {
     // Performs a truncating conversion of a floating point number as used by
     // the JS bitwise operations.
-    Label heap_number;
-    __ b(eq, &heap_number);
-    // Check for undefined. Undefined is converted to zero for truncating
-    // conversions.
+    Label no_heap_number, check_bools, check_false;
+    __ b(ne, &no_heap_number);
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ b(&done);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
     __ cmp(scratch2, Operand(ip));
-    DeoptimizeIf(ne, instr->environment());
+    __ b(ne, &check_bools);
     __ mov(input_reg, Operand::Zero());
     __ b(&done);
 
-    __ bind(&heap_number);
-    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ bind(&check_bools);
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(scratch2, Operand(ip));
+    __ b(ne, &check_false);
+    __ mov(input_reg, Operand(1));
+    __ b(&done);
+
+    __ bind(&check_false);
+    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+    __ cmp(scratch2, Operand(ip));
+    DeoptimizeIf(ne, instr->environment());
+    __ mov(input_reg, Operand::Zero());
+    __ b(&done);
   } else {
     // Deoptimize if we don't have a heap number.
     DeoptimizeIf(ne, instr->environment());
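
Note: the rewritten deferred path widens what a truncating tagged-to-int32
conversion accepts before giving up. Heap numbers are truncated as before;
undefined, true and false are now handled inline with their JS ToInt32
values instead of forcing a deoptimization. An illustrative standalone
model of the control flow (the enum and helper names are hypothetical, not
V8 API):

    #include <cstdint>

    enum class Kind { kHeapNumber, kUndefined, kTrue, kFalse, kOther };

    // Returns false where the emitted code would deoptimize.
    bool TruncateTaggedToI(Kind kind, double number, int32_t* out) {
      switch (kind) {
        case Kind::kHeapNumber:
          // Stands in for TruncateHeapNumberToI, which in V8 truncates
          // modulo 2^32 rather than invoking C++'s out-of-range behavior.
          *out = static_cast<int32_t>(number);
          return true;
        case Kind::kUndefined: *out = 0; return true;  // undefined|0 == 0
        case Kind::kTrue:      *out = 1; return true;  // true|0 == 1
        case Kind::kFalse:     *out = 0; return true;  // false|0 == 0
        case Kind::kOther:     return false;           // deoptimize
      }
      return false;
    }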
@@ -4987 +5022 @@
 
   Register input_reg = ToRegister(input);
 
-  DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
 
-  // Optimistically untag the input.
-  // If the input is a HeapObject, SmiUntag will set the carry flag.
-  __ SmiUntag(input_reg, SetCC);
-  // Branch to deferred code if the input was tagged.
-  // The deferred code will take care of restoring the tag.
-  __ b(cs, deferred->entry());
-  __ bind(deferred->exit());
+    // Optimistically untag the input.
+    // If the input is a HeapObject, SmiUntag will set the carry flag.
+    __ SmiUntag(input_reg, SetCC);
+    // Branch to deferred code if the input was tagged.
+    // The deferred code will take care of restoring the tag.
+    __ b(cs, deferred->entry());
+    __ bind(deferred->exit());
+  }
 }
 
 
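Note: the new fast path skips the deferred machinery entirely when Hydrogen
already proved the value has smi representation; untagging is then a single
shift. For reference, smi untagging on 32-bit ARM of this vintage
(kSmiTag == 0, kSmiTagSize == 1):

    #include <cstdint>

    // The integer lives in the upper 31 bits; the low tag bit is 0.
    int32_t SmiUntag(int32_t tagged_smi) {
      return tagged_smi >> 1;  // arithmetic shift preserves the sign
    }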
@@ -5133 +5172 @@
 
 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   Register reg = ToRegister(instr->value());
-  Handle<HeapObject> object = instr->hydrogen()->object();
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
   AllowDeferredHandleDereference smi_check;
   if (isolate()->heap()->InNewSpace(*object)) {
     Register reg = ToRegister(instr->value());
@@ -5152 +5191 @@
   {
     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
     __ push(object);
-    CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
+    __ mov(cp, Operand::Zero());
+    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(r0, scratch0());
   }
   __ tst(scratch0(), Operand(kSmiTagMask));
@@ -5185 +5227 @@
   ASSERT(input->IsRegister());
   Register reg = ToRegister(input);
 
-  SmallMapList* map_set = instr->hydrogen()->map_set();
   __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
 
   DeferredCheckMaps* deferred = NULL;
@@ -5194 +5235 @@
     __ bind(deferred->check_maps());
   }
 
+  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
   Label success;
-  for (int i = 0; i < map_set->length() - 1; i++) {
-    Handle<Map> map = map_set->at(i);
+  for (int i = 0; i < map_set.size() - 1; i++) {
+    Handle<Map> map = map_set.at(i).handle();
     __ CompareMap(map_reg, map, &success);
     __ b(eq, &success);
   }
 
-  Handle<Map> map = map_set->last();
+  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
   __ CompareMap(map_reg, map, &success);
   if (instr->hydrogen()->has_migration_target()) {
     __ b(ne, deferred->entry());
@@ -5355 +5397 @@
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
+                            instr->context());
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
-    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
+                            instr->context());
   } else {
-    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
+                            instr->context());
   }
   __ StoreToSafepointRegisterSlot(r0, result);
 }
@@ -5374 +5419 @@
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   Label materialized;
   // Registers will be used as follows:
-  // r7 = literals array.
+  // r6 = literals array.
   // r1 = regexp literal.
   // r0 = regexp literal clone.
-  // r2 and r4-r6 are used as temporaries.
+  // r2-5 are used as temporaries.
   int literal_offset =
       FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
-  __ LoadHeapObject(r7, instr->hydrogen()->literals());
-  __ ldr(r1, FieldMemOperand(r7, literal_offset));
+  __ Move(r6, instr->hydrogen()->literals());
+  __ ldr(r1, FieldMemOperand(r6, literal_offset));
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   __ cmp(r1, ip);
   __ b(ne, &materialized);
 
   // Create regexp literal using runtime function
   // Result will be in r0.
-  __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
-  __ mov(r5, Operand(instr->hydrogen()->pattern()));
-  __ mov(r4, Operand(instr->hydrogen()->flags()));
-  __ Push(r7, r6, r5, r4);
+  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ mov(r4, Operand(instr->hydrogen()->pattern()));
+  __ mov(r3, Operand(instr->hydrogen()->flags()));
+  __ Push(r6, r5, r4, r3);
   CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ mov(r1, r0);
 
@@ -5417 +5463 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  ASSERT(ToRegister(instr->context()).is(cp));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
@@ -5560 +5607 @@
 }
 
 
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
   if (info()->IsStub()) return;
   // Ensure that we have enough space after the previous lazy-bailout
   // instruction for patching the code here.
   int current_pc = masm()->pc_offset();
-  int patch_size = Deoptimizer::patch_size();
-  if (current_pc < last_lazy_deopt_pc_ + patch_size) {
+  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
     // Block literal pool emission for duration of padding.
     Assembler::BlockConstPoolScope block_const_pool(masm());
-    int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
+    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
     ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
     while (padding_size > 0) {
       __ nop();
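
Note: EnsureSpaceForLazyDeopt now takes the required gap as a parameter
instead of always reading Deoptimizer::patch_size() itself; the call sites
in this diff simply pass that value through. A worked example of the
padding computation above (the concrete numbers are illustrative):

    #include <cassert>

    int main() {
      const int kInstrSize = 4;            // ARM instructions are 4 bytes
      int last_lazy_deopt_pc = 100;
      int space_needed = 3 * kInstrSize;   // e.g. Deoptimizer::patch_size()
      int current_pc = 104;
      // The gap is too small: pad so there is enough room after the
      // previous lazy-bailout point for patching the code here.
      int padding_size = last_lazy_deopt_pc + space_needed - current_pc;
      assert(padding_size == 8);           // two nops get emitted
      assert(padding_size % kInstrSize == 0);  // mirrors the ASSERT_EQ above
      return 0;
    }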
@@ -5580 +5626 @@
 
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  EnsureSpaceForLazyDeopt();
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   last_lazy_deopt_pc_ = masm()->pc_offset();
   ASSERT(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
@@ -5611 +5657 @@
 
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  LoadContextFromDeferred(instr->context());
   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
@@ -5644 +5691 @@
     __ cmp(sp, Operand(ip));
     __ b(hs, &done);
     PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+    ASSERT(instr->context()->IsRegister());
+    ASSERT(ToRegister(instr->context()).is(cp));
     CallCode(isolate()->builtins()->StackCheck(),
-             RelocInfo::CODE_TARGET,
-             instr);
-    EnsureSpaceForLazyDeopt();

(The final hunk is incomplete: this diff was truncated because it exceeds
the maximum size that can be displayed.)
