Revision f230a1cf deps/v8/src/ia32/lithium-codegen-ia32.cc

deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -120,24 +120,6 @@
 }
 
 
-void LCodeGen::Comment(const char* format, ...) {
-  if (!FLAG_code_comments) return;
-  char buffer[4 * KB];
-  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
-  va_list arguments;
-  va_start(arguments, format);
-  builder.AddFormattedList(format, arguments);
-  va_end(arguments);
-
-  // Copy the string before recording it in the assembler to avoid
-  // issues when the stack allocated buffer goes out of scope.
-  size_t length = builder.position();
-  Vector<char> copy = Vector<char>::New(length + 1);
-  OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
-  masm()->RecordComment(copy.start());
-}
-
-
 #ifdef _MSC_VER
 void LCodeGen::MakeSureStackPagesMapped(int offset) {
   const int kPageSize = 4 * KB;
......
@@ -206,15 +188,8 @@
   if (NeedsEagerFrame()) {
     ASSERT(!frame_is_built_);
     frame_is_built_ = true;
-    __ push(ebp);  // Caller's frame pointer.
-    __ mov(ebp, esp);
+    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
     info()->AddNoFrameRange(0, masm_->pc_offset());
-    __ push(esi);  // Callee's context.
-    if (info()->IsStub()) {
-      __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
-    } else {
-      __ push(edi);  // Callee's JS function.
-    }
   }
 
   if (info()->IsOptimizing() &&
......
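Note: the hand-rolled frame setup above is folded into a single `__ Prologue(...)` call. Judging from the removed lines, the helper subsumes roughly this sequence (a sketch of the function-frame case, not the macro assembler's exact emission):

    __ push(ebp);      // Caller's frame pointer.
    __ mov(ebp, esp);
    __ push(esi);      // Callee's context.
    __ push(edi);      // Callee's JS function.
    // BUILD_STUB_FRAME presumably pushes a Smi-encoded StackFrame::STUB
    // marker instead of edi, as the removed else-branch did.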
@@ -275,7 +250,7 @@
       BitVector* doubles = chunk()->allocated_double_registers();
       BitVector::Iterator save_iterator(doubles);
       while (!save_iterator.Done()) {
-        __ movdbl(MemOperand(esp, count * kDoubleSize),
+        __ movsd(MemOperand(esp, count * kDoubleSize),
                   XMMRegister::FromAllocationIndex(save_iterator.Current()));
         save_iterator.Advance();
         count++;
......
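Note: `movdbl` was V8's ia32 assembler alias for the SSE2 `movsd` (move scalar double) instruction; this revision renames call sites throughout the file to the real mnemonic, with no change expected in the emitted code. Before and after at a typical call site:

    __ movdbl(FieldOperand(object, offset), value);  // old alias
    __ movsd(FieldOperand(object, offset), value);   // new name, same instruction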
@@ -340,12 +315,41 @@
 
   osr_pc_offset_ = masm()->pc_offset();
 
+  // Move state of dynamic frame alignment into edx.
+  __ mov(edx, Immediate(kNoAlignmentPadding));
+
+  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+    Label do_not_pad, align_loop;
+    // Align ebp + 4 to a multiple of 2 * kPointerSize.
+    __ test(ebp, Immediate(kPointerSize));
+    __ j(zero, &do_not_pad, Label::kNear);
+    __ push(Immediate(0));
+    __ mov(ebx, esp);
+    __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+    // Move all parts of the frame over one word. The frame consists of:
+    // unoptimized frame slots, alignment state, context, frame pointer, return
+    // address, receiver, and the arguments.
+    __ mov(ecx, Immediate(scope()->num_parameters() +
+           5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+    __ bind(&align_loop);
+    __ mov(eax, Operand(ebx, 1 * kPointerSize));
+    __ mov(Operand(ebx, 0), eax);
+    __ add(Operand(ebx), Immediate(kPointerSize));
+    __ dec(ecx);
+    __ j(not_zero, &align_loop, Label::kNear);
+    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+    __ sub(Operand(ebp), Immediate(kPointerSize));
+    __ bind(&do_not_pad);
+  }
+
   // Save the first local, which is overwritten by the alignment state.
   Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
   __ push(alignment_loc);
 
-  // Set the dynamic frame alignment state to "not aligned".
-  __ mov(alignment_loc, Immediate(kNoAlignmentPadding));
+  // Set the dynamic frame alignment state.
+  __ mov(alignment_loc, edx);
 
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
......
@@ -355,44 +359,27 @@
 }
 
 
-bool LCodeGen::GenerateBody() {
-  ASSERT(is_generating());
-  bool emit_instructions = true;
-  for (current_instruction_ = 0;
-       !is_aborted() && current_instruction_ < instructions_->length();
-       current_instruction_++) {
-    LInstruction* instr = instructions_->at(current_instruction_);
-
-    // Don't emit code for basic blocks with a replacement.
-    if (instr->IsLabel()) {
-      emit_instructions = !LLabel::cast(instr)->HasReplacement();
-    }
-    if (!emit_instructions) continue;
-
-    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
-      Comment(";;; <@%d,#%d> %s",
-              current_instruction_,
-              instr->hydrogen_value()->id(),
-              instr->Mnemonic());
-    }
-
-    if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
-
-    RecordAndUpdatePosition(instr->position());
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
+}
 
-    instr->CompileToNative(this);
 
-    if (!CpuFeatures::IsSupported(SSE2)) {
-      if (instr->IsGoto()) {
-        x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
-      } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
-                 !instr->IsGap() && !instr->IsReturn()) {
-        __ VerifyX87StackDepth(x87_stack_.depth());
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+  if (!CpuFeatures::IsSupported(SSE2)) {
+    if (instr->IsGoto()) {
+      x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+    } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+               !instr->IsGap() && !instr->IsReturn()) {
+      if (instr->ClobbersDoubleRegisters()) {
+        if (instr->HasDoubleRegisterResult()) {
+          ASSERT_EQ(1, x87_stack_.depth());
+        } else {
+          ASSERT_EQ(0, x87_stack_.depth());
+        }
       }
+      __ VerifyX87StackDepth(x87_stack_.depth());
     }
   }
-  EnsureSpaceForLazyDeopt();
-  return !is_aborted();
 }
 
 
......
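Note: the platform-specific GenerateBody() loop is deleted in favor of per-instruction Pre/Post hooks, presumably driven by a shared loop in the platform-independent lithium-codegen.cc. A minimal sketch of how such a driver would use the new hooks (assumed shape, not the actual shared code):

    for (current_instruction_ = 0;
         !is_aborted() && current_instruction_ < instructions_->length();
         current_instruction_++) {
      LInstruction* instr = instructions_->at(current_instruction_);
      // ... label-replacement, code-comment and position handling as in the
      // removed loop ...
      GenerateBodyInstructionPre(instr);   // ia32: flush the x87 stack if needed
      instr->CompileToNative(this);
      GenerateBodyInstructionPost(instr);  // ia32: x87 stack bookkeeping/asserts
    }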
@@ -453,8 +440,9 @@
       X87Stack copy(code->x87_stack());
       x87_stack_ = copy;
 
-      int pos = instructions_->at(code->instruction_index())->position();
-      RecordAndUpdatePosition(pos);
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(value->position());
 
       Comment(";;; <@%d,#%d> "
               "-------------------- Deferred %s --------------------",
......
@@ -532,6 +520,16 @@
 }
 
 
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+  ASSERT(x87_stack_.Contains(reg1));
+  ASSERT(x87_stack_.Contains(reg2));
+  x87_stack_.Fxch(reg1, 1);
+  x87_stack_.Fxch(reg2);
+  x87_stack_.pop();
+  x87_stack_.pop();
+}
+
+
 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
   ASSERT(is_mutable_);
   ASSERT(Contains(reg) && stack_depth_ > other_slot);
......
@@ -931,8 +929,6 @@
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
   ASSERT(instr != NULL);
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
   __ call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 
......
@@ -954,13 +950,12 @@
 
 void LCodeGen::CallRuntime(const Runtime::Function* fun,
                            int argc,
-                           LInstruction* instr) {
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
   ASSERT(instr != NULL);
   ASSERT(instr->HasPointerMap());
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
 
-  __ CallRuntime(fun, argc);
+  __ CallRuntime(fun, argc, save_doubles);
 
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 
......
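Note: CallRuntime() gains a SaveFPRegsMode parameter that is threaded through to MacroAssembler::CallRuntime(); DoCallRuntime (further down) passes instr->save_doubles(). An illustrative call site, assuming the usual kDontSaveFPRegs/kSaveFPRegs enumerators:

    // Ask the runtime-call sequence to preserve XMM registers:
    CallRuntime(instr->function(), instr->arity(), instr, kSaveFPRegs);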
@@ -1122,26 +1117,31 @@
 
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
+  ZoneList<Handle<JSObject> > objects(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode mode = it.rinfo()->rmode();
-    if (mode == RelocInfo::EMBEDDED_OBJECT &&
-        it.rinfo()->target_object()->IsMap()) {
-      Handle<Map> map(Map::cast(it.rinfo()->target_object()));
-      if (map->CanTransition()) {
+    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
+      if (it.rinfo()->target_object()->IsMap()) {
+        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
         maps.Add(map, zone());
+      } else if (it.rinfo()->target_object()->IsJSObject()) {
+        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
+        objects.Add(object, zone());
       }
     }
   }
 #ifdef VERIFY_HEAP
-  // This disables verification of weak embedded maps after full GC.
+  // This disables verification of weak embedded objects after full GC.
   // AddDependentCode can cause a GC, which would observe the state where
   // this code is not yet in the depended code lists of the embedded maps.
-  NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
+  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
 #endif
   for (int i = 0; i < maps.length(); i++) {
     maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
+  for (int i = 0; i < objects.length(); i++) {
+    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
+  }
 }
 
 
......
@@ -1246,7 +1246,7 @@
 
 
 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
-  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
+  LPointerMap empty_pointers(zone());
   RecordSafepoint(&empty_pointers, mode);
 }
 
......
@@ -1258,17 +1258,10 @@
 }
 
 
-void LCodeGen::RecordPosition(int position) {
+void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
-}
-
-
-void LCodeGen::RecordAndUpdatePosition(int position) {
-  if (position >= 0 && position != old_position_) {
-    masm()->positions_recorder()->RecordPosition(position);
-    old_position_ = position;
-  }
+  masm()->positions_recorder()->WriteRecordedPositions();
 }
 
 
......
@@ -1336,11 +1329,6 @@
       CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
       break;
     }
-    case CodeStub::NumberToString: {
-      NumberToStringStub stub;
-      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
-      break;
-    }
     case CodeStub::StringCompare: {
       StringCompareStub stub;
       CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
......
@@ -1733,9 +1721,9 @@
         case 9:
           __ lea(left, Operand(left, left, times_8, 0));
           break;
-       case 16:
-         __ shl(left, 4);
-         break;
+        case 16:
+          __ shl(left, 4);
+          break;
         default:
          __ imul(left, left, constant);
           break;
......
@@ -1967,9 +1955,10 @@
         __ movd(res, Operand(temp));
         __ psllq(res, 32);
         if (lower != 0) {
+          XMMRegister xmm_scratch = double_scratch0();
           __ Set(temp, Immediate(lower));
-          __ movd(xmm0, Operand(temp));
-          __ por(res, xmm0);
+          __ movd(xmm_scratch, Operand(temp));
+          __ por(res, xmm_scratch);
         }
       }
     }
......
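Note: this and many later hunks replace hard-coded uses of xmm0 as a scratch register with a double_scratch0() accessor. Call sites swap xmm0 for it one-for-one, so the accessor presumably still resolves to xmm0 on ia32 and merely centralizes the choice in one place. A guess at the header definition:

    XMMRegister double_scratch0() const { return xmm0; }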
@@ -2178,7 +2167,7 @@
     __ jmp(&return_right, Label::kNear);
 
     __ bind(&check_zero);
-    XMMRegister xmm_scratch = xmm0;
+    XMMRegister xmm_scratch = double_scratch0();
     __ xorps(xmm_scratch, xmm_scratch);
     __ ucomisd(left_reg, xmm_scratch);
     __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
......
@@ -2208,8 +2197,6 @@
     XMMRegister left = ToDoubleRegister(instr->left());
     XMMRegister right = ToDoubleRegister(instr->right());
     XMMRegister result = ToDoubleRegister(instr->result());
-    // Modulo uses a fixed result register.
-    ASSERT(instr->op() == Token::MOD || left.is(result));
     switch (instr->op()) {
       case Token::ADD:
         __ addsd(left, right);
......
@@ -2229,17 +2216,17 @@
       case Token::MOD: {
         // Pass two doubles as arguments on the stack.
         __ PrepareCallCFunction(4, eax);
-        __ movdbl(Operand(esp, 0 * kDoubleSize), left);
-        __ movdbl(Operand(esp, 1 * kDoubleSize), right);
+        __ movsd(Operand(esp, 0 * kDoubleSize), left);
+        __ movsd(Operand(esp, 1 * kDoubleSize), right);
         __ CallCFunction(
             ExternalReference::double_fp_operation(Token::MOD, isolate()),
             4);
 
         // Return value is in st(0) on ia32.
-        // Store it into the (fixed) result register.
+        // Store it into the result register.
         __ sub(Operand(esp), Immediate(kDoubleSize));
         __ fstp_d(Operand(esp, 0));
-        __ movdbl(result, Operand(esp, 0));
+        __ movsd(result, Operand(esp, 0));
         __ add(Operand(esp), Immediate(kDoubleSize));
         break;
       }
......
@@ -2272,6 +2259,8 @@
         __ PrepareCallCFunction(4, eax);
         X87Mov(Operand(esp, 1 * kDoubleSize), right);
         X87Mov(Operand(esp, 0), left);
+        X87Free(right);
+        ASSERT(left.is(result));
         X87PrepareToWrite(result);
         __ CallCFunction(
             ExternalReference::double_fp_operation(Token::MOD, isolate()),
......
2301 2290
}
2302 2291

  
2303 2292

  
2304
int LCodeGen::GetNextEmittedBlock() const {
2305
  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
2306
    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
2307
  }
2308
  return -1;
2309
}
2310

  
2311

  
2312 2293
template<class InstrType>
2313 2294
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
2314 2295
  int left_block = instr->TrueDestination(chunk_);
......
@@ -2340,25 +2321,6 @@
 }
 
 
-void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
-  Representation r = instr->hydrogen()->value()->representation();
-  if (r.IsSmiOrInteger32() || r.IsDouble()) {
-    EmitBranch(instr, no_condition);
-  } else {
-    ASSERT(r.IsTagged());
-    Register reg = ToRegister(instr->value());
-    HType type = instr->hydrogen()->value()->type();
-    if (type.IsTaggedNumber()) {
-      EmitBranch(instr, no_condition);
-    }
-    __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
-    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
-           factory()->heap_number_map());
-    EmitBranch(instr, equal);
-  }
-}
-
-
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsSmiOrInteger32()) {
......
@@ -2369,8 +2331,9 @@
     ASSERT(!info()->IsStub());
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister reg = ToDoubleRegister(instr->value());
-    __ xorps(xmm0, xmm0);
-    __ ucomisd(reg, xmm0);
+    XMMRegister xmm_scratch = double_scratch0();
+    __ xorps(xmm_scratch, xmm_scratch);
+    __ ucomisd(reg, xmm_scratch);
     EmitBranch(instr, not_equal);
   } else {
     ASSERT(r.IsTagged());
......
@@ -2390,8 +2353,9 @@
     } else if (type.IsHeapNumber()) {
       ASSERT(!info()->IsStub());
       CpuFeatureScope scope(masm(), SSE2);
-      __ xorps(xmm0, xmm0);
-      __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+      XMMRegister xmm_scratch = double_scratch0();
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
       EmitBranch(instr, not_equal);
     } else if (type.IsString()) {
       ASSERT(!info()->IsStub());
......
@@ -2476,8 +2440,9 @@
         __ j(not_equal, &not_heap_number, Label::kNear);
         if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
           CpuFeatureScope scope(masm(), SSE2);
-          __ xorps(xmm0, xmm0);
-          __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
+          XMMRegister xmm_scratch = double_scratch0();
+          __ xorps(xmm_scratch, xmm_scratch);
+          __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
         } else {
           __ fldz();
           __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
......
@@ -2521,6 +2486,10 @@
     case Token::EQ_STRICT:
       cond = equal;
       break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = not_equal;
+      break;
     case Token::LT:
       cond = is_unsigned ? below : less;
       break;
......
@@ -2556,10 +2525,15 @@
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
-      CpuFeatureScope scope(masm(), SSE2);
+      if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+        CpuFeatureScope scope(masm(), SSE2);
+        __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+      } else {
+        X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+        __ FCmp();
+      }
       // Don't base result on EFLAGS when a NaN is involved. Instead
       // jump to the false block.
-      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
       __ j(parity_even, instr->FalseLabel(chunk_));
     } else {
       if (right->IsConstantOperand()) {
......
@@ -2626,7 +2600,7 @@
   if (use_sse2) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->object());
-    __ movdbl(MemOperand(esp, 0), input_reg);
+    __ movsd(MemOperand(esp, 0), input_reg);
   } else {
     __ fstp_d(MemOperand(esp, 0));
   }
......
@@ -3016,14 +2990,6 @@
 }
 
 
-void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
-  Register object = ToRegister(instr->object());
-  Register result = ToRegister(instr->result());
-  __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
-  __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
-}
-
-
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
......
@@ -3096,7 +3062,7 @@
     BitVector::Iterator save_iterator(doubles);
     int count = 0;
     while (!save_iterator.Done()) {
-      __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
                 MemOperand(esp, count * kDoubleSize));
       save_iterator.Advance();
       count++;
......
@@ -3131,7 +3097,7 @@
 
 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   Register result = ToRegister(instr->result());
-  __ mov(result, Operand::ForCell(instr->hydrogen()->cell()));
+  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
     DeoptimizeIf(equal, instr->environment());
......
@@ -3154,7 +3120,7 @@
 
 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   Register value = ToRegister(instr->value());
-  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell();
+  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
 
   // If the cell we are storing to contains the hole it could have
   // been deleted from the property dictionary. In that case, we need
......
3245 3211

  
3246 3212
  if (access.IsExternalMemory()) {
3247 3213
    Register result = ToRegister(instr->result());
3248
    if (instr->object()->IsConstantOperand()) {
3249
      ExternalReference external_reference = ToExternalReference(
3250
          LConstantOperand::cast(instr->object()));
3251
      __ mov(result, MemOperand::StaticVariable(external_reference));
3214
    MemOperand operand = instr->object()->IsConstantOperand()
3215
        ? MemOperand::StaticVariable(ToExternalReference(
3216
                LConstantOperand::cast(instr->object())))
3217
        : MemOperand(ToRegister(instr->object()), offset);
3218
    if (access.representation().IsByte()) {
3219
      ASSERT(instr->hydrogen()->representation().IsInteger32());
3220
      __ movzx_b(result, operand);
3252 3221
    } else {
3253
      __ mov(result, MemOperand(ToRegister(instr->object()), offset));
3222
      __ mov(result, operand);
3254 3223
    }
3255 3224
    return;
3256 3225
  }
......
@@ -3261,7 +3230,7 @@
     if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
       XMMRegister result = ToDoubleRegister(instr->result());
-      __ movdbl(result, FieldOperand(object, offset));
+      __ movsd(result, FieldOperand(object, offset));
     } else {
       X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
     }
......
@@ -3269,11 +3238,15 @@
   }
 
   Register result = ToRegister(instr->result());
-  if (access.IsInobject()) {
-    __ mov(result, FieldOperand(object, offset));
-  } else {
+  if (!access.IsInobject()) {
     __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
-    __ mov(result, FieldOperand(result, offset));
+    object = result;
+  }
+  if (access.representation().IsByte()) {
+    ASSERT(instr->hydrogen()->representation().IsInteger32());
+    __ movzx_b(result, FieldOperand(object, offset));
+  } else {
+    __ mov(result, FieldOperand(object, offset));
   }
 }
 
......
@@ -3349,6 +3322,12 @@
 }
 
 
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
 void LCodeGen::DoLoadExternalArrayPointer(
     LLoadExternalArrayPointer* instr) {
   Register result = ToRegister(instr->result());
......
@@ -3405,7 +3384,7 @@
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
-      __ movdbl(ToDoubleRegister(instr->result()), operand);
+      __ movsd(ToDoubleRegister(instr->result()), operand);
     } else {
       X87Mov(ToX87Register(instr->result()), operand);
     }
......
@@ -3476,7 +3455,7 @@
   if (CpuFeatures::IsSupported(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister result = ToDoubleRegister(instr->result());
-    __ movdbl(result, double_load_operand);
+    __ movsd(result, double_load_operand);
   } else {
     X87Mov(ToX87Register(instr->result()), double_load_operand);
   }
......
@@ -3693,7 +3672,6 @@
   __ bind(&invoke);
   ASSERT(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
   ParameterCount actual(eax);
......
@@ -3778,9 +3756,6 @@
   bool can_invoke_directly =
       dont_adapt_arguments || formal_parameter_count == arity;
 
-  LPointerMap* pointers = instr->pointer_map();
-  RecordPosition(pointers->position());
-
   if (can_invoke_directly) {
     if (edi_state == EDI_UNINITIALIZED) {
       __ LoadHeapObject(edi, function);
......
@@ -3805,6 +3780,7 @@
     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   } else {
     // We need to adapt arguments.
+    LPointerMap* pointers = instr->pointer_map();
     SafepointGenerator generator(
         this, pointers, Safepoint::kLazyDeopt);
     ParameterCount count(arity);
......
@@ -3903,11 +3879,11 @@
 
   CpuFeatureScope scope(masm(), SSE2);
   if (r.IsDouble()) {
-    XMMRegister  scratch = xmm0;
+    XMMRegister scratch = double_scratch0();
     XMMRegister input_reg = ToDoubleRegister(instr->value());
     __ xorps(scratch, scratch);
     __ subsd(scratch, input_reg);
-    __ pand(input_reg, scratch);
+    __ andps(input_reg, scratch);
   } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
   } else {  // Tagged case.
......
@@ -3924,7 +3900,7 @@
 
 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   CpuFeatureScope scope(masm(), SSE2);
-  XMMRegister xmm_scratch = xmm0;
+  XMMRegister xmm_scratch = double_scratch0();
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
 
......
@@ -3977,7 +3953,7 @@
     __ bind(&negative_sign);
     // Truncate, then compare and compensate.
     __ cvttsd2si(output_reg, Operand(input_reg));
-    __ cvtsi2sd(xmm_scratch, output_reg);
+    __ Cvtsi2sd(xmm_scratch, output_reg);
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ sub(output_reg, Immediate(1));
......
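Note: the capitalized Cvtsi2sd is a MacroAssembler wrapper rather than the raw SSE2 instruction. Since cvtsi2sd writes only the low 64 bits of its destination, it keeps a false dependency on the register's previous contents; a plausible sketch of what the wrapper adds (an assumption based on the usual idiom, not verified against the macro assembler):

    void MacroAssembler::Cvtsi2sd(XMMRegister dst, const Operand& src) {
      xorps(dst, dst);     // break the dependency on dst's old value
      cvtsi2sd(dst, src);
    }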
@@ -3992,14 +3968,14 @@
   CpuFeatureScope scope(masm(), SSE2);
   Register output_reg = ToRegister(instr->result());
   XMMRegister input_reg = ToDoubleRegister(instr->value());
-  XMMRegister xmm_scratch = xmm0;
+  XMMRegister xmm_scratch = double_scratch0();
   XMMRegister input_temp = ToDoubleRegister(instr->temp());
   ExternalReference one_half = ExternalReference::address_of_one_half();
   ExternalReference minus_one_half =
       ExternalReference::address_of_minus_one_half();
 
   Label done, round_to_zero, below_one_half, do_not_compensate;
-  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
+  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
   __ ucomisd(xmm_scratch, input_reg);
   __ j(above, &below_one_half);
 
......
@@ -4013,7 +3989,7 @@
   __ jmp(&done);
 
   __ bind(&below_one_half);
-  __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
+  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
   __ ucomisd(xmm_scratch, input_reg);
   __ j(below_equal, &round_to_zero);
 
......
@@ -4027,7 +4003,7 @@
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
 
-  __ cvtsi2sd(xmm_scratch, output_reg);
+  __ Cvtsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
   __ j(equal, &done);
   __ sub(output_reg, Immediate(1));
......
@@ -4059,7 +4035,7 @@
 
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   CpuFeatureScope scope(masm(), SSE2);
-  XMMRegister xmm_scratch = xmm0;
+  XMMRegister xmm_scratch = double_scratch0();
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = ToRegister(instr->temp());
   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
......
@@ -4178,8 +4154,7 @@
   // by computing:
   // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
   XMMRegister result = ToDoubleRegister(instr->result());
-  // We use xmm0 as fixed scratch register here.
-  XMMRegister scratch4 = xmm0;
+  XMMRegister scratch4 = double_scratch0();
   __ mov(scratch3, Immediate(0x49800000));  // 1.0 x 2^20 as single.
   __ movd(scratch4, scratch3);
   __ movd(result, random);
......
@@ -4193,29 +4168,29 @@
   CpuFeatureScope scope(masm(), SSE2);
   ASSERT(instr->value()->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister xmm_scratch = double_scratch0();
   Label positive, done, zero;
-  __ xorps(xmm0, xmm0);
-  __ ucomisd(input_reg, xmm0);
+  __ xorps(xmm_scratch, xmm_scratch);
+  __ ucomisd(input_reg, xmm_scratch);
   __ j(above, &positive, Label::kNear);
   __ j(equal, &zero, Label::kNear);
   ExternalReference nan =
       ExternalReference::address_of_canonical_non_hole_nan();
-  __ movdbl(input_reg, Operand::StaticVariable(nan));
+  __ movsd(input_reg, Operand::StaticVariable(nan));
   __ jmp(&done, Label::kNear);
   __ bind(&zero);
-  __ push(Immediate(0xFFF00000));
-  __ push(Immediate(0));
-  __ movdbl(input_reg, Operand(esp, 0));
-  __ add(Operand(esp), Immediate(kDoubleSize));
+  ExternalReference ninf =
+      ExternalReference::address_of_negative_infinity();
+  __ movsd(input_reg, Operand::StaticVariable(ninf));
   __ jmp(&done, Label::kNear);
   __ bind(&positive);
   __ fldln2();
   __ sub(Operand(esp), Immediate(kDoubleSize));
-  __ movdbl(Operand(esp, 0), input_reg);
+  __ movsd(Operand(esp, 0), input_reg);
   __ fld_d(Operand(esp, 0));
   __ fyl2x();
   __ fstp_d(Operand(esp, 0));
-  __ movdbl(input_reg, Operand(esp, 0));
+  __ movsd(input_reg, Operand(esp, 0));
   __ add(Operand(esp), Immediate(kDoubleSize));
   __ bind(&done);
 }
......
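Note: the removed lines in DoMathLog materialized -Infinity by pushing its IEEE-754 bit pattern (high word 0xFFF00000, low word 0) onto the stack; the new code loads it from a static external reference instead. A standalone check of that bit-pattern identity:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    int main() {
      uint64_t bits = 0xFFF0000000000000ull;  // sign=1, exponent all ones, mantissa=0
      double d;
      std::memcpy(&d, &bits, sizeof d);
      assert(d == -std::numeric_limits<double>::infinity());
    }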
@@ -4225,10 +4200,11 @@
   CpuFeatureScope scope(masm(), SSE2);
   XMMRegister input = ToDoubleRegister(instr->value());
   XMMRegister result = ToDoubleRegister(instr->result());
+  XMMRegister temp0 = double_scratch0();
   Register temp1 = ToRegister(instr->temp1());
   Register temp2 = ToRegister(instr->temp2());
 
-  MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
+  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
 }
 
 
......
@@ -4273,7 +4249,6 @@
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
     LPointerMap* pointers = instr->pointer_map();
-    RecordPosition(pointers->position());
     SafepointGenerator generator(
         this, pointers, Safepoint::kLazyDeopt);
     ParameterCount count(instr->arity());
4409 4384

  
4410 4385

  
4411 4386
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4412
  CallRuntime(instr->function(), instr->arity(), instr);
4387
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
4413 4388
}
4414 4389

  
4415 4390

  
......
4441 4416
            ToExternalReference(LConstantOperand::cast(instr->object())))
4442 4417
        : MemOperand(ToRegister(instr->object()), offset);
4443 4418
    if (instr->value()->IsConstantOperand()) {
4419
      ASSERT(!representation.IsByte());
4444 4420
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
4445 4421
      __ mov(operand, Immediate(ToInteger32(operand_value)));
4446 4422
    } else {
4447 4423
      Register value = ToRegister(instr->value());
4448
      __ mov(operand, value);
4424
      if (representation.IsByte()) {
4425
        __ mov_b(operand, value);
4426
      } else {
4427
        __ mov(operand, value);
4428
      }
4449 4429
    }
4450 4430
    return;
4451 4431
  }
......
@@ -4480,7 +4460,7 @@
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
       XMMRegister value = ToDoubleRegister(instr->value());
-      __ movdbl(FieldOperand(object, offset), value);
+      __ movsd(FieldOperand(object, offset), value);
     } else {
       X87Register value = ToX87Register(instr->value());
       X87Mov(FieldOperand(object, offset), value);
......
@@ -4518,17 +4498,28 @@
     __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
   }
 
+  MemOperand operand = FieldOperand(write_register, offset);
   if (instr->value()->IsConstantOperand()) {
     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
     if (operand_value->IsRegister()) {
-      __ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
+      Register value = ToRegister(operand_value);
+      if (representation.IsByte()) {
+        __ mov_b(operand, value);
+      } else {
+        __ mov(operand, value);
+      }
     } else {
       Handle<Object> handle_value = ToHandle(operand_value);
       ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
-      __ mov(FieldOperand(write_register, offset), handle_value);
+      __ mov(operand, handle_value);
     }
   } else {
-    __ mov(FieldOperand(write_register, offset), ToRegister(instr->value()));
+    Register value = ToRegister(instr->value());
+    if (representation.IsByte()) {
+      __ mov_b(operand, value);
+    } else {
+      __ mov(operand, value);
+    }
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
......
@@ -4609,8 +4600,9 @@
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
     if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
-      __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
-      __ movss(operand, xmm0);
+      XMMRegister xmm_scratch = double_scratch0();
+      __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+      __ movss(operand, xmm_scratch);
     } else {
      __ fld(0);
       __ fstp_s(operand);
......
@@ -4618,7 +4610,7 @@
   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
     if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
-      __ movdbl(operand, ToDoubleRegister(instr->value()));
+      __ movsd(operand, ToDoubleRegister(instr->value()));
     } else {
       X87Mov(operand, ToX87Register(instr->value()));
     }
......
@@ -4676,11 +4668,11 @@
       __ ucomisd(value, value);
       __ j(parity_odd, &have_value);  // NaN.
 
-      __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+      __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
       __ bind(&have_value);
     }
 
-    __ movdbl(double_store_operand, value);
+    __ movsd(double_store_operand, value);
   } else {
     // Can't use SSE2 in the serializer
     if (instr->hydrogen()->IsConstantHoleStore()) {
......
@@ -4803,8 +4795,10 @@
 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   Register object = ToRegister(instr->object());
   Register temp = ToRegister(instr->temp());
-  __ TestJSArrayForAllocationMemento(object, temp);
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   DeoptimizeIf(equal, instr->environment());
+  __ bind(&no_memento_found);
 }
 
 
......
@@ -4825,9 +4819,8 @@
   __ j(not_equal, &not_applicable, branch_distance);
   if (is_simple_map_transition) {
     Register new_map_reg = ToRegister(instr->new_map_temp());
-    Handle<Map> map = instr->hydrogen()->transitioned_map();
     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
-           Immediate(map));
+           Immediate(to_map));
     // Write barrier.
     ASSERT_NE(instr->temp(), NULL);
     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
4978 4971
  ASSERT(output->IsDoubleRegister());
4979 4972
  if (CpuFeatures::IsSupported(SSE2)) {
4980 4973
    CpuFeatureScope scope(masm(), SSE2);
4981
    __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4974
    __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
4982 4975
  } else if (input->IsRegister()) {
4983 4976
    Register input_reg = ToRegister(input);
4984 4977
    __ push(input_reg);
......
5001 4994

  
5002 4995

  
5003 4996
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
5004
  CpuFeatureScope scope(masm(), SSE2);
5005 4997
  LOperand* input = instr->value();
5006 4998
  LOperand* output = instr->result();
5007
  LOperand* temp = instr->temp();
4999
  if (CpuFeatures::IsSupported(SSE2)) {
5000
    CpuFeatureScope scope(masm(), SSE2);
5001
    LOperand* temp = instr->temp();
5008 5002

  
5009
  __ LoadUint32(ToDoubleRegister(output),
5010
                ToRegister(input),
5011
                ToDoubleRegister(temp));
5003
    __ LoadUint32(ToDoubleRegister(output),
5004
                  ToRegister(input),
5005
                  ToDoubleRegister(temp));
5006
  } else {
5007
    X87Register res = ToX87Register(output);
5008
    X87PrepareToWrite(res);
5009
    __ LoadUint32NoSSE2(ToRegister(input));
5010
    X87CommitWrite(res);
5011
  }
5012
}
5013

  
5014

  
5015
void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
5016
  Register input = ToRegister(instr->value());
5017
  if (!instr->hydrogen()->value()->HasRange() ||
5018
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
5019
    __ test(input, Immediate(0xc0000000));
5020
    DeoptimizeIf(not_zero, instr->environment());
5021
  }
5022
  __ SmiTag(input);
5012 5023
}
5013 5024

  
5014 5025

  
......
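Note on the 0xc0000000 test in the new DoUint32ToSmi: an ia32 smi carries a 31-bit signed payload, so an unsigned 32-bit value only fits if it is below 2^30; testing bits 31 and 30 together rejects everything else in a single instruction. The same predicate, stated standalone:

    #include <cstdint>

    // True iff an unsigned 32-bit value fits in an ia32 smi (31-bit signed payload).
    bool Uint32FitsInSmi(uint32_t value) {
      return (value & 0xc0000000u) == 0;  // i.e. value < 2^30
    }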
@@ -5073,6 +5084,7 @@
   Label slow;
   Register reg = ToRegister(value);
   Register tmp = reg.is(eax) ? ecx : eax;
+  XMMRegister xmm_scratch = double_scratch0();
 
   // Preserve the value of all registers.
   PushSafepointRegistersScope scope(this);
......
@@ -5087,7 +5099,7 @@
     __ xor_(reg, 0x80000000);
     if (CpuFeatures::IsSupported(SSE2)) {
       CpuFeatureScope feature_scope(masm(), SSE2);
-      __ cvtsi2sd(xmm0, Operand(reg));
+      __ Cvtsi2sd(xmm_scratch, Operand(reg));
     } else {
       __ push(reg);
       __ fild_s(Operand(esp, 0));
5096 5108
  } else {
5097 5109
    if (CpuFeatures::IsSupported(SSE2)) {
5098 5110
      CpuFeatureScope feature_scope(masm(), SSE2);
5099
      __ LoadUint32(xmm0, reg,
5111
      __ LoadUint32(xmm_scratch, reg,
5100 5112
                    ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
5101 5113
    } else {
5102 5114
      // There's no fild variant for unsigned values, so zero-extend to a 64-bit
......
5132 5144
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
5133 5145
  if (!reg.is(eax)) __ mov(reg, eax);
5134 5146

  
5135
  // Done. Put the value in xmm0 into the value of the allocated heap
5147
  // Done. Put the value in xmm_scratch into the value of the allocated heap
5136 5148
  // number.
5137 5149
  __ bind(&done);
5138 5150
  if (CpuFeatures::IsSupported(SSE2)) {
5139 5151
    CpuFeatureScope feature_scope(masm(), SSE2);
5140
    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
5152
    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
5141 5153
  } else {
5142 5154
    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
5143 5155
  }
......
@@ -5181,7 +5193,7 @@
   if (use_sse2) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(instr->value());
-    __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   } else {
     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   }
......
@@ -5308,7 +5320,7 @@
                                 bool deoptimize_on_minus_zero,
                                 LEnvironment* env,
                                 NumberUntagDMode mode) {
-  Label load_smi, done;
+  Label convert, load_smi, done;
 
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
    // Smi check.
......
5317 5329
    // Heap number map check.
5318 5330
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5319 5331
           factory()->heap_number_map());
5320
    if (!can_convert_undefined_to_nan) {
5321
      DeoptimizeIf(not_equal, env);
5332
    if (can_convert_undefined_to_nan) {
5333
      __ j(not_equal, &convert, Label::kNear);
5322 5334
    } else {
5323
      Label heap_number, convert;
5324
      __ j(equal, &heap_number, Label::kNear);
5325

  
5326
      // Convert undefined (and hole) to NaN.
5327
      __ cmp(input_reg, factory()->undefined_value());
5328 5335
      DeoptimizeIf(not_equal, env);
5329

  
5330
      __ bind(&convert);
5331
      ExternalReference nan =
5332
          ExternalReference::address_of_canonical_non_hole_nan();
5333
      __ movdbl(result_reg, Operand::StaticVariable(nan));
5334
      __ jmp(&done, Label::kNear);
5335

  
5336
      __ bind(&heap_number);
5337 5336
    }
5337

  
5338 5338
    // Heap number to XMM conversion.
5339
    __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
5339
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
5340

  
5340 5341
    if (deoptimize_on_minus_zero) {
5341
      XMMRegister xmm_scratch = xmm0;
5342
      XMMRegister xmm_scratch = double_scratch0();
5342 5343
      __ xorps(xmm_scratch, xmm_scratch);
5343 5344
      __ ucomisd(result_reg, xmm_scratch);
5344 5345
      __ j(not_zero, &done, Label::kNear);
......
@@ -5347,6 +5348,19 @@
       DeoptimizeIf(not_zero, env);
     }
     __ jmp(&done, Label::kNear);
+
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+
+      // Convert undefined (and hole) to NaN.
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, env);
+
+      ExternalReference nan =
+          ExternalReference::address_of_canonical_non_hole_nan();
+      __ movsd(result_reg, Operand::StaticVariable(nan));
+      __ jmp(&done, Label::kNear);
+    }
   } else {
     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   }
......
@@ -5356,7 +5370,7 @@
   // input register since we avoid dependencies.
   __ mov(temp_reg, input_reg);
   __ SmiUntag(temp_reg);  // Untag smi before converting to float.
-  __ cvtsi2sd(result_reg, Operand(temp_reg));
+  __ Cvtsi2sd(result_reg, Operand(temp_reg));
   __ bind(&done);
 }
 
......
@@ -5364,25 +5378,36 @@
 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
   Register input_reg = ToRegister(instr->value());
 
-
   if (instr->truncating()) {
-    Label heap_number, slow_case;
+    Label no_heap_number, check_bools, check_false;
 
     // Heap number map check.
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            factory()->heap_number_map());
-    __ j(equal, &heap_number, Label::kNear);
+    __ j(not_equal, &no_heap_number, Label::kNear);
+    __ TruncateHeapNumberToI(input_reg, input_reg);
+    __ jmp(done);
 
-    // Check for undefined. Undefined is converted to zero for truncating
-    // conversions.
+    __ bind(&no_heap_number);
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
     __ cmp(input_reg, factory()->undefined_value());
+    __ j(not_equal, &check_bools, Label::kNear);
+    __ Set(input_reg, Immediate(0));
+    __ jmp(done);
+
+    __ bind(&check_bools);
+    __ cmp(input_reg, factory()->true_value());
+    __ j(not_equal, &check_false, Label::kNear);
+    __ Set(input_reg, Immediate(1));
+    __ jmp(done);
+
+    __ bind(&check_false);
+    __ cmp(input_reg, factory()->false_value());
     __ RecordComment("Deferred TaggedToI: cannot truncate");
     DeoptimizeIf(not_equal, instr->environment());
-    __ mov(input_reg, 0);
+    __ Set(input_reg, Immediate(0));
    __ jmp(done);
-
-    __ bind(&heap_number);
-    __ TruncateHeapNumberToI(input_reg, input_reg);
   } else {
     Label bailout;
     XMMRegister scratch = (instr->temp() != NULL)
......
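Note: the truncating path now handles all oddballs instead of only undefined: heap numbers go through TruncateHeapNumberToI, undefined and false become 0, true becomes 1, and anything else deoptimizes. A standalone restatement of the oddball mapping (hypothetical helper, not V8 code):

    #include <cstdint>

    enum Oddball { kUndefined, kFalse, kTrue };

    int32_t TruncateOddballToInt32(Oddball v) {
      return v == kTrue ? 1 : 0;  // undefined and false truncate to zero
    }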
@@ -5417,12 +5442,16 @@
   Register input_reg = ToRegister(input);
   ASSERT(input_reg.is(ToRegister(instr->result())));
 
-  DeferredTaggedToI* deferred =
-      new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred =
+        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
 
-  __ JumpIfNotSmi(input_reg, deferred->entry());
-  __ SmiUntag(input_reg);
-  __ bind(deferred->exit());
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
 }
 
 
......
@@ -5487,7 +5516,8 @@
     if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
       CpuFeatureScope scope(masm(), SSE2);
       XMMRegister input_reg = ToDoubleRegister(input);
-       __ DoubleToI(result_reg, input_reg, xmm0,
+      XMMRegister xmm_scratch = double_scratch0();
+       __ DoubleToI(result_reg, input_reg, xmm_scratch,
           instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
     } else {
       X87Register input_reg = ToX87Register(input);
......
@@ -5514,7 +5544,8 @@
   if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
     CpuFeatureScope scope(masm(), SSE2);
     XMMRegister input_reg = ToDoubleRegister(input);
-    __ DoubleToI(result_reg, input_reg, xmm0,
+    XMMRegister xmm_scratch = double_scratch0();
+    __ DoubleToI(result_reg, input_reg, xmm_scratch,
         instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
   } else {
     X87Register input_reg = ToX87Register(input);
......
@@ -5594,7 +5625,7 @@
 
 
 void LCodeGen::DoCheckValue(LCheckValue* instr) {
-  Handle<HeapObject> object = instr->hydrogen()->object();
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
   if (instr->hydrogen()->object_in_new_space()) {
     Register reg = ToRegister(instr->value());
     Handle<Cell> cell = isolate()->factory()->NewCell(object);
......
5649 5680
  ASSERT(input->IsRegister());
5650 5681
  Register reg = ToRegister(input);
5651 5682

  
5652
  SmallMapList* map_set = instr->hydrogen()->map_set();
5653

  
5654 5683
  DeferredCheckMaps* deferred = NULL;
5655 5684
  if (instr->hydrogen()->has_migration_target()) {
5656 5685
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
5657 5686
    __ bind(deferred->check_maps());
5658 5687
  }
5659 5688

  
5689
  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
5660 5690
  Label success;
5661
  for (int i = 0; i < map_set->length() - 1; i++) {
5662
    Handle<Map> map = map_set->at(i);
5691
  for (int i = 0; i < map_set.size() - 1; i++) {
5692
    Handle<Map> map = map_set.at(i).handle();
5663 5693
    __ CompareMap(reg, map, &success);
5664 5694
    __ j(equal, &success);
5665 5695
  }
5666 5696

  
5667
  Handle<Map> map = map_set->last();
5697
  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
5668 5698
  __ CompareMap(reg, map, &success);
5669 5699
  if (instr->hydrogen()->has_migration_target()) {
5670 5700
    __ j(not_equal, deferred->entry());
......
5679 5709
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5680 5710
  CpuFeatureScope scope(masm(), SSE2);
5681 5711
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
5712
  XMMRegister xmm_scratch = double_scratch0();
5682 5713
  Register result_reg = ToRegister(instr->result());
5683
  __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
5714
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
5684 5715
}
5685 5716

  
5686 5717

  
......
@@ -5696,6 +5727,8 @@
 
   ASSERT(instr->unclamped()->Equals(instr->result()));
   Register input_reg = ToRegister(instr->unclamped());
+  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+  XMMRegister xmm_scratch = double_scratch0();
   Label is_smi, done, heap_number;
 
   __ JumpIfSmi(input_reg, &is_smi);
5714 5747

  
5715 5748
  // Heap number
5716 5749
  __ bind(&heap_number);
5717
  __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
5718
  __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
5750
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
5751
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
5719 5752
  __ jmp(&done, Label::kNear);
5720 5753

  
5721 5754
  // smi
......
6146 6179
}
6147 6180

  
6148 6181

  
6149
void LCodeGen::EnsureSpaceForLazyDeopt() {
6182
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
6150 6183
  if (!info()->IsStub()) {
6151 6184
    // Ensure that we have enough space after the previous lazy-bailout
6152 6185
    // instruction for patching the code here.
6153 6186
    int current_pc = masm()->pc_offset();
6154
    int patch_size = Deoptimizer::patch_size();
6155
    if (current_pc < last_lazy_deopt_pc_ + patch_size) {
6156
      int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
6187
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
6188
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
6157 6189
      __ Nop(padding_size);
6158 6190
    }
6159 6191
  }
......
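Note: EnsureSpaceForLazyDeopt() now takes the required gap explicitly instead of always using Deoptimizer::patch_size(); the call sites below pass exactly that value, so behavior is unchanged here. The padding rule itself, as a worked example with assumed numbers:

    // With last_lazy_deopt_pc_ = 100, space_needed = 13 and current_pc = 105,
    // the code is 8 bytes short of the patchable gap, so __ Nop(8) is emitted:
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;  // 100 + 13 - 105 = 8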
@@ -6162,7 +6194,7 @@
 
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
-  EnsureSpaceForLazyDeopt();
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   ASSERT(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
......
@@ -6233,7 +6265,7 @@
     CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
-    EnsureSpaceForLazyDeopt();
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
     __ bind(&done);
     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
     safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
......
@@ -6246,7 +6278,7 @@
         ExternalReference::address_of_stack_limit(isolate());
     __ cmp(esp, Operand::StaticVariable(stack_limit));
     __ j(below, deferred_stack_check->entry());
-    EnsureSpaceForLazyDeopt();
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
     __ bind(instr->done_label());
     deferred_stack_check->SetExit(instr->done_label());
     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);

Also available in: Unified diff