// deps/v8/src/ia32/lithium-codegen-ia32.cc @ f230a1cf

// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "ia32/lithium-codegen-ia32.h"
#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
#include "codegen.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {


static SaveFPRegsMode GetSaveFPRegsMode() {
  // We don't need to save floating point regs when generating the snapshot
  return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
}


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};
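
// Illustrative use (a sketch, not a verbatim call site): the generator is
// passed as the CallWrapper so that the safepoint is recorded immediately
// after the embedded call instruction, e.g. when invoking a function:
//
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   ParameterCount count(arity);
//   __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);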


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


#ifdef _MSC_VER
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif
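
// On Windows, memory beyond the stack guard page is only committed when the
// guard page is touched, and the guard moves down one page at a time. The
// store in the loop above therefore probes each 4 KB page of the region the
// frame is about to occupy, from the highest offset down toward esp, so a
// large frame cannot skip past the guard page. For example, a 40 KB frame
// (offset of ten pages) performs nine probe stores, at offsets 36 KB,
// 32 KB, ..., 4 KB.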


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). ecx is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ test(ecx, Operand(ecx));
      __ j(zero, &ok, Label::kNear);
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ mov(Operand(esp, receiver_offset),
             Immediate(isolate()->factory()->undefined_value()));
      __ bind(&ok);
    }

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ mov(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }
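
  // Example of the padding path above: on entry the stack (growing down)
  // holds [args, receiver, return address] with esp pointing at the return
  // address. If esp + 4 is not already double-aligned, one extra word is
  // pushed and the num_parameters + 2 words above it are each copied down
  // one slot; the vacated top slot is overwritten with kAlignmentZapValue,
  // and edx records kAlignmentPaddingPushed so the epilogue can undo the
  // shift.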

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  ASSERT(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }

    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
      Comment(";;; Save clobbered callee double registers");
      CpuFeatureScope scope(masm(), SSE2);
      int count = 0;
      BitVector* doubles = chunk()->allocated_double_registers();
      BitVector::Iterator save_iterator(doubles);
      while (!save_iterator.Done()) {
        __ movsd(MemOperand(esp, count * kDoubleSize),
                 XMMRegister::FromAllocationIndex(save_iterator.Current()));
        save_iterator.Advance();
        count++;
      }
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    __ push(edi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both eax and esi.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in esi.
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        __ RecordWriteContextSlot(esi,
                                  context_offset,
                                  eax,
                                  ebx,
                                  kDontSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ mov(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word. The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer, return
    // address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() +
           5 + graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 1);
  __ sub(esp, Immediate((slots - 1) * kPointerSize));
}
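
// Example of the frame-size arithmetic above: if the optimized code needs
// GetStackSlotCount() == 10 spill slots and the unoptimized frame being
// subsumed already occupies UnoptimizedFrameSlots() == 4 of them, then
// slots == 6. One of those six words was just pushed to preserve the first
// local, so only (slots - 1) * kPointerSize == 20 bytes remain to be
// subtracted from esp.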


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);
}


void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
  if (!CpuFeatures::IsSupported(SSE2)) {
    if (instr->IsGoto()) {
      x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
    } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
               !instr->IsGap() && !instr->IsReturn()) {
      if (instr->ClobbersDoubleRegisters()) {
        if (instr->HasDoubleRegisterResult()) {
          ASSERT_EQ(1, x87_stack_.depth());
        } else {
          ASSERT_EQ(0, x87_stack_.depth());
        }
      }
      __ VerifyX87StackDepth(x87_stack_.depth());
    }
  }
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        // Push a PC inside the function so that the deopt code can find where
        // the deopt comes from. It doesn't have to be the precise return
        // address of a "calling" LAZY deopt; it only has to be somewhere
        // inside the code body.
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
        // Push the continuation which was stashed where the ebp should
        // be. Replace it with the saved ebp.
        __ push(MemOperand(esp, 3 * kPointerSize));
        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];
      X87Stack copy(code->x87_stack());
      x87_stack_ = copy;

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  if (!info()->IsStub()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


X87Register LCodeGen::ToX87Register(int index) const {
  return X87Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


void LCodeGen::X87LoadForUsage(X87Register reg) {
  ASSERT(x87_stack_.Contains(reg));
  x87_stack_.Fxch(reg);
  x87_stack_.pop();
}


void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
  ASSERT(x87_stack_.Contains(reg1));
  ASSERT(x87_stack_.Contains(reg2));
  x87_stack_.Fxch(reg1, 1);
  x87_stack_.Fxch(reg2);
  x87_stack_.pop();
  x87_stack_.pop();
}


void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
  ASSERT(is_mutable_);
  ASSERT(Contains(reg) && stack_depth_ > other_slot);
  int i  = ArrayIndex(reg);
  int st = st2idx(i);
  if (st != other_slot) {
    int other_i = st2idx(other_slot);
    X87Register other = stack_[other_i];
    stack_[other_i]   = reg;
    stack_[i]         = other;
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}
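
// Example of the three-exchange case above: with st != 0 and
// other_slot != 0, x87 fxch can only swap with ST(0), so swapping ST(st)
// and ST(other_slot) has to go through the top of stack:
//   fxch(st)          ; ST(0) <-> ST(st)
//   fxch(other_slot)  ; ST(0) <-> ST(other_slot)
//   fxch(st)          ; ST(0) <-> ST(st), restoring the old ST(0)
// The net effect swaps ST(st) with ST(other_slot) and leaves every other
// slot, including ST(0), unchanged.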


int LCodeGen::X87Stack::st2idx(int pos) {
  return stack_depth_ - pos - 1;
}


int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87Stack::Contains(X87Register reg) {
  for (int i = 0; i < stack_depth_; i++) {
    if (stack_[i].is(reg)) return true;
  }
  return false;
}


void LCodeGen::X87Stack::Free(X87Register reg) {
  ASSERT(is_mutable_);
  ASSERT(Contains(reg));
  int i  = ArrayIndex(reg);
  int st = st2idx(i);
  if (st > 0) {
    // Keep track of how fstp(st) changes the order of elements.
    int tos_i = st2idx(0);
    stack_[i] = stack_[tos_i];
  }
  pop();
  __ fstp(st);
}
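
// Example for Free() above: with the virtual stack [a, b, c] (c on top,
// depth 3), freeing b gives st == 1. fstp(1) pops the physical top into
// ST(1), i.e. c overwrites b's slot, so the bookkeeping mirrors that by
// copying the top-of-stack entry into b's array slot before popping: the
// stack becomes [a, c] with depth 2.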


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (x87_stack_.Contains(dst)) {
    x87_stack_.Fxch(dst);
    __ fstp(0);
  } else {
    x87_stack_.push(dst);
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  ASSERT(!src.is_reg_only());
  switch (opts) {
    case kX87DoubleOperand:
      __ fld_d(src);
      break;
    case kX87FloatOperand:
      __ fld_s(src);
      break;
    case kX87IntOperand:
      __ fild_s(src);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
  ASSERT(!dst.is_reg_only());
  x87_stack_.Fxch(src);
  switch (opts) {
    case kX87DoubleOperand:
      __ fst_d(dst);
      break;
    case kX87IntOperand:
      __ fist_s(dst);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
  ASSERT(is_mutable_);
  if (Contains(reg)) {
    Free(reg);
  }
  // Mark this register as the next register to write to.
  stack_[stack_depth_] = reg;
}


void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
  ASSERT(is_mutable_);
  // Assert the reg is prepared to write, but not on the virtual stack yet.
  ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
      stack_depth_ < X87Register::kNumAllocatableRegisters);
  stack_depth_++;
}


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions.
  ASSERT(result.is(left));
  x87_stack_.Fxch(right, 1);
  x87_stack_.Fxch(left);
}


void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush stack from tos down, since FreeX87() will mess with tos.
    for (int i = stack_depth_-1; i >= 0; i--) {
      X87Register reg = stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack.
      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
        continue;
      }
      Free(reg);
      if (i < stack_depth_-1) i++;
    }
  }
  if (instr->IsReturn()) {
    while (stack_depth_ > 0) {
      __ fstp(0);
      stack_depth_--;
    }
    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
  }
}
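
// Note on the index adjustment in the flush loop above: Free() relocates
// the current top-of-stack entry down into the freed slot i. When slots
// above i still remain afterwards (i < stack_depth_ - 1), the relocated
// entry now lives at index i and has to be examined again, so i is bumped
// to cancel the loop's decrement.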


void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
  ASSERT(stack_depth_ <= 1);
  // If this is ever used for new stubs that produce two pairs of doubles
  // joined into two phis, this assert will hit. That situation is not
  // handled, since the two stacks might have st0 and st1 swapped.
  if (current_block_id + 1 != goto_instr->block_id()) {
    // If we have a value on the x87 stack on leaving a block, it must be a
    // phi input. If the next block we compile is not the join block, we have
    // to discard the stack state.
    stack_depth_ = 0;
  }
}


void LCodeGen::EmitFlushX87ForDeopt() {
  // The deoptimizer does not support x87 registers. But as long as we
  // deopt from a stub it's not a problem, since we will re-materialize the
  // original stub inputs, which can't be double registers.
  ASSERT(info()->IsStub());
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
    __ pushfd();
    __ VerifyX87StackDepth(x87_stack_.depth());
    __ popfd();
  }
  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
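
// Example: on ia32 a Smi is the integer shifted left by the one tag bit
// (the tag itself is 0), so for value == 5 the returned word is 10 (0xA),
// and for value == -1 it is -2 (0xFFFFFFFE).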


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  return Operand(ebp, StackSlotOffset(op->index()));
}


Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  ASSERT(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  ASSERT(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Before instructions that can deopt, we normally flush the x87 stack. But
  // we can have inputs or outputs of the current instruction on the stack,
  // thus we need to flush them here from the physical stack to leave it in a
  // consistent state.
  if (x87_stack_.depth() > 0) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    EmitFlushX87ForDeopt();
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  ZoneList<Handle<JSObject> > objects(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
      if (it.rinfo()->target_object()->IsMap()) {
        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
        maps.Add(map, zone());
      } else if (it.rinfo()->target_object()->IsJSObject()) {
        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
        objects.Add(object, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded objects after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
  for (int i = 0; i < objects.length(); i++) {
    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ neg(left_reg);
      __ and_(left_reg, divisor - 1);
      __ neg(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ and_(left_reg, divisor - 1);
    __ bind(&done);

  } else if (hmod->fixed_right_arg().has_value) {
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));
    Register right_reg = ToRegister(instr->right());

    int32_t divisor = hmod->fixed_right_arg().value;
    ASSERT(IsPowerOf2(divisor));

    // Check if our assumption of a fixed right operand still holds.
    __ cmp(right_reg, Immediate(divisor));
    DeoptimizeIf(not_equal, instr->environment());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ neg(left_reg);
      __ and_(left_reg, divisor - 1);
      __ neg(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ and_(left_reg, divisor - 1);
    __ bind(&done);

  } else {
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(eax));
    Register right_reg = ToRegister(instr->right());
    ASSERT(!right_reg.is(eax));
    ASSERT(!right_reg.is(edx));
    Register result_reg = ToRegister(instr->result());
    ASSERT(result_reg.is(edx));

    Label done;
    // Check for x % 0; idiv would signal a divide error. We have to
    // deopt in this case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ test(right_reg, Operand(right_reg));
      DeoptimizeIf(zero, instr->environment());
    }

    // Check for kMinInt % -1; idiv would signal a divide error. We
    // have to deopt if we care about -0, because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmp(left_reg, kMinInt);
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ cmp(right_reg, -1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(equal, instr->environment());
      } else {
        __ j(not_equal, &no_overflow_possible, Label::kNear);
        __ Set(result_reg, Immediate(0));
        __ jmp(&done, Label::kNear);
      }
      __ bind(&no_overflow_possible);
    }

    // Sign extend dividend in eax into edx:eax.
    __ cdq();

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &positive_left, Label::kNear);
      __ idiv(right_reg);
      __ test(result_reg, Operand(result_reg));
      DeoptimizeIf(zero, instr->environment());
      __ jmp(&done, Label::kNear);
      __ bind(&positive_left);
    }
    __ idiv(right_reg);
    __ bind(&done);
  }
}
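
// Example of the power-of-2 path above, for divisor 8 (mask divisor - 1 == 7):
// a non-negative dividend is just masked, e.g. 13 & 7 == 5. A negative
// dividend is negated first so the mask applies to its magnitude, then
// negated back: for -13, neg gives 13, 13 & 7 == 5, and neg gives -5,
// matching the sign of the dividend as required for integer remainder. If
// the masked value is 0 the final neg leaves 0 and sets the zero flag,
// which is where the -0 bailout above hooks in.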


void LCodeGen::DoDivI(LDivI* instr) {
  if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->left());
    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ test(dividend, Operand(dividend));
        DeoptimizeIf(zero, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmp(dividend, kMinInt);
        DeoptimizeIf(zero, instr->environment());
      }
      test_value = -divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
        Label done, negative;
        __ cmp(dividend, 0);
        __ j(less, &negative, Label::kNear);
        __ sar(dividend, power);
        if (divisor < 0) __ neg(dividend);
        __ jmp(&done, Label::kNear);

        __ bind(&negative);
        __ neg(dividend);
        __ sar(dividend, power);
        if (divisor > 0) __ neg(dividend);
        __ bind(&done);
        return;  // Don't fall through to "__ neg" below.
      } else {
        // Deoptimize if remainder is not 0.
        __ test(dividend, Immediate(test_value));
        DeoptimizeIf(not_zero, instr->environment());
        __ sar(dividend, power);
      }
    }

    if (divisor < 0) __ neg(dividend);

    return;
  }

  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  if (instr->is_flooring()) {
    Label done;
    __ test(edx, edx);
    __ j(zero, &done, Label::kNear);
    __ xor_(edx, right_reg);
    __ sar(edx, 31);
    __ add(eax, edx);
    __ bind(&done);
  } else if (!instr->hydrogen()->CheckFlag(
      HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(edx, Operand(edx));
    DeoptimizeIf(not_zero, instr->environment());
  }
}
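

// Illustrative sketch (added; not part of the original file): the flooring
// fix-up above (xor/sar/add) maps idiv's truncate-toward-zero quotient to a
// floored quotient. In plain C++, which also truncates, the same fix-up
// looks like this (assuming b != 0 and not kMinInt / -1, which the emitted
// code deopts on first):
static inline int32_t FloorDivSketch(int32_t a, int32_t b) {
  int32_t q = a / b;  // Truncates toward zero, like idiv.
  int32_t r = a % b;  // Remainder has the sign of the dividend.
  // A non-zero remainder whose sign differs from the divisor's means the
  // true quotient was negative and got rounded toward zero; undo that.
  if (r != 0 && ((r ^ b) < 0)) q -= 1;
  return q;  // E.g. FloorDivSketch(-7, 2) == -4, while -7 / 2 == -3.
}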


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  ASSERT(instr->right()->IsConstantOperand());

  Register dividend = ToRegister(instr->left());
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
  Register result = ToRegister(instr->result());

  switch (divisor) {
  case 0:
    DeoptimizeIf(no_condition, instr->environment());
    return;

  case 1:
    __ Move(result, dividend);
    return;

  case -1:
    __ Move(result, dividend);
    __ neg(result);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr->environment());
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
    return;
  }

  uint32_t divisor_abs = abs(divisor);
  if (IsPowerOf2(divisor_abs)) {
    int32_t power = WhichPowerOf2(divisor_abs);
    if (divisor < 0) {
      // Input[dividend] is clobbered.
      // The sequence is tedious because neg(dividend) might overflow.
      __ mov(result, dividend);
      __ sar(dividend, 31);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ shl(dividend, 32 - power);
      __ sar(result, power);
      __ not_(dividend);
      // Clear result.sign if dividend.sign is set.
      __ and_(result, dividend);
    } else {
      __ Move(result, dividend);
      __ sar(result, power);
    }
  } else {
    ASSERT(ToRegister(instr->left()).is(eax));
    ASSERT(ToRegister(instr->result()).is(edx));
    Register scratch = ToRegister(instr->temp());

    // Find b such that 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // Precision +1 bit (effectively).
    double multiplier_f =
        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
    int64_t multiplier;
    if (multiplier_f - floor(multiplier_f) < 0.5) {
        multiplier = static_cast<int64_t>(floor(multiplier_f));
    } else {
        multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
    }
    // The multiplier is a uint32.
    ASSERT(multiplier > 0 &&
           multiplier < (static_cast<int64_t>(1) << 32));
    __ mov(scratch, dividend);
    if (divisor < 0 &&
        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ test(dividend, dividend);
      DeoptimizeIf(zero, instr->environment());
    }
    __ mov(edx, static_cast<int32_t>(multiplier));
    __ imul(edx);
    if (static_cast<int32_t>(multiplier) < 0) {
      __ add(edx, scratch);
    }
    Register reg_lo = eax;
    Register reg_byte_scratch = scratch;
    if (!reg_byte_scratch.is_byte_register()) {
        __ xchg(reg_lo, reg_byte_scratch);
        reg_lo = scratch;
        reg_byte_scratch = eax;
    }
    if (divisor < 0) {
      __ xor_(reg_byte_scratch, reg_byte_scratch);
      __ cmp(reg_lo, 0x40000000);
      __ setcc(above, reg_byte_scratch);
      __ neg(edx);
      __ sub(edx, reg_byte_scratch);
    } else {
      __ xor_(reg_byte_scratch, reg_byte_scratch);
      __ cmp(reg_lo, 0xC0000000);
      __ setcc(above_equal, reg_byte_scratch);
      __ add(edx, reg_byte_scratch);
    }
    __ sar(edx, shift - 32);
  }
}
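

// Illustrative sketch (added; not part of the original file): the
// multiply-by-reciprocal path above, instantiated for divisor_abs == 3.
// There b == 1, shift == 33, and 2^33 / 3 rounds to 0xAAAAAAAB, so a
// 64-bit multiply plus a shift replaces the division:
static inline uint32_t DivBy3Sketch(uint32_t x) {
  return static_cast<uint32_t>(
      (static_cast<uint64_t>(x) * 0xAAAAAAABu) >> 33);
}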


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    if (instr->hydrogen()->representation().IsSmi()) {
      __ SmiUntag(left);
    }
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}
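

// Illustrative sketch (added; not part of the original file): the
// kBailoutOnMinusZero path above deopts when a zero product should really
// be -0 in JS, which happens exactly when the product is zero and either
// factor is negative (e.g. 0 * -5 === -0). Or-ing the factors and testing
// the sign bit detects this in one instruction:
static inline bool MulYieldsMinusZeroSketch(int32_t a, int32_t b) {
  return (static_cast<int64_t>(a) * b) == 0 && ((a | b) < 0);
}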


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        if (right_operand == int32_t(~0)) {
          __ not_(ToRegister(left));
        } else {
          __ xor_(ToRegister(left), right_operand);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::ROR:
        __ ror_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        }
        break;
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        } else {
          __ ror(ToRegister(left), shift_count);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ shl(ToRegister(left), shift_count - 1);
            }
            __ SmiTag(ToRegister(left));
            DeoptimizeIf(overflow, instr->environment());
          } else {
            __ shl(ToRegister(left), shift_count);
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
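

// Illustrative sketch (added; not part of the original file): JS x >>> s
// yields an unsigned 32-bit value, but this code keeps results in int32
// registers. The sign tests above deopt whenever the unsigned result has
// the top bit set and therefore does not fit, e.g. for s == 0 and x < 0:
static inline bool ShrResultFitsInt32Sketch(int32_t x, int s) {
  uint32_t result = static_cast<uint32_t>(x) >> (s & 0x1F);
  return result <= 0x7FFFFFFFu;  // False => the generated code deopts.
}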


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left),
           ToImmediate(right, instr->hydrogen()->representation()));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  int32_t lower = static_cast<int32_t>(int_val);
  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
  ASSERT(instr->result()->IsDoubleRegister());

  if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
    __ push(Immediate(upper));
    __ push(Immediate(lower));
    X87Register reg = ToX87Register(instr->result());
    X87Mov(reg, Operand(esp, 0));
    __ add(Operand(esp), Immediate(kDoubleSize));
  } else {
    CpuFeatureScope scope1(masm(), SSE2);
    XMMRegister res = ToDoubleRegister(instr->result());
    if (int_val == 0) {
      __ xorps(res, res);
    } else {
      Register temp = ToRegister(instr->temp());
      if (CpuFeatures::IsSupported(SSE4_1)) {
        CpuFeatureScope scope2(masm(), SSE4_1);
        if (lower != 0) {
          __ Set(temp, Immediate(lower));
          __ movd(res, Operand(temp));
          __ Set(temp, Immediate(upper));
          __ pinsrd(res, Operand(temp), 1);
        } else {
          __ xorps(res, res);
          __ Set(temp, Immediate(upper));
          __ pinsrd(res, Operand(temp), 1);
        }
      } else {
        __ Set(temp, Immediate(upper));
        __ movd(res, Operand(temp));
        __ psllq(res, 32);
        if (lower != 0) {
          XMMRegister xmm_scratch = double_scratch0();
          __ Set(temp, Immediate(lower));
          __ movd(xmm_scratch, Operand(temp));
          __ por(res, xmm_scratch);
        }
      }
    }
  }
}
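

// Illustrative sketch (added; not part of the original file): the lower
// and upper words materialized above are just the raw IEEE-754 bits of
// the constant, split like this (memcpy being the portable bit cast):
static inline void SplitDoubleSketch(double v, int32_t* lower,
                                     int32_t* upper) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits));
  *lower = static_cast<int32_t>(bits);
  *upper = static_cast<int32_t>(bits >> 32);
}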


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> handle = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(reg, handle);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following masking takes care of that anyway.
  __ mov(result, FieldOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ and_(result, Map::kElementsKindMask);
  __ shr(result, Map::kElementsKindShift);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  ASSERT(input.is(result));

  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi return the object.
    __ JumpIfSmi(input, &done, Label::kNear);
  }

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, map);
  __ j(not_equal, &done, Label::kNear);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(eax));

  __ test(object, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, scratch);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ mov(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand::StaticVariable(stamp));
      __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ mov(result, FieldOperand(object, JSDate::kValueOffset +
                                          kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(Operand(esp, 0), object);
    __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());
  Register index = ToRegister(instr->index());
  Register value = ToRegister(instr->value());
  String::Encoding encoding = instr->encoding();

  if (FLAG_debug_code) {
    __ push(value);
    __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
    __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));

    __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
                                ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ pop(value);
  }

  if (encoding == String::ONE_BYTE_ENCODING) {
    __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
             value);
  } else {
    __ mov_w(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
             value);
  }
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToOperand(instr->value()));
  ASSERT(ToRegister(instr->context()).is(esi));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
                                        instr->hydrogen()->representation());
      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      __ lea(ToRegister(instr->result()), address);
    }
  } else {
    if (right->IsConstantOperand()) {
      __ add(ToOperand(left),
             ToImmediate(right, instr->hydrogen()->representation()));
    } else {
      __ add(ToRegister(left), ToOperand(right));
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    if (right->IsConstantOperand()) {
      Operand left_op = ToOperand(left);
      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
                                        instr->hydrogen()->representation());
      __ cmp(left_op, immediate);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_op, immediate);
    } else {
      Register left_reg = ToRegister(left);
      Operand right_op = ToOperand(right);
      __ cmp(left_reg, right_op);
      __ j(condition, &return_left, Label::kNear);
      __ mov(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      __ orpd(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
    __ bind(&return_right);
    __ movsd(left_reg, right_reg);

    __ bind(&return_left);
  }
}
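

// Note (added): when both inputs are zeros, only the sign bit
// distinguishes them. Or-ing the bit patterns (the orpd above) makes min
// return -0 if either input is -0; adding the values (the addsd above)
// makes max return +0 unless both are -0, since -0 + -0 == -0 while
// -0 + +0 == +0.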


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister left = ToDoubleRegister(instr->left());
    XMMRegister right = ToDoubleRegister(instr->right());
    XMMRegister result = ToDoubleRegister(instr->result());
    switch (instr->op()) {
      case Token::ADD:
        __ addsd(left, right);
        break;
      case Token::SUB:
        __ subsd(left, right);
        break;
      case Token::MUL:
        __ mulsd(left, right);
        break;
      case Token::DIV:
        __ divsd(left, right);
        // Don't delete this mov. It may improve performance on some CPUs
        // when there is a mulsd depending on the result.
        __ movaps(left, left);
        break;
      case Token::MOD: {
        // Pass two doubles as arguments on the stack.
        __ PrepareCallCFunction(4, eax);
        __ movsd(Operand(esp, 0 * kDoubleSize), left);
        __ movsd(Operand(esp, 1 * kDoubleSize), right);
        __ CallCFunction(
            ExternalReference::double_fp_operation(Token::MOD, isolate()),
            4);

        // Return value is in st(0) on ia32.
        // Store it into the result register.
        __ sub(Operand(esp), Immediate(kDoubleSize));
        __ fstp_d(Operand(esp, 0));
        __ movsd(result, Operand(esp, 0));
        __ add(Operand(esp), Immediate(kDoubleSize));
        break;
      }
      default:
        UNREACHABLE();
        break;
    }
  } else {
    X87Register left = ToX87Register(instr->left());
    X87Register right = ToX87Register(instr->right());
    X87Register result = ToX87Register(instr->result());
    if (instr->op() != Token::MOD) {
      X87PrepareBinaryOp(left, right, result);
    }
    switch (instr->op()) {
      case Token::ADD:
        __ fadd_i(1);
        break;
      case Token::SUB:
        __ fsub_i(1);
        break;
      case Token::MUL:
        __ fmul_i(1);
        break;
      case Token::DIV:
        __ fdiv_i(1);
        break;
      case Token::MOD: {
        // Pass two doubles as arguments on the stack.
        __ PrepareCallCFunction(4, eax);
        X87Mov(Operand(esp, 1 * kDoubleSize), right);
        X87Mov(Operand(esp, 0), left);
        X87Free(right);
        ASSERT(left.is(result));
        X87PrepareToWrite(result);
        __ CallCFunction(
            ExternalReference::double_fp_operation(Token::MOD, isolate()),
            4);

        // Return value is in st(0) on ia32.
        X87CommitWrite(result);
        break;
      }
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->left()).is(edx));
  ASSERT(ToRegister(instr->right()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}
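

// Note (added): EmitBranch emits at most one jump by exploiting block
// layout. If the true block is emitted next, it inverts the condition and
// jumps only to the false block; if the false block is next, it jumps only
// on the condition; otherwise both a conditional jump and an unconditional
// jmp are needed.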


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(false_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(false_block));
  }
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32()) {
    Register reg = ToRegister(instr->value());
    __ test(reg, Operand(reg));
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister reg = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(reg, xmm_scratch);
    EmitBranch(instr, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ cmp(reg, factory()->true_value());
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      __ test(reg, Operand(reg));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
      EmitBranch(instr, not_equal);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ cmp(reg, factory()->undefined_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ cmp(reg, factory()->true_value());
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ cmp(reg, factory()->false_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ cmp(reg, factory()->null_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ test(reg, Operand(reg));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ test(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      Register map = no_reg;  // Keep the compiler happy.
      if (expected.NeedsMap()) {
        map = ToRegister(instr->temp());
        ASSERT(!map.is(reg));
        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                    1 << Map::kIsUndetectable);
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
               factory()->heap_number_map());
        __ j(not_equal, &not_heap_number, Label::kNear);
        if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
          CpuFeatureScope scope(masm(), SSE2);
          XMMRegister xmm_scratch = double_scratch0();
          __ xorps(xmm_scratch, xmm_scratch);
          __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
        } else {
          __ fldz();
          __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
          __ FCmp();
        }
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = not_equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
        CpuFeatureScope scope(masm(), SSE2);
        __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      } else {
        X87LoadForUsage(ToX87Register(right), ToX87Register(left));
        __ FCmp();
      }
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        __ cmp(ToOperand(left),
               ToImmediate(right, instr->hydrogen()->representation()));
      } else if (left->IsConstantOperand()) {
        __ cmp(ToOperand(right),
               ToImmediate(left, instr->hydrogen()->representation()));
        // We transposed the operands. Reverse the condition.
        cc = ReverseCondition(cc);
      } else {
        __ cmp(ToRegister(left), ToOperand(right));
      }
    }
    EmitBranch(instr, cc);
  }
}
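

// Note (added): ucomisd sets the parity flag for unordered operands, so
// the parity_even jump above is what routes every comparison involving a
// NaN to the false block, matching JS semantics where NaN comparisons are
// always false.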


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ CmpObject(left, right);
  } else {
    Operand right = ToOperand(instr->right());
    __ cmp(left, right);
  }
  EmitBranch(instr, equal);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
  if (use_sse2) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister input_reg = ToDoubleRegister(instr->object());
    __ ucomisd(input_reg, input_reg);
    EmitFalseBranch(instr, parity_odd);
  } else {
    // Put the value on top of the stack.
    X87Register src = ToX87Register(instr->object());
    X87LoadForUsage(src);
    __ fld(0);
    __ fld(0);
    __ FCmp();
    Label ok;
    __ j(parity_even, &ok);
    __ fstp(0);
    EmitFalseBranch(instr, no_condition);
    __ bind(&ok);
  }

  __ sub(esp, Immediate(kDoubleSize));
  if (use_sse2) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister input_reg = ToDoubleRegister(instr->object());
    __ movsd(MemOperand(esp, 0), input_reg);
  } else {
    __ fstp_d(MemOperand(esp, 0));
  }

  __ add(esp, Immediate(kDoubleSize));
  int offset = sizeof(kHoleNanUpper32);
  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}
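

// Illustrative sketch (added; not part of the original file): the hole is
// encoded as a NaN with a distinguished upper word, so once the value is
// known to be a NaN only the top 32 bits need comparing, as above:
static inline bool IsHoleNanSketch(double v) {
  uint64_t bits;
  memcpy(&bits, &v, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}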


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ cmp(input, isolate()->factory()->null_value());
  __ j(equal, is_object);

  __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  __ j(not_zero, is_not_object);

  __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  __ j(below, is_not_object);
  __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
  return below_equal;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  Condition true_cond = EmitIsObject(
      reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(instr, zero);
}
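

// Illustrative sketch (added; not part of the original file): on ia32 a
// smi is a 31-bit integer shifted left by one, with tag bit 0, so the smi
// check above reduces to testing the low bit:
static inline bool IsSmiSketch(int32_t value) {
  return (value & kSmiTagMask) == kSmiTag;  // I.e. the low bit is clear.
}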


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  EmitBranch(instr, not_zero);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  __ test(eax, Operand(eax));

  EmitBranch(instr, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}


// Branches to a label or falls through with the answer in the z flag.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));
  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                     FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}
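

// Illustrative sketch (added; not part of the original file): the "faster
// code path" above is the classic subtract-and-unsigned-compare range
// check, which replaces two comparisons with one:
static inline bool InstanceTypeInRangeSketch(unsigned type,
                                             unsigned first,
                                             unsigned last) {
  return (type - first) <= (last - first);  // Unsigned wrap-around trick.
}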
2851

    
2852

    
2853
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
2854
  Register input = ToRegister(instr->value());
2855
  Register temp = ToRegister(instr->temp());
2856
  Register temp2 = ToRegister(instr->temp2());
2857

    
2858
  Handle<String> class_name = instr->hydrogen()->class_name();
2859

    
2860
  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2861
      class_name, input, temp, temp2);
2862

    
2863
  EmitBranch(instr, equal);
2864
}
2865

    
2866

    
2867
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2868
  Register reg = ToRegister(instr->value());
2869
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
2870
  EmitBranch(instr, equal);
2871
}
2872

    
2873

    
2874
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2875
  // Object and function are in fixed registers defined by the stub.
2876
  ASSERT(ToRegister(instr->context()).is(esi));
2877
  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
2878
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2879

    
2880
  Label true_value, done;
2881
  __ test(eax, Operand(eax));
2882
  __ j(zero, &true_value, Label::kNear);
2883
  __ mov(ToRegister(instr->result()), factory()->false_value());
2884
  __ jmp(&done, Label::kNear);
2885
  __ bind(&true_value);
2886
  __ mov(ToRegister(instr->result()), factory()->true_value());
2887
  __ bind(&done);
2888
}
2889

    
2890

    
2891
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2892
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
2893
   public:
2894
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2895
                                  LInstanceOfKnownGlobal* instr,
2896
                                  const X87Stack& x87_stack)
2897
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
2898
    virtual void Generate() V8_OVERRIDE {
2899
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
2900
    }
2901
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
2902
    Label* map_check() { return &map_check_; }
2903
   private:
2904
    LInstanceOfKnownGlobal* instr_;
2905
    Label map_check_;
2906
  };
2907

    
2908
  DeferredInstanceOfKnownGlobal* deferred;
2909
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr, x87_stack_);
2910

    
2911
  Label done, false_result;
2912
  Register object = ToRegister(instr->value());
2913
  Register temp = ToRegister(instr->temp());
2914

    
2915
  // A Smi is not an instance of anything.
2916
  __ JumpIfSmi(object, &false_result);
2917

    
2918
  // This is the inlined call site instanceof cache. The two occurences of the
2919
  // hole value will be patched to the last map/result pair generated by the
2920
  // instanceof stub.
2921
  Label cache_miss;
2922
  Register map = ToRegister(instr->temp());
2923
  __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
2924
  __ bind(deferred->map_check());  // Label for calculating code patching.
2925
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
2926
  __ cmp(map, Operand::ForCell(cache_cell));  // Patched to cached map.
2927
  __ j(not_equal, &cache_miss, Label::kNear);
2928
  __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
2929
  __ jmp(&done);
2930

    
2931
  // The inlined call site cache did not match. Check for null and string
2932
  // before calling the deferred code.
2933
  __ bind(&cache_miss);
2934
  // Null is not an instance of anything.
2935
  __ cmp(object, factory()->null_value());
2936
  __ j(equal, &false_result);
2937

    
2938
  // String values are not instances of anything.
2939
  Condition is_string = masm_->IsObjectStringType(object, temp, temp);
2940
  __ j(is_string, &false_result);
2941

    
2942
  // Go to the deferred code.
2943
  __ jmp(deferred->entry());
2944

    
2945
  __ bind(&false_result);
2946
  __ mov(ToRegister(instr->result()), factory()->false_value());
2947

    
2948
  // Here result has either true or false. Deferred code also produces true or
2949
  // false object.
2950
  __ bind(deferred->exit());
2951
  __ bind(&done);
2952
}
2953

    
2954

    
2955
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2956
                                               Label* map_check) {
2957
  PushSafepointRegistersScope scope(this);
2958

    
2959
  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2960
  flags = static_cast<InstanceofStub::Flags>(
2961
      flags | InstanceofStub::kArgsInRegisters);
2962
  flags = static_cast<InstanceofStub::Flags>(
2963
      flags | InstanceofStub::kCallSiteInlineCheck);
2964
  flags = static_cast<InstanceofStub::Flags>(
2965
      flags | InstanceofStub::kReturnTrueFalseObject);
2966
  InstanceofStub stub(flags);
2967

    
2968
  // Get the temp register reserved by the instruction. This needs to be a
2969
  // register which is pushed last by PushSafepointRegisters as top of the
2970
  // stack is used to pass the offset to the location of the map check to
2971
  // the stub.
2972
  Register temp = ToRegister(instr->temp());
2973
  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
2974
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 13;
  int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
  __ mov(temp, Immediate(delta));
  __ StoreToSafepointRegisterSlot(temp, temp);
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  // Get the deoptimization index of the LLazyBailout-environment that
  // corresponds to this instruction.
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());

  // Put the result value into the eax slot and restore all registers.
  __ StoreToSafepointRegisterSlot(eax, eax);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
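
  // The compare IC leaves an integer in eax whose relation to zero encodes
  // the comparison result, so testing eax against itself sets exactly the
  // flags that ComputeCompareCondition(op) expects below.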
  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}

void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
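  // Besides the parameters, the return address is always on the stack; with
  // dynamic frame alignment there is additionally one word of alignment
  // padding, hence an extra value count of 2 below.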
  int extra_value_count = dynamic_frame_alignment ? 2 : 1;

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    if (dynamic_frame_alignment && FLAG_debug_code) {
      __ cmp(Operand(esp,
                     (parameter_count + extra_value_count) * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }
    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
    if (dynamic_frame_alignment && FLAG_debug_code) {
      ASSERT(extra_value_count == 2);
      __ cmp(Operand(esp, reg, times_pointer_size,
                     extra_value_count * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }

    // Emit code to restore the stack based on instr->parameter_count().
    __ pop(return_addr_reg);  // Save return address.
    if (dynamic_frame_alignment) {
      __ inc(reg);  // 1 more for alignment.
    }
    __ shl(reg, kPointerSizeLog2);
    __ add(esp, reg);
    __ jmp(return_addr_reg);
  }
}

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register.  We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
    // safe to write to the context register.
    __ push(eax);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
    ASSERT(NeedsEagerFrame());
    CpuFeatureScope scope(masm(), SSE2);
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    int count = 0;
    while (!save_iterator.Done()) {
      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
                MemOperand(esp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }
  if (dynamic_frame_alignment_) {
    // Fetch the state of the dynamic frame alignment.
    __ mov(edx, Operand(ebp,
      JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ mov(esp, ebp);
    __ pop(ebp);
    no_frame_start = masm_->pc_offset();
  }
  if (dynamic_frame_alignment_) {
    Label no_padding;
    __ cmp(edx, Immediate(kNoAlignmentPadding));
    __ j(equal, &no_padding);

    EmitReturn(instr, true);
    __ bind(&no_padding);
  }

  EmitReturn(instr, false);
  if (no_frame_start != -1) {
    info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(edx));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                               RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }

  // Store the value.
  __ mov(Operand::ForCell(cell_handle), value);
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->global_object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, ContextOperand(context, instr->slot_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Label skip_assignment;

  Operand target = ContextOperand(context, instr->slot_index());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      __ j(not_equal, &skip_assignment, Label::kNear);
    }
  }

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    Register temp = ToRegister(instr->temp());
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              temp,
                              GetSaveFPRegsMode(),
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}

void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(ToExternalReference(
                LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (access.representation().IsByte()) {
      ASSERT(instr->hydrogen()->representation().IsInteger32());
      __ movzx_b(result, operand);
    } else {
      __ mov(result, operand);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  if (FLAG_track_double_fields &&
      instr->hydrogen()->representation().IsDouble()) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister result = ToDoubleRegister(instr->result());
      __ movsd(result, FieldOperand(object, offset));
    } else {
      X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
    }
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  if (access.representation().IsByte()) {
    ASSERT(instr->hydrogen()->representation().IsInteger32());
    __ movzx_b(result, FieldOperand(object, offset));
  } else {
    __ mov(result, FieldOperand(object, offset));
  }
}


void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    AllowDeferredHandleDereference smi_check;
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
            1 << Map::kHasNonInstancePrototype);
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ mov(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->object());
  __ mov(result, FieldOperand(input,
                              ExternalArray::kExternalPointerOffset));
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ mov(result, Operand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Operand index = ToOperand(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting index from length accounts for one of them; add one more.
    __ sub(length, index);
    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
  }
}


void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      0,
      instr->additional_index()));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister result(ToDoubleRegister(instr->result()));
      __ movss(result, operand);
      __ cvtss2sd(result, result);
    } else {
      X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
    }
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      __ movsd(ToDoubleRegister(instr->result()), operand);
    } else {
      X87Mov(ToX87Register(instr->result()), operand);
    }
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ movsx_b(result, operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ movzx_b(result, operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ movsx_w(result, operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movzx_w(result, operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ mov(result, operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ mov(result, operand);
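        // A uint32 value with the sign bit set cannot be represented as an
        // int32, so unless the instruction is explicitly allowed to produce
        // uint32 values, deoptimize when the loaded value tests negative.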
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
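    // The hole is stored as a NaN with a distinguished bit pattern, so it is
    // enough to compare the upper 32 bits of the element against
    // kHoleNanUpper32.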
    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
        sizeof(kHoleNanLower32);
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        offset,
        instr->additional_index());
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, double_load_operand);
  } else {
    X87Mov(ToX87Register(instr->result()), double_load_operand);
  }
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(),
                               instr->key(),
                               instr->hydrogen()->key()->representation(),
                               FAST_ELEMENTS,
                               FixedArray::kHeaderSize - kHeapObjectTag,
                               instr->additional_index()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ test(result, Immediate(kSmiTagMask));
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr->environment());
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_external()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t offset,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int element_shift_size = ElementsKindToShiftSize(elements_kind);
  int shift_size = element_shift_size;
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
                       + offset);
  } else {
    // Take the tag bit into account while computing the shift size.
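    // E.g. for FAST_ELEMENTS the element shift is 2, but a smi key already
    // carries a factor of 2 from its tag (kSmiTagSize == 1), so the operand
    // only needs to scale by the remaining factor of 2.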
    if (key_representation.IsSmi() && (shift_size >= 1)) {
      shift_size -= kSmiTagSize;
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   offset + (additional_index << element_shift_size));
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}

void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = ToRegister(instr->temp());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ mov(scratch,
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
            1 << SharedFunctionInfo::kStrictModeBitWithinByte);
  __ j(not_equal, &receiver_ok);  // A near jump is not sufficient here!

  // Do not transform the receiver to object for builtins.
  __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
            1 << SharedFunctionInfo::kNativeBitWithinByte);
  __ j(not_equal, &receiver_ok);

  // Normal function. Replace undefined or null with global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);

  __ bind(&global_object);
  // TODO(kmillikin): We have a hydrogen value for the global object.  See
  // if it's better to use it than to explicitly fetch it from the context
  // here.
  __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
  __ mov(receiver,
         FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}

void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(eax));  // Used for parameter count.
  ASSERT(function.is(edi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in esi.
    ASSERT(result.is(esi));
  }
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result,
         Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  __ push(esi);  // The context is the first argument.
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result,
         Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global());
  Register result = ToRegister(instr->result());
  __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
}

void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 EDIState edi_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  if (can_invoke_directly) {
    if (edi_state == EDI_UNINITIALIZED) {
      __ LoadHeapObject(edi, function);
    }

    // Change context.
    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

    // Set eax to arguments count if adaptation is not needed. Assumes that
    // eax is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(eax, arity);
    }

    // Invoke function directly.
    __ SetCallKind(ecx, call_kind);
    if (function.is_identical_to(info()->closure())) {
      __ CallSelf();
    } else {
      __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
    }
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    // We need to adapt arguments.
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(
        function, expected, count, CALL_FUNCTION, generator, call_kind);
  }
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  CallKnownFunction(instr->hydrogen()->function(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    EDI_UNINITIALIZED);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr->environment());

  Label slow, allocated, done;
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done);

  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
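  // neg leaves kMinInt unchanged with the sign flag still set; it is the one
  // input whose absolute value is not representable as an int32, hence the
  // deopt below.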
  DeoptimizeIf(negative, instr->environment());
  __ bind(&is_positive);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr,
                                    const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  ASSERT(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  CpuFeatureScope scope(masm(), SSE2);
  if (r.IsDouble()) {
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
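    // Compute |x| as x & (0 - x): for non-NaN inputs the two bit patterns
    // differ only in the sign bit, so the bitwise AND clears the sign while
    // leaving the magnitude intact.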
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andps(input_reg, scratch);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize on negative zero.
      Label non_zero;
      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
      __ ucomisd(input_reg, xmm_scratch);
      __ j(not_equal, &non_zero, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ bind(&non_zero);
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, Operand(xmm_scratch));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x80000000u);
    DeoptimizeIf(equal, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, Operand(input_reg));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x80000000u);
    DeoptimizeIf(equal, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    __ cvttsd2si(output_reg, Operand(input_reg));
    __ Cvtsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ sub(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}

void LCodeGen::DoMathRound(LMathRound* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_temp = ToDoubleRegister(instr->temp());
  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  Label done, round_to_zero, below_one_half, do_not_compensate;
  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half);

  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
  __ addsd(xmm_scratch, input_reg);
  __ cvttsd2si(output_reg, Operand(xmm_scratch));
  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x80000000u);
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done);

  __ bind(&below_one_half);
  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero);

  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ movsd(input_temp, input_reg);  // Do not alter input_reg.
  __ subsd(input_temp, xmm_scratch);
  __ cvttsd2si(output_reg, Operand(input_temp));
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmp(output_reg, 0x80000000u);
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(equal, instr->environment());

  __ Cvtsi2sd(xmm_scratch, output_reg);
  __ ucomisd(xmm_scratch, input_temp);
  __ j(equal, &done);
  __ sub(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ jmp(&done);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // If the sign is positive, we return +0.
    __ movmskpd(output_reg, input_reg);
    __ test(output_reg, Immediate(1));
    __ RecordComment("Minus zero");
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Set(output_reg, Immediate(0));
  __ bind(&done);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ sqrtsd(input_reg, input_reg);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity.  According to IEEE-754, single-precision
  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
  __ mov(scratch, 0xFF800000);
  __ movd(xmm_scratch, scratch);
  __ cvtss2sd(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal.  However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
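  // (-0) + (+0) == +0 under the default rounding mode, so the sqrt below
  // yields +0 rather than -0 for a -0 input, as ECMA-262 requires for
  // Math.pow(x, 0.5).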
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(eax));
  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(eax, &no_deopt);
    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoRandom(LRandom* instr) {
  CpuFeatureScope scope(masm(), SSE2);

  // Assert that the register size is indeed the size of each seed.
  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  // Load native context.
  Register global_object = ToRegister(instr->global_object());
  Register native_context = global_object;
  __ mov(native_context, FieldOperand(
          global_object, GlobalObject::kNativeContextOffset));

  // Load state (FixedArray of the native context's random seeds).
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  Register state = native_context;
  __ mov(state, FieldOperand(native_context, kRandomSeedOffset));

  // Load state[0].
  Register state0 = ToRegister(instr->scratch());
  __ mov(state0, FieldOperand(state, ByteArray::kHeaderSize));
  // Load state[1].
  Register state1 = ToRegister(instr->scratch2());
  __ mov(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));
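
  // What follows is a Marsaglia-style multiply-with-carry generator: each
  // 32-bit seed is updated from its 16-bit halves exactly as described by
  // the formulas in the comments below.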
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  Register scratch3 = ToRegister(instr->scratch3());
  __ movzx_w(scratch3, state0);
  __ imul(scratch3, scratch3, 18273);
  __ shr(state0, 16);
  __ add(state0, scratch3);
  // Save state[0].
  __ mov(FieldOperand(state, ByteArray::kHeaderSize), state0);

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ movzx_w(scratch3, state1);
  __ imul(scratch3, scratch3, 36969);
  __ shr(state1, 16);
  __ add(state1, scratch3);
  // Save state[1].
  __ mov(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  Register random = state0;
  __ shl(random, 14);
  __ and_(state1, Immediate(0x3FFFF));
  __ add(random, state1);

  // Convert 32 random bits in random to 0.(32 random bits) in a double
  // by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
  XMMRegister result = ToDoubleRegister(instr->result());
  XMMRegister scratch4 = double_scratch0();
  __ mov(scratch3, Immediate(0x49800000));  // 1.0 x 2^20 as single.
  __ movd(scratch4, scratch3);
  __ movd(result, random);
  __ cvtss2sd(scratch4, scratch4);
  __ xorps(result, scratch4);
  __ subsd(result, scratch4);
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  ASSERT(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  Label positive, done, zero;
  __ xorps(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(above, &positive, Label::kNear);
  __ j(equal, &zero, Label::kNear);
  ExternalReference nan =
      ExternalReference::address_of_canonical_non_hole_nan();
  __ movsd(input_reg, Operand::StaticVariable(nan));
  __ jmp(&done, Label::kNear);
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  __ movsd(input_reg, Operand::StaticVariable(ninf));
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
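  // ln(x) = ln(2) * log2(x): fldln2 pushes ln(2) onto the x87 stack and
  // fyl2x computes st(1) * log2(st(0)), so the sequence below evaluates the
  // natural logarithm, moving the value through memory to reach the FPU.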
  __ fldln2();
  __ sub(Operand(esp), Immediate(kDoubleSize));
  __ movsd(Operand(esp, 0), input_reg);
  __ fld_d(Operand(esp, 0));
  __ fyl2x();
  __ fstp_d(Operand(esp, 0));
  __ movsd(input_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
  __ bind(&done);
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister input = ToDoubleRegister(instr->value());
  XMMRegister result = ToDoubleRegister(instr->result());
  XMMRegister temp0 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}


void LCodeGen::DoMathTan(LMathTan* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ Set(esi, Immediate(0));
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LMathCos* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ Set(esi, Immediate(0));
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LMathSin* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ Set(esi, Immediate(0));
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}

void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      EDI_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ mov(ecx, instr->name());
  CallCode(ic, mode, instr);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ mov(ecx, instr->name());
  CallCode(ic, mode, instr);
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  CallKnownFunction(instr->hydrogen()->target(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    EDI_UNINITIALIZED);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->constructor()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  // No cell in ebx for construct type feedback in optimized code.
  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
  __ mov(ebx, Immediate(undefined_value));
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  __ Set(eax, Immediate(instr->arity()));
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->constructor()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ Set(eax, Immediate(instr->arity()));
  __ mov(ebx, instr->hydrogen()->property_cell());
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need to create a holey array; look at the first argument.
      __ mov(ecx, Operand(esp, 0));
      __ test(ecx, ecx);
      __ j(zero, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                              override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}

void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  __ lea(result, Operand(base, instr->offset()));
}


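// Stores to a named field. Depending on the field representation this emits
// a smi-, heap-object- or double-specific store, performs a map transition
// first if one is attached to the instruction, and finally records a write
// barrier when the stored value may be a heap object.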
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(
            ToExternalReference(LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (instr->value()->IsConstantOperand()) {
      ASSERT(!representation.IsByte());
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      __ mov(operand, Immediate(ToInteger32(operand_value)));
    } else {
      Register value = ToRegister(instr->value());
      if (representation.IsByte()) {
        __ mov_b(operand, value);
      } else {
        __ mov(operand, value);
      }
    }
    return;
  }

  Register object = ToRegister(instr->object());
  Handle<Map> transition = instr->transition();

  if (FLAG_track_fields && representation.IsSmi()) {
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (!IsSmi(operand_value)) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (IsInteger32(operand_value)) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      if (!instr->hydrogen()->value()->type().IsHeapObject()) {
        Register value = ToRegister(instr->value());
        __ test(value, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }
    }
  } else if (FLAG_track_double_fields && representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister value = ToDoubleRegister(instr->value());
      __ movsd(FieldOperand(object, offset), value);
    } else {
      X87Register value = ToX87Register(instr->value());
      X87Mov(FieldOperand(object, offset), value);
    }
    return;
  }

  if (!transition.is_null()) {
    if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
      __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      __ mov(temp_map, transition);
      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          temp_map,
                          temp,
                          GetSaveFPRegsMode(),
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  MemOperand operand = FieldOperand(write_register, offset);
  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      if (representation.IsByte()) {
        __ mov_b(operand, value);
      } else {
        __ mov(operand, value);
      }
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    if (representation.IsByte()) {
      __ mov_b(operand, value);
    } else {
      __ mov(operand, value);
    }
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        GetSaveFPRegsMode(),
                        EMIT_REMEMBERED_SET,
                        check_needed);
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


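// Like DeoptimizeIf, but when the bounds check was statically proven
// redundant and --debug-code is on, emits an int3 trap instead, so a
// violated assumption crashes immediately rather than silently deoptimizing.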
void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, check->environment());
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;

  if (instr->index()->IsConstantOperand()) {
    Immediate immediate =
        ToImmediate(LConstantOperand::cast(instr->index()),
                    instr->hydrogen()->length()->representation());
    __ cmp(ToOperand(instr->length()), immediate);
    Condition condition =
        instr->hydrogen()->allow_equality() ? below : below_equal;
    ApplyCheckIf(condition, instr);
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
    Condition condition =
        instr->hydrogen()->allow_equality() ? above : above_equal;
    ApplyCheckIf(condition, instr);
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      0,
      instr->additional_index()));
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister xmm_scratch = double_scratch0();
      __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
      __ movss(operand, xmm_scratch);
    } else {
      __ fld(0);
      __ fstp_s(operand);
    }
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      __ movsd(operand, ToDoubleRegister(instr->value()));
    } else {
      X87Mov(operand, ToX87Register(instr->value()));
    }
  } else {
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ mov(operand, value);
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


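// Stores a double into a FixedDoubleArray. NaNs are canonicalized on the way
// in so the hole NaN bit pattern can never be stored by accident; without
// SSE2 the same check is done on the x87 stack, with a dedicated fast path
// that stores the hole constant as two 32-bit immediates.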
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  ExternalReference canonical_nan_reference =
      ExternalReference::address_of_canonical_non_hole_nan();
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());

  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister value = ToDoubleRegister(instr->value());

    if (instr->NeedsCanonicalization()) {
      Label have_value;

      __ ucomisd(value, value);
      __ j(parity_odd, &have_value);  // NaN.

      __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
      __ bind(&have_value);
    }

    __ movsd(double_store_operand, value);
  } else {
    // Can't use SSE2 in the serializer
    if (instr->hydrogen()->IsConstantHoleStore()) {
      // This means we should store the (double) hole. No floating point
      // registers required.
      double nan_double = FixedDoubleArray::hole_nan_as_double();
      uint64_t int_val = BitCast<uint64_t, double>(nan_double);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));

      __ mov(double_store_operand, Immediate(lower));
      Operand double_store_operand2 = BuildFastArrayOperand(
          instr->elements(),
          instr->key(),
          instr->hydrogen()->key()->representation(),
          FAST_DOUBLE_ELEMENTS,
          FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
          instr->additional_index());
      __ mov(double_store_operand2, Immediate(upper));
    } else {
      Label no_special_nan_handling;
      X87Register value = ToX87Register(instr->value());
      X87Fxch(value);

      if (instr->NeedsCanonicalization()) {
        __ fld(0);
        __ fld(0);
        __ FCmp();

        __ j(parity_odd, &no_special_nan_handling);
        __ sub(esp, Immediate(kDoubleSize));
        __ fst_d(MemOperand(esp, 0));
        __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
               Immediate(kHoleNanUpper32));
        __ add(esp, Immediate(kDoubleSize));
        Label canonicalize;
        __ j(not_equal, &canonicalize);
        __ jmp(&no_special_nan_handling);
        __ bind(&canonicalize);
        __ fstp(0);
        __ fld_d(Operand::StaticVariable(canonical_nan_reference));
      }

      __ bind(&no_special_nan_handling);
      __ fst_d(double_store_operand);
    }
  }
}


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  Operand operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_ELEMENTS,
      FixedArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  if (instr->value()->IsRegister()) {
    __ mov(operand, ToRegister(instr->value()));
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsSmi(operand_value)) {
      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
      __ mov(operand, immediate);
    } else {
      ASSERT(!IsInteger32(operand_value));
      Handle<Object> handle_value = ToHandle(operand_value);
      __ mov(operand, handle_value);
    }
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    ASSERT(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    ASSERT(!instr->key()->IsConstantOperand());
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, operand);
    __ RecordWrite(elements,
                   key,
                   value,
                   GetSaveFPRegsMode(),
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch on the elements kind: external, fast double, or fast.
  if (instr->is_external()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->value()).is(eax));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(equal, instr->environment());
  __ bind(&no_memento_found);
}


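// Transitions an object's elements kind. A simple map change just rewrites
// the map word (plus write barrier); anything else goes through
// TransitionElementsKindStub under a safepoint with registers saved.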
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  bool is_simple_map_transition =
      IsSimpleMapChangeTransition(from_kind, to_kind);
  Label::Distance branch_distance =
      is_simple_map_transition ? Label::kNear : Label::kFar;
  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable, branch_distance);
  if (is_simple_map_transition) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
           Immediate(to_map));
    // Write barrier.
    ASSERT_NE(instr->temp(), NULL);
    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                         ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
    PushSafepointRegistersScope scope(this);
    if (!object_reg.is(eax)) {
      __ push(object_reg);
    }
    LoadContextFromDeferred(instr->context());
    if (!object_reg.is(eax)) {
      __ pop(eax);
    }
    __ mov(ebx, to_map);
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen,
                             LStringCharCodeAt* instr,
                             const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);

  StringCharLoadGenerator::Generate(masm(),
                                    factory(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
                                      Representation::Smi());
    __ push(immediate);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
                          instr, instr->context());
  __ AssertSmi(eax);
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}


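// Converts a char code to a one-character string. Codes within the one-byte
// range are looked up in the single-character string cache; a cache miss or
// a larger code falls through to the deferred runtime call.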
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen,
                               LStringCharFromCode* instr,
                               const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  __ cmp(char_code, String::kMaxOneByteCharCode);
  __ j(above, deferred->entry());
  __ Set(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  EmitPushTaggedOperand(instr->left());
  EmitPushTaggedOperand(instr->right());
  StringAddStub stub(instr->hydrogen()->flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  ASSERT(output->IsDoubleRegister());
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
  } else if (input->IsRegister()) {
    Register input_reg = ToRegister(input);
    __ push(input_reg);
    X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
    __ pop(input_reg);
  } else {
    X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
  }
}


void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  Register input = ToRegister(instr->value());
  __ SmiTag(input);
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    LOperand* temp = instr->temp();

    __ LoadUint32(ToDoubleRegister(output),
                  ToRegister(input),
                  ToDoubleRegister(temp));
  } else {
    X87Register res = ToX87Register(output);
    X87PrepareToWrite(res);
    __ LoadUint32NoSSE2(ToRegister(input));
    X87CommitWrite(res);
  }
}


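// A uint32 fits in an ia32 smi only if it is below 2^30, i.e. its top two
// bits are clear, hence the test against 0xc0000000 unless the value's range
// already proves it is in smi range.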
void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
  Register input = ToRegister(instr->value());
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    __ test(input, Immediate(0xc0000000));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ SmiTag(input);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen,
                       LNumberTagI* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred =
      new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen,
                       LNumberTagU* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred =
      new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}


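// Slow path for NumberTagI/NumberTagU: the value did not fit in a smi, so it
// is boxed in a heap number instead. In the signed case the input arrives as
// a smi-tagged value that overflowed, so it is untagged and its top bit
// restored (the xor with 0x80000000) before conversion to double.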
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label slow;
  Register reg = ToRegister(value);
  Register tmp = reg.is(eax) ? ecx : eax;
  XMMRegister xmm_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  Label done;

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    __ SmiUntag(reg);
    __ xor_(reg, 0x80000000);
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope feature_scope(masm(), SSE2);
      __ Cvtsi2sd(xmm_scratch, Operand(reg));
    } else {
      __ push(reg);
      __ fild_s(Operand(esp, 0));
      __ pop(reg);
    }
  } else {
    if (CpuFeatures::IsSupported(SSE2)) {
      CpuFeatureScope feature_scope(masm(), SSE2);
      __ LoadUint32(xmm_scratch, reg,
                    ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
    } else {
      // There's no fild variant for unsigned values, so zero-extend to a 64-bit
      // int manually.
      __ push(Immediate(0));
      __ push(reg);
      __ fild_d(Operand(esp, 0));
      __ pop(reg);
      __ pop(reg);
    }
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ StoreToSafepointRegisterSlot(reg, Immediate(0));
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  if (!reg.is(eax)) __ mov(reg, eax);

  // Done. Put the value in xmm_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope feature_scope(masm(), SSE2);
    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
  } else {
    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
  }
  __ StoreToSafepointRegisterSlot(reg, reg);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen,
                       LNumberTagD* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  Register reg = ToRegister(instr->result());

  bool use_sse2 = CpuFeatures::IsSupported(SSE2);
  if (!use_sse2) {
    // Put the value to the top of stack
    X87Register src = ToX87Register(instr->value());
    X87LoadForUsage(src);
  }

  DeferredNumberTagD* deferred =
      new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  if (use_sse2) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
  } else {
    __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
  }
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Set(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(reg, eax);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(input));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->value();
  Register result = ToRegister(input);
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(result, Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr->environment());
  } else {
    __ AssertSmi(result);
  }
  __ SmiUntag(result);
}


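// x87 variant of EmitNumberUntagD below: loads a tagged number as a double
// onto the FPU stack, optionally converting undefined to NaN and
// deoptimizing on -0.0. Since the FPU compare treats 0.0 and -0.0 as equal,
// -0.0 is detected via the sign bit of the heap number's exponent word.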
void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
                                      Register temp_reg,
                                      X87Register res_reg,
                                      bool can_convert_undefined_to_nan,
                                      bool deoptimize_on_minus_zero,
                                      LEnvironment* env,
                                      NumberUntagDMode mode) {
  Label load_smi, done;

  X87PrepareToWrite(res_reg);
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (!can_convert_undefined_to_nan) {
      DeoptimizeIf(not_equal, env);
    } else {
      Label heap_number, convert;
      __ j(equal, &heap_number, Label::kNear);

      // Convert undefined (or hole) to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, env);

      __ bind(&convert);
      ExternalReference nan =
          ExternalReference::address_of_canonical_non_hole_nan();
      __ fld_d(Operand::StaticVariable(nan));
      __ jmp(&done, Label::kNear);

      __ bind(&heap_number);
    }
    // Heap number to x87 conversion.
    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ fldz();
      __ FCmp();
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ j(not_zero, &done, Label::kNear);

      // Use general purpose registers to check if we have -0.0
      __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ test(temp_reg, Immediate(HeapNumber::kSignMask));
      __ j(zero, &done, Label::kNear);

      // Pop FPU stack before deoptimizing.
      __ fstp(0);
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ push(temp_reg);
  __ fild_s(Operand(esp, 0));
  __ add(esp, Immediate(kPointerSize));
  __ bind(&done);
  X87CommitWrite(res_reg);
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                Register temp_reg,
                                XMMRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, env);
    }

    // Heap number to XMM conversion.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(result_reg, xmm_scratch);
      __ j(not_zero, &done, Label::kNear);
      __ movmskpd(temp_reg, result_reg);
      __ test_b(temp_reg, 1);
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, env);

      ExternalReference nan =
          ExternalReference::address_of_canonical_non_hole_nan();
      __ movsd(result_reg, Operand::StaticVariable(nan));
      __ jmp(&done, Label::kNear);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ Cvtsi2sd(result_reg, Operand(temp_reg));
  __ bind(&done);
}


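// Slow path for TaggedToI. Truncating conversions also accept heap numbers
// and the oddballs undefined/true/false (mapped to 0/1/0); everything else,
// or any non-number in the non-truncating case, deoptimizes.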
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ cmp(input_reg, factory()->undefined_value());
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, Immediate(0));
    __ jmp(done);

    __ bind(&check_bools);
    __ cmp(input_reg, factory()->true_value());
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, Immediate(1));
    __ jmp(done);

    __ bind(&check_false);
    __ cmp(input_reg, factory()->false_value());
    __ RecordComment("Deferred TaggedToI: cannot truncate");
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, Immediate(0));
    __ jmp(done);
  } else {
    Label bailout;
    XMMRegister scratch = (instr->temp() != NULL)
        ? ToDoubleRegister(instr->temp())
        : no_xmm_reg;
    __ TaggedToI(input_reg, input_reg, scratch,
                 instr->hydrogen()->GetMinusZeroMode(), &bailout);
    __ jmp(done);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
  }
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen,
                      LTaggedToI* instr,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register input_reg = ToRegister(input);
  ASSERT(input_reg.is(ToRegister(instr->result())));

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred =
        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);

    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* temp = instr->temp();
  ASSERT(temp->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  bool deoptimize_on_minus_zero =
      instr->hydrogen()->deoptimize_on_minus_zero();
  Register temp_reg = ToRegister(temp);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  if (CpuFeatures::IsSupported(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister result_reg = ToDoubleRegister(result);
    EmitNumberUntagD(input_reg,
                     temp_reg,
                     result_reg,
                     instr->hydrogen()->can_convert_undefined_to_nan(),
                     deoptimize_on_minus_zero,
                     instr->environment(),
                     mode);
  } else {
    EmitNumberUntagDNoSSE2(input_reg,
                           temp_reg,
                           ToX87Register(instr->result()),
                           instr->hydrogen()->can_convert_undefined_to_nan(),
                           deoptimize_on_minus_zero,
                           instr->environment(),
                           mode);
  }
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister input_reg = ToDoubleRegister(input);
      __ TruncateDoubleToI(result_reg, input_reg);
    } else {
      X87Register input_reg = ToX87Register(input);
      X87Fxch(input_reg);
      __ TruncateX87TOSToI(result_reg);
    }
  } else {
    Label bailout, done;
    if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
      CpuFeatureScope scope(masm(), SSE2);
      XMMRegister input_reg = ToDoubleRegister(input);
      XMMRegister xmm_scratch = double_scratch0();
      __ DoubleToI(result_reg, input_reg, xmm_scratch,
          instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
    } else {
      X87Register input_reg = ToX87Register(input);
      X87Fxch(input_reg);
      __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
                   &bailout, Label::kNear);
    }
    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());
  Register result_reg = ToRegister(result);

  Label bailout, done;
  if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
    CpuFeatureScope scope(masm(), SSE2);
    XMMRegister input_reg = ToDoubleRegister(input);
    XMMRegister xmm_scratch = double_scratch0();
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
  } else {
    X87Register input_reg = ToX87Register(input);
    X87Fxch(input_reg);
    __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
        &bailout, Label::kNear);
  }
  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  __ SmiTag(result_reg);
  DeoptimizeIf(overflow, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ test(ToOperand(input), Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (instr->hydrogen()->object_in_new_space()) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ cmp(reg, Operand::ForCell(cell));
  } else {
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, object);
  }
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    __ xor_(esi, esi);
    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    __ test(eax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}


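// Compares the object's map against the instruction's map set and
// deoptimizes on a mismatch. If one of the maps is a migration target, the
// mismatch first goes through a deferred instance migration and the check
// is retried.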
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen,
                      LCheckMaps* instr,
                      Register object,
                      const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(reg, map, &success);
    __ j(equal, &success);
  }

  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(reg, map, &success);
  if (instr->hydrogen()->has_migration_target()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  CpuFeatureScope scope(masm(), SSE2);
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  CpuFeatureScope scope(masm(), SSE2);

  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);
  __ bind(&done);
}


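// SSE2-free version of DoClampTToUint8: clamps a tagged value to [0, 255]
// by decoding the heap number's exponent and mantissa with integer
// arithmetic, including round-to-even on the 0.5 boundary, instead of using
// x87 instructions.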
void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
5763
  Register input_reg = ToRegister(instr->unclamped());
5764
  Register result_reg = ToRegister(instr->result());
5765
  Register scratch = ToRegister(instr->scratch());
5766
  Register scratch2 = ToRegister(instr->scratch2());
5767
  Register scratch3 = ToRegister(instr->scratch3());
5768
  Label is_smi, done, heap_number, valid_exponent,
5769
      largest_value, zero_result, maybe_nan_or_infinity;
5770

    
5771
  __ JumpIfSmi(input_reg, &is_smi);
5772

    
5773
  // Check for heap number
5774
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
5775
         factory()->heap_number_map());
5776
  __ j(equal, &heap_number, Label::kFar);
5777

    
5778
  // Check for undefined. Undefined is converted to zero for clamping
5779
  // conversions.
5780
  __ cmp(input_reg, factory()->undefined_value());
5781
  DeoptimizeIf(not_equal, instr->environment());
5782
  __ jmp(&zero_result);
5783

    
5784
  // Heap number
5785
  __ bind(&heap_number);
5786

    
5787
  // Surprisingly, all of the hand-crafted bit-manipulations below are much
5788
  // faster than the x86 FPU built-in instruction, especially since "banker's
5789
  // rounding" would be additionally very expensive
5790

    
5791
  // Get exponent word.
5792
  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
5793
  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
5794

    
5795
  // Test for negative values --> clamp to zero
5796
  __ test(scratch, scratch);
5797
  __ j(negative, &zero_result);
5798

    
5799
  // Get exponent alone in scratch2.
5800
  __ mov(scratch2, scratch);
5801
  __ and_(scratch2, HeapNumber::kExponentMask);
5802
  __ shr(scratch2, HeapNumber::kExponentShift);
5803
  __ j(zero, &zero_result);
5804
  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
5805
  __ j(negative, &zero_result);
5806

    
5807
  const uint32_t non_int8_exponent = 7;
  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
  // If the exponent is too big, check for special values.
  __ j(greater, &maybe_nan_or_infinity, Label::kNear);

  __ bind(&valid_exponent);
  // Exponent word in scratch, exponent in scratch2. We know that the exponent
  // is at most 7. The shift bias is the number of bits to shift the mantissa
  // such that, with an exponent of 7, the top-most one ends up in bit 30,
  // allowing detection of the rounding overflow from 255.5 to 256 (bit 31
  // goes from 0 to 1).
  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
  __ lea(result_reg, MemOperand(scratch2, shift_bias));
  // Here result_reg (ecx) is the shift, scratch is the exponent word.  Get the
  // top bits of the mantissa.
  __ and_(scratch, HeapNumber::kMantissaMask);
  // Put back the implicit 1 of the mantissa.
  __ or_(scratch, 1 << HeapNumber::kExponentShift);
  // Shift up to round.
  __ shl_cl(scratch);
  // Use "banker's rounding" as the spec requires: if the fractional part of
  // the number is exactly 0.5, use the bit in the "ones" place and add it to
  // the "halves" place, which has the effect of rounding to even.
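  // For example: after the shift, the integer part sits in bits 30..23 and
  // the fraction in bits 22..0. For 2.5 the "ones" bit (bit 23) is clear, so
  // nothing is added and the result is the even value 2; for 3.5 it is set,
  // half (1 << 22) is added, and the carry rounds up to 4.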
  __ mov(scratch2, scratch);
  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
  const uint32_t one_bit_shift = one_half_bit_shift + 1;
  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
  Label no_round;
  __ j(less, &no_round);
  Label round_up;
  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
  __ j(greater, &round_up);
  __ test(scratch3, scratch3);
  __ j(not_zero, &round_up);
  __ mov(scratch2, scratch);
  __ and_(scratch2, Immediate(1 << one_bit_shift));
  __ shr(scratch2, 1);
  __ bind(&round_up);
  __ add(scratch, scratch2);
  __ j(overflow, &largest_value);
  __ bind(&no_round);
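  // The rounded integer now occupies bits 30..23; shift it into the low byte.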
  __ shr(scratch, 23);
  __ mov(result_reg, scratch);
  __ jmp(&done, Label::kNear);

  __ bind(&maybe_nan_or_infinity);
  // Check for NaN/Infinity; all other values map to 255.
  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
  __ j(not_equal, &largest_value, Label::kNear);

  // Check for NaN, which differs from Infinity in that at least one mantissa
  // bit is set.
  __ and_(scratch, HeapNumber::kMantissaMask);
  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ j(not_zero, &zero_result);  // M!=0 --> NaN
  // Infinity -> Fall through to map to 255.

  __ bind(&largest_value);
  __ mov(result_reg, Immediate(255));
  __ jmp(&done, Label::kNear);

  __ bind(&zero_result);
  __ xor_(result_reg, result_reg);
  __ jmp(&done);

  // smi
  __ bind(&is_smi);
  if (!input_reg.is(result_reg)) {
    __ mov(result_reg, input_reg);
  }
  __ SmiUntag(result_reg);
  __ ClampUint8(result_reg);
  __ bind(&done);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen,
                     LAllocate* instr,
                     const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr, x87_stack_);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

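  // The requested size is either a compile-time constant or a register; the
  // macro assembler provides an Allocate overload for each case.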
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

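  // Prefilling with one-pointer filler maps presumably keeps the heap
  // iterable while the caller has not yet initialized the object's fields.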
  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
        isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Immediate(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this);
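  // The runtime allocation functions below take the size as a Smi-tagged
  // argument, so both branches push a Smi.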
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ push(Immediate(Smi::FromInt(size)));
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(
        Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(
        Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
  } else {
    CallRuntimeFromDeferred(
        Runtime::kAllocateInNewSpace, 1, instr, instr->context());
  }
  __ StoreToSafepointRegisterSlot(result, eax);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  Label materialized;
  // Registers will be used as follows:
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  // esi = context.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(ecx, instr->hydrogen()->literals());
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, factory()->undefined_value());
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function
  // Result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
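  // Keep the materialized literal in ebx; it is the source of the field copy
  // below.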
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  // Use the fast-case closure allocation code that allocates in new space
  // for nested functions that don't need literal cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(instr->hydrogen()->shared_info()));
    __ push(Immediate(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->value();
  EmitPushTaggedOperand(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
          input, instr->type_literal());
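  // EmitTypeofIs returns no_condition for an unknown type literal, in which
  // case it has already jumped unconditionally to the false label.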
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ cmp(input, factory()->null_value());
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ cmp(input, factory()->null_value());
      __ j(equal, true_label);
    }
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = zero;

  } else {
    __ jmp(false_label);
  }
  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp = ToRegister(instr->temp());

  EmitIsConstructCall(temp);
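  // EmitIsConstructCall leaves the flags from its frame-marker comparison,
  // so branching on equal tests for a construct frame.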
  EmitBranch(instr, equal);
}


void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), type);
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen,
                       LStackCheck* instr,
                       const X87Stack& x87_stack)
        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
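  // Deoptimize if the receiver is undefined, null, a Smi, or a proxy; for-in
  // over those cannot use the enum cache.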
  __ cmp(eax, isolate()->factory()->undefined_value());
  DeoptimizeIf(equal, instr->environment());

  __ cmp(eax, isolate()->factory()->null_value());
  DeoptimizeIf(equal, instr->environment());

  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

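  // A map result means the enum cache is usable; anything else (a fixed
  // array of property names) forces a deopt to the generic path.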
  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}


void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  __ test(result, result);
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  __ cmp(ToRegister(instr->map()),
         FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

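  // The index is a Smi: non-negative for in-object fields, negative for
  // fields in the out-of-object properties array. times_half_pointer_size
  // compensates for the Smi tag when scaling.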
  Label out_of_object, done;
  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to the out-of-object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32