// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "mips/lithium-codegen-mips.h"
#include "mips/lithium-gap-resolver-mips.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {


class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LChunkBuilder::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // a1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // ra: Caller's pc.

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). t1 is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ Branch(&ok, eq, t1, Operand(zero_reg));

      int receiver_offset = scope()->num_parameters() * kPointerSize;
      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
      __ sw(a2, MemOperand(sp, receiver_offset));
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
      __ push(a0);
      __ push(a1);
      __ Addu(a0, sp, Operand(slots * kPointerSize));
      __ li(a1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ Subu(a0, a0, Operand(kPointerSize));
      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
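      // Added note: a0 and a1 were pushed after the slots were reserved, so
      // the 2 * kPointerSize store offset skips those two saved words; the
      // loop fills every reserved slot with kSlotsZapValue so that reads of
      // uninitialized slots stand out in debug builds.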
      __ Branch(&loop, ne, a0, Operand(sp));
      __ pop(a1);
      __ pop(a0);
    } else {
      __ Subu(sp, sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    Comment(";;; Save clobbered callee double registers");
    int count = 0;
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    while (!save_iterator.Done()) {
      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in a1.
    __ push(a1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both v0 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ lw(a0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ sw(a0, target);
        // Update the write barrier. This clobbers a3 and a0.
        __ RecordWriteContextSlot(
            cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ Subu(sp, sp, Operand(slots * kPointerSize));
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(at);
        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }
  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
    if (deopt_jump_table_[i].needs_frame) {
      if (needs_frame.is_bound()) {
        __ Branch(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ Addu(fp, sp, Operand(2 * kPointerSize));
        __ Call(t9);
      }
    } else {
      __ Call(t9);
    }
  }
  __ RecordComment("]");

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
  return DoubleRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ LoadObject(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ lw(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                                FloatRegister flt_scratch,
                                                DoubleRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
      __ mtc1(at, flt_scratch);
      __ cvt_d_w(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    MemOperand mem_op = ToMemOperand(op);
    __ ldc1(dbl_scratch, mem_op);
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  return MemOperand(fp, StackSlotOffset(op->index()));
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ lw(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type,
                            Register src1,
                            const Operand& src2) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on MIPS.
  if (FLAG_deopt_every_n_times == 1 &&
      !info()->IsStub() &&
      info()->opt_count() == id) {
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label skip;
    if (condition != al) {
      __ Branch(&skip, NegateCondition(condition), src1, src2);
    }
    __ stop("trap_on_deopt");
    __ bind(&skip);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (condition == al && frame_is_built_) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Register src1,
                            const Operand& src2) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type, src1, src2);
}


void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  ZoneList<Handle<JSObject> > objects(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
      if (it.rinfo()->target_object()->IsMap()) {
        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
        maps.Add(map, zone());
      } else if (it.rinfo()->target_object()->IsJSObject()) {
        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
        objects.Add(object, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded objects after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
  for (int i = 0; i < objects.length(); i++) {
    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      __ lw(a0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());
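    // Added note: for a divisor of 8 the mask below is 7, so e.g. 13 % 8
    // computes 13 & 7 = 5; for a negative left operand the code negates
    // first and negates back, so -13 % 8 yields -5, matching JavaScript's
    // rule that the remainder takes the sign of the dividend. Abs(kMinInt)
    // overflows back to kMinInt, but kMinInt - 1 = 0x7fffffff is still the
    // correct mask, which is why kMinInt needs no special handling here.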

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
                &left_is_not_negative, ge, left_reg, Operand(zero_reg));
      __ subu(result_reg, zero_reg, left_reg);
      __ And(result_reg, result_reg, divisor - 1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
      }
      __ Branch(USE_DELAY_SLOT, &done);
      __ subu(result_reg, zero_reg, result_reg);
    }

    __ bind(&left_is_not_negative);
    __ And(result_reg, left_reg, divisor - 1);
    __ bind(&done);

  } else if (hmod->fixed_right_arg().has_value) {
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());
    const Register right_reg = ToRegister(instr->right());

    int32_t divisor = hmod->fixed_right_arg().value;
    ASSERT(IsPowerOf2(divisor));

    // Check if our assumption of a fixed right operand still holds.
    DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
                &left_is_not_negative, ge, left_reg, Operand(zero_reg));
      __ subu(result_reg, zero_reg, left_reg);
      __ And(result_reg, result_reg, divisor - 1);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
      }
      __ Branch(USE_DELAY_SLOT, &done);
      __ subu(result_reg, zero_reg, result_reg);
    }

    __ bind(&left_is_not_negative);
    __ And(result_reg, left_reg, divisor - 1);
    __ bind(&done);

  } else {
    const Register scratch = scratch0();
    const Register left_reg = ToRegister(instr->left());
    const Register result_reg = ToRegister(instr->result());

    // div runs in the background while we check for special cases.
    Register right_reg = EmitLoadRegister(instr->right(), scratch);
    __ div(left_reg, right_reg);
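    // Added note: MIPS div deposits the quotient in the LO register and the
    // remainder in HI, so the mfhi below retrieves left % right once the
    // special cases have been ruled out.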

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return a
    // NaN.
    if (right->CanBeZero()) {
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
    }

    // Check for kMinInt % -1, we have to deopt if we care about -0, because we
    // can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label left_not_min_int;
      __ Branch(&left_not_min_int, ne, left_reg, Operand(kMinInt));
      // TODO(svenpanne) Don't deopt when we don't care about -0.
      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
      __ bind(&left_not_min_int);
    }

    // TODO(svenpanne) Only emit the test/deopt if we have to.
    __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
    __ mfhi(result_reg);

    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
    }
    __ bind(&done);
  }
}


void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, at, no_reg));

  uint32_t divisor_abs = abs(divisor);

  int32_t power_of_2_factor =
    CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
        DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
      }
      // Compute the remainder.
      __ Move(remainder, zero_reg);
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch and condition free code for integer division by a power
        // of two.
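        // Worked example (added for clarity): for dividend = -7 and
        // divisor_abs = 4 (power = 2), the sra/srl pair below materializes
        // divisor_abs - 1 = 3 for negative dividends (and 0 for
        // non-negative ones); adding gives -7 + 3 = -4, and sra by 2 yields
        // -1, the quotient correctly truncated toward zero.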
        int32_t power = WhichPowerOf2(divisor_abs);
        if (power > 1) {
          __ sra(scratch, dividend, power - 1);
        }
        __ srl(scratch, scratch, 32 - power);
        __ Addu(scratch, dividend, Operand(scratch));
        __ sra(result, scratch, power);
        // Negate if necessary.
        // We don't need to check for overflow because the case '-1' is
        // handled separately.
        if (divisor < 0) {
          ASSERT(divisor != -1);
          __ Subu(result, zero_reg, Operand(result));
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sll(scratch, result, power);
          __ Subu(remainder, dividend, Operand(scratch));
        } else {
          __ sll(scratch, result, power);
          __ Addu(remainder, dividend, Operand(scratch));
        }
        return;
      } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer's Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // but not
        //   <divisor with magic numbers> * <other divisor with magic numbers>
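        // Worked example (added for clarity, using the textbook constants
        // rather than V8's tables): dividing by 3 uses M = 0x55555556 with
        // s = 0. For dividend 9 the high word of 9 * M is 3 and the sign
        // bit contributes 0, giving quotient 3; for dividend -9 the high
        // word is -4 and the sign bit contributes 1, giving -3.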
        DivMagicNumbers magic_numbers =
          DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        // Branch and condition free code for integer division by a power
        // of two.
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

        __ li(scratch, Operand(M));
        __ mult(dividend, scratch);
        __ mfhi(scratch);
        if (M < 0) {
          __ Addu(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ sra(scratch, scratch, s);
          __ mov(scratch, scratch);
        }
        __ srl(at, dividend, 31);
        __ Addu(result, scratch, Operand(at));
        if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
        // Compute the remainder.
        __ li(scratch, Operand(divisor));
        __ Mul(scratch, result, Operand(scratch));
        __ Subu(remainder, dividend, Operand(scratch));
      } else {
        __ li(scratch, Operand(divisor));
        __ div(dividend, scratch);
        __ mfhi(remainder);
        __ mflo(result);
      }
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  const Register left = ToRegister(instr->left());
  const Register right = ToRegister(instr->right());
  const Register result = ToRegister(instr->result());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ div(left, right);

  // Check for x / 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
    DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
    __ bind(&left_not_min_int);
  }
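
  // Added note: if some use of the result observes fractional values, the
  // remainder in HI must be zero or we deopt; e.g. 8 / 2 yields 4, while
  // 7 / 2 bails out rather than silently truncating to 3.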

  if (!instr->hydrogen()->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mfhi(result);
    DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
  }
  __ mflo(result);
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(addend.is(ToDoubleRegister(instr->result())));

  __ madd_d(addend, addend, multiplier, multiplicand);
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  if (instr->right()->IsConstantOperand()) {
    Label done;
    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
    if (divisor < 0) {
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }
    EmitSignedIntegerDivisionByConstant(result,
                                        left,
                                        divisor,
                                        remainder,
                                        scratch,
                                        instr->environment());
    // We performed a truncating division. Correct the result if necessary.
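    // Worked example (added for clarity): -7 / 2 truncates to -3 with
    // remainder -1, but floor(-3.5) is -4; so when the remainder is
    // non-zero and its sign differs from the divisor's (the Xor below is
    // negative), the quotient is decremented by one.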
    __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
    __ Xor(scratch, remainder, Operand(divisor));
    __ Branch(&done, ge, scratch, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    __ bind(&done);
  } else {
    Label done;
    const Register right = ToRegister(instr->right());

    // On MIPS div is asynchronous - it will run in the background while we
    // check for special cases.
    __ div(left, right);

    // Check for x / 0.
    DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg));

    // Check for (0 / -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label left_not_zero;
      __ Branch(&left_not_zero, ne, left, Operand(zero_reg));
      DeoptimizeIf(lt, instr->environment(), right, Operand(zero_reg));
      __ bind(&left_not_zero);
    }

    // Check for (kMinInt / -1).
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      Label left_not_min_int;
      __ Branch(&left_not_min_int, ne, left, Operand(kMinInt));
      DeoptimizeIf(eq, instr->environment(), right, Operand(-1));
      __ bind(&left_not_min_int);
    }

    __ mfhi(remainder);
    __ mflo(result);

    // We performed a truncating division. Correct the result if necessary.
    __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
    __ Xor(scratch, remainder, Operand(right));
    __ Branch(&done, ge, scratch, Operand(zero_reg));
    __ Subu(result, result, Operand(1));
    __ bind(&done);
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant will be handled separately.
      // If constant is negative and left is zero, the result should be -0.
      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
          DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
        } else {
          __ Subu(result, zero_reg, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
        }
        __ mov(result, zero_reg);
        break;
      case 1:
        // Nothing to do.
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
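        // Worked examples (added for clarity): x * 5 takes the
        // constant_abs - 1 path below and becomes (x << 2) + x, while
        // x * 7 takes the constant_abs + 1 path and becomes (x << 3) - x.
        // The mask/xor pair below is a branch-free absolute value.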
1393
        int32_t mask = constant >> 31;
1394
        uint32_t constant_abs = (constant + mask) ^ mask;
1395

    
1396
        if (IsPowerOf2(constant_abs)) {
1397
          int32_t shift = WhichPowerOf2(constant_abs);
1398
          __ sll(result, left, shift);
1399
          // Correct the sign of the result if the constant is negative.
1400
          if (constant < 0)  __ Subu(result, zero_reg, result);
1401
        } else if (IsPowerOf2(constant_abs - 1)) {
1402
          int32_t shift = WhichPowerOf2(constant_abs - 1);
1403
          __ sll(scratch, left, shift);
1404
          __ Addu(result, scratch, left);
1405
          // Correct the sign of the result if the constant is negative.
1406
          if (constant < 0)  __ Subu(result, zero_reg, result);
1407
        } else if (IsPowerOf2(constant_abs + 1)) {
1408
          int32_t shift = WhichPowerOf2(constant_abs + 1);
1409
          __ sll(scratch, left, shift);
1410
          __ Subu(result, scratch, left);
1411
          // Correct the sign of the result if the constant is negative.
1412
          if (constant < 0)  __ Subu(result, zero_reg, result);
1413
        } else {
1414
          // Generate standard code.
1415
          __ li(at, constant);
1416
          __ Mul(result, left, at);
1417
        }
1418
    }
1419

    
1420
  } else {
1421
    ASSERT(right_op->IsRegister());
1422
    Register right = ToRegister(right_op);
1423

    
1424
    if (overflow) {
1425
      // hi:lo = left * right.
1426
      if (instr->hydrogen()->representation().IsSmi()) {
1427
        __ SmiUntag(result, left);
1428
        __ mult(result, right);
1429
        __ mfhi(scratch);
1430
        __ mflo(result);
1431
      } else {
1432
        __ mult(left, right);
1433
        __ mfhi(scratch);
1434
        __ mflo(result);
1435
      }
      __ sra(at, result, 31);
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ Xor(at, left, right);
      __ Branch(&done, ge, at, Operand(zero_reg));
      // Bail out if the result is minus zero.
      DeoptimizeIf(eq,
                   instr->environment(),
                   result,
                   Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, at));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
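        // left ^ ~0 is bitwise NOT, which MIPS expresses as nor(zero, left).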
        __ Nor(result, zero_reg, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  if (right_op->IsRegister()) {
    // No need to mask the right operand on MIPS, it is built into the variable
    // shift instructions.
    switch (instr->op()) {
      case Token::ROR:
        __ Ror(result, left, Operand(ToRegister(right_op)));
        break;
      case Token::SAR:
        __ srav(result, left, ToRegister(right_op));
        break;
      case Token::SHR:
        __ srlv(result, left, ToRegister(right_op));
        if (instr->can_deopt()) {
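          // The unsigned shift result must be representable as a signed
          // 32-bit integer; with a zero shift amount the sign bit can
          // survive, so deoptimize on a negative result.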
          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
        }
        break;
      case Token::SHL:
        __ sllv(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ Ror(result, left, Operand(shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sra(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srl(result, left, shift_count);
        } else {
          if (instr->can_deopt()) {
            __ And(at, left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
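            // Retagging as a smi shifts left by one more bit, so shift by
            // (count - 1) here and let SmiTagCheckOverflow supply the final
            // bit together with the overflow check.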
            if (shift_count != 1) {
              __ sll(result, left, shift_count - 1);
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
          } else {
            __ sll(result, left, shift_count);
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Since the overflow check macros do not support constant operands,
      // the IsConstantOperand case is handled by the previous clause.
      __ SubuAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Move(result, v);
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ li(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi, return the object.
    __ Move(result, input);
    __ JumpIfSmi(input, &done);
  }

  // If the object is not a value type, return the object.
  __ GetObjectType(input, map, map);
  __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
  __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(a0));
  ASSERT(result.is(v0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  __ And(at, object, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  __ GetObjectType(object, scratch, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));

  if (index->value() == 0) {
    __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ li(scratch, Operand(stamp));
      __ lw(scratch, MemOperand(scratch));
      __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
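      // The object's cache stamp matches the global date cache stamp, so
      // the cached field is still valid and can be loaded directly.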
      __ lw(result, FieldMemOperand(object, JSDate::kValueOffset +
                                            kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ li(a1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());
  LOperand* index_op = instr->index();
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  String::Encoding encoding = instr->encoding();

  if (FLAG_debug_code) {
    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ And(scratch, scratch,
           Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                                ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
  }

  if (index_op->IsConstantOperand()) {
    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ sb(value,
          FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
    } else {
      __ sh(value,
          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
    }
  } else {
    Register index = ToRegister(index_op);
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ Addu(scratch, string, Operand(index));
      __ sb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
    } else {
      __ sll(scratch, index, 1);
      __ Addu(scratch, string, scratch);
      __ sh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
    }
  }
}


void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ push(input_reg);
  ASSERT(ToRegister(instr->context()).is(cp));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (!can_overflow) {
    if (right->IsStackSlot() || right->IsArgument()) {
      Register right_reg = EmitLoadRegister(right, at);
      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
    } else {
      ASSERT(right->IsRegister() || right->IsConstantOperand());
      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
    }
  } else {  // can_overflow.
    Register overflow = scratch0();
    Register scratch = scratch1();
    if (right->IsStackSlot() ||
        right->IsArgument() ||
        right->IsConstantOperand()) {
      Register right_reg = EmitLoadRegister(right, scratch);
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 right_reg,
                                 overflow);  // Reg at also used as scratch.
    } else {
      ASSERT(right->IsRegister());
      // Since the overflow check macros do not support constant operands,
      // the IsConstantOperand case is handled by the previous clause.
      __ AdduAndCheckForOverflow(ToRegister(result),
                                 ToRegister(left),
                                 ToRegister(right),
                                 overflow);  // Reg at also used as scratch.
    }
    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, at));
    Register result_reg = ToRegister(instr->result());
    Label return_right, done;
    if (!result_reg.is(left_reg)) {
      __ Branch(&return_right, NegateCondition(condition), left_reg, right_op);
      __ mov(result_reg, left_reg);
      __ Branch(&done);
    }
    __ Branch(&done, condition, left_reg, right_op);
    __ bind(&return_right);
    __ Addu(result_reg, zero_reg, right_op);
    __ bind(&done);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    FPURegister left_reg = ToDoubleRegister(left);
    FPURegister right_reg = ToDoubleRegister(right);
    FPURegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
    __ Branch(&return_right);

    __ bind(&check_zero);
    // left == right != 0.
    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
    // At this point, both left and right are either 0 or -0.
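    // For min, compute -((-left) - right): an IEEE sum of signed zeros is
    // +0 unless both operands are -0, so the double negation makes -0 win;
    // for max, a plain add lets +0 win.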
    if (operation == HMathMinMax::kMathMin) {
      __ neg_d(left_reg, left_reg);
      __ sub_d(result_reg, left_reg, right_reg);
      __ neg_d(result_reg, result_reg);
    } else {
      __ add_d(result_reg, left_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&check_nan_left);
    // left == NaN.
    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ mov_d(result_reg, right_reg);
    }
    __ Branch(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ mov_d(result_reg, left_reg);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ add_d(result, left, right);
      break;
    case Token::SUB:
      __ sub_d(result, left, right);
      break;
    case Token::MUL:
      __ mul_d(result, left, right);
      break;
    case Token::DIV:
      __ div_d(result, left, right);
      break;
    case Token::MOD: {
      // Save a0-a3 on the stack.
      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
      __ MultiPush(saved_regs);

      __ PrepareCallCFunction(0, 2, scratch0());
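      // There is no FPU instruction for Token::MOD, so the operation is
      // routed to a C helper taking zero integer and two double arguments.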
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result into the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore saved registers.
      __ MultiPop(saved_regs);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(a1));
  ASSERT(ToRegister(instr->right()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  // Other architectures use a nop here to signal that there is no inlined
  // patchable code. MIPS does not need the nop, since our marker
  // instruction (andi zero_reg) will never be used in normal code.
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr,
                          Condition condition,
                          Register src1,
                          const Operand& src2) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();
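  // Emit at most one branch by falling through to whichever destination is
  // the next block in emission order; e.g. when the true block is next,
  // only the negated branch to the false block is needed.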
  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(right_block),
              NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
  } else {
    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitBranchF(InstrType instr,
                           Condition condition,
                           FPURegister src1,
                           FPURegister src2) {
  int right_block = instr->FalseDestination(chunk_);
  int left_block = instr->TrueDestination(chunk_);

  int next_block = GetNextEmittedBlock();
  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
               NegateCondition(condition), src1, src2);
  } else if (right_block == next_block) {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
  } else {
    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
               condition, src1, src2);
    __ Branch(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranchF(InstrType instr,
                                Condition condition,
                                FPURegister src1,
                                FPURegister src2) {
  int false_block = instr->FalseDestination(chunk_);
  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
             condition, src1, src2);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LDebugBreak");
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    EmitBranch(instr, ne, reg, Operand(zero_reg));
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    EmitBranchF(instr, nue, reg, kDoubleRegZero);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ LoadRoot(at, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq, reg, Operand(at));
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, ne, reg, Operand(zero_reg));
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      DoubleRegister dbl_scratch = double_scratch0();
      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
      EmitBranch(instr, ne, at, Operand(zero_reg));
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ LoadRoot(at, Heap::kTrueValueRootIndex);
        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
        __ LoadRoot(at, Heap::kFalseValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ LoadRoot(at, Heap::kNullValueRootIndex);
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
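        // Ordering matters here: zero was dispatched to the false label
        // above, so any smi that reaches this jump is non-zero and truthy.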
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ And(at, reg, Operand(kSmiTagMask));
        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
          __ And(at, at, Operand(1 << Map::kIsUndetectable));
          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_),
                  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        const Register scratch = scratch1();
        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DoubleRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
        __ Branch(&not_heap_number, ne, map, Operand(at));
        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                   ne, dbl_scratch, kDoubleRegZero);
        // Falls through if dbl_scratch == 0.
        __ Branch(instr->FalseLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right as doubles and load the
      // resulting flags into the normal status register.
      FPURegister left_reg = ToDoubleRegister(left);
      FPURegister right_reg = ToDoubleRegister(right);

      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
                 left_reg, right_reg);

      EmitBranchF(instr, cond, left_reg, right_reg);
    } else {
      Register cmp_left;
      Operand cmp_right = Operand(0);

      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(left);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(left);
          cmp_right = Operand(value);
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          cmp_left = ToRegister(right);
          cmp_right = Operand(Smi::FromInt(value));
        } else {
          cmp_left = ToRegister(right);
          cmp_right = Operand(value);
        }
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        cmp_left = ToRegister(left);
        cmp_right = Operand(ToRegister(right));
      }

      EmitBranch(instr, cond, cmp_left, cmp_right);
    }
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  EmitBranch(instr, eq, left, Operand(right));
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ li(at, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq, input_reg, Operand(at));
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  EmitFalseBranchF(instr, eq, input_reg, input_reg);

  Register scratch = scratch0();
  __ FmoveHigh(scratch, input_reg);
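  // Every non-NaN value was dispatched to the false block above; among
  // NaNs, the hole is identified by its distinctive upper 32 bits.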
  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Register temp2,
                                 Label* is_not_object,
                                 Label* is_object) {
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ Branch(is_object, eq, input, Operand(temp2));

  // Load map.
  __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));

  // Load instance type and check that it is in object type range.
  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ Branch(is_not_object,
            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
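  // The caller completes the range check by branching on the returned 'le'
  // condition against LAST_NONCALLABLE_SPEC_OBJECT_TYPE.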

  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = scratch0();

  Condition true_cond =
      EmitIsObject(reg, temp1, temp2,
          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond, temp2,
             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ GetObjectType(input, temp1, temp1);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond, temp1,
             Operand(FIRST_NONSTRING_TYPE));
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), at);
  __ And(at, input_reg, kSmiTagMask);
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne, at, Operand(zero_reg));
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition, v0, Operand(zero_reg));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ GetObjectType(input, scratch, scratch);
  EmitBranch(instr,
             BranchCondition(instr->hydrogen()),
             scratch,
             Operand(TestType(instr->hydrogen())));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lw(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq, at, Operand(zero_reg));
}


// Branches to a label or falls through with the answer in flags.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);

    __ GetObjectType(input, temp, temp2);
    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
  } else {
    // Faster code path to avoid two compares: subtract the lower bound from
    // the actual type and do a signed compare with the width of the type
    // range.
    __ GetObjectType(input, temp, temp2);
    __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ GetObjectType(temp, temp2, temp2);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
  } else {
    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ lw(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.

  // End with the address of this class_name instance in the temp register.
  // On MIPS, the caller must do the comparison with Handle<String> class_name.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq, temp, Operand(class_name));
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(instr, eq, temp, Operand(instr->map()));
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label true_label, done;
  ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
  ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  __ Branch(&true_label, eq, result, Operand(zero_reg));
  __ li(result, Operand(factory()->false_value()));
  __ Branch(&done);
  __ bind(&true_label);
  __ li(result, Operand(factory()->true_value()));
  __ bind(&done);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }

   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(a0));
  ASSERT(result.is(v0));

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of
  // the hole value will be patched to the last map/result pair generated by
  // the instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));

  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  __ bind(deferred->map_check());  // Label for calculating code patching.
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch with
  // the cached map.
  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
  __ li(at, Operand(Handle<Object>(cell)));
  __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
  __ Branch(&cache_miss, ne, map, Operand(at));
  // We use Factory::the_hole_value() on purpose instead of loading from the
  // root array to force relocation to be able to later patch
  // with true or false.
  __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
  __ Branch(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not an instance of anything.
  __ LoadRoot(temp, Heap::kNullValueRootIndex);
  __ Branch(&false_result, eq, object, Operand(temp));

  // String values are not instances of anything.
  Condition cc = __ IsObjectStringType(object, temp, temp);
  __ Branch(&false_result, cc, temp, Operand(zero_reg));

  // Go to the deferred code.
  __ Branch(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result holds either true or false. The deferred code also produces
  // a true or false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(v0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  // Get the temp register reserved by the instruction. This needs to be t0
  // because its safepoint register slot is used to communicate the offset to
  // the location of the map check.
  Register temp = ToRegister(instr->temp());
  ASSERT(temp.is(t0));
  __ LoadHeapObject(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 7;
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
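  // delta is the instruction distance back to the patched map check;
  // kAdditionalDelta accounts for the instructions emitted between this
  // point and the stub call that consumes the value.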
  Label before_push_delta;
  __ bind(&before_push_delta);
  {
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
    __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE);
    __ StoreToSafepointRegisterSlot(temp, temp);
  }
  CallCodeGeneric(stub.GetCode(isolate()),
                  RelocInfo::CODE_TARGET,
                  instr,
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // On MIPS there is no need for a "no inlined smi code" marker (nop).

  Condition condition = ComputeCompareCondition(op);
  // A minor optimization that relies on LoadRoot always emitting one
  // instruction.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
  Label done, check;
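  // The true value is loaded in the branch delay slot, so it is already in
  // place when the branch is taken; on fall-through the single-instruction
  // LoadRoot below overwrites it with the false value.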
2689
  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
2690
  __ bind(&check);
2691
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2692
  ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
2693
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2694
  __ bind(&done);
2695
}
2696

    
2697

    
2698
void LCodeGen::DoReturn(LReturn* instr) {
2699
  if (FLAG_trace && info()->IsOptimizing()) {
2700
    // Push the return value on the stack as the parameter.
2701
    // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2702
    // managed by the register allocator and tearing down the frame, it's
2703
    // safe to write to the context register.
2704
    __ push(v0);
2705
    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2706
    __ CallRuntime(Runtime::kTraceExit, 1);
2707
  }
2708
  if (info()->saves_caller_doubles()) {
2709
    ASSERT(NeedsEagerFrame());
2710
    BitVector* doubles = chunk()->allocated_double_registers();
2711
    BitVector::Iterator save_iterator(doubles);
2712
    int count = 0;
2713
    while (!save_iterator.Done()) {
2714
      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
2715
              MemOperand(sp, count * kDoubleSize));
2716
      save_iterator.Advance();
2717
      count++;
2718
    }
2719
  }
2720
  int no_frame_start = -1;
2721
  if (NeedsEagerFrame()) {
2722
    __ mov(sp, fp);
2723
    no_frame_start = masm_->pc_offset();
2724
    __ Pop(ra, fp);
2725
  }
2726
  if (instr->has_constant_parameter_count()) {
2727
    int parameter_count = ToInteger32(instr->constant_parameter_count());
2728
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
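    // Pop the parameters plus the receiver; e.g. two parameters give an
    // sp adjustment of 3 * kPointerSize.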
    if (sp_delta != 0) {
      __ Addu(sp, sp, Operand(sp_delta));
    }
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ sll(at, reg, kPointerSizeLog2);
    __ Addu(sp, sp, at);
  }

  __ Jump(ra);

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ li(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload.
    Register payload = ToRegister(instr->temp());
    __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
  }

  // Store the value.
  __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());

  __ lw(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
    } else {
      Label is_not_hole;
      __ Branch(&is_not_hole, ne, result, Operand(at));
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, target);
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);

    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
    } else {
      __ Branch(&skip_assignment, ne, scratch, Operand(at));
    }
  }

  __ sw(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch0(),
                              GetRAState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    if (access.representation().IsByte()) {
      __ lb(result, operand);
    } else {
      __ lw(result, operand);
    }
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ ldc1(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  MemOperand operand = FieldMemOperand(object, offset);
  if (access.representation().IsByte()) {
    __ lb(result, operand);
  } else {
    __ lw(result, operand);
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ GetObjectType(function, result, scratch);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));

  // Get the prototype or initial map from the function.
  __ lw(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2939

    
2940
  // If the function does not have an initial map, we're done.
2941
  Label done;
2942
  __ GetObjectType(result, scratch, scratch);
2943
  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
2944

    
2945
  // Get the prototype from the initial map.
2946
  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
2947
  __ Branch(&done);
2948

    
2949
  // Non-instance prototype: Fetch prototype from constructor field
2950
  // in initial map.
2951
  __ bind(&non_instance);
2952
  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
2953

    
2954
  // All done.
2955
  __ bind(&done);
2956
}
2957

    
2958

    
2959
void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
2960
  Register result = ToRegister(instr->result());
2961
  __ LoadRoot(result, instr->index());
2962
}
2963

    
2964

    
2965
void LCodeGen::DoLoadExternalArrayPointer(
2966
    LLoadExternalArrayPointer* instr) {
2967
  Register to_reg = ToRegister(instr->result());
2968
  Register from_reg  = ToRegister(instr->object());
2969
  __ lw(to_reg, FieldMemOperand(from_reg,
2970
                                ExternalArray::kExternalPointerOffset));
2971
}
2972

    
2973

    
2974
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2975
  Register arguments = ToRegister(instr->arguments());
2976
  Register result = ToRegister(instr->result());
2977
  if (instr->length()->IsConstantOperand() &&
2978
      instr->index()->IsConstantOperand()) {
2979
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
2980
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
2981
    int index = (const_length - const_index) + 1;
2982
    __ lw(result, MemOperand(arguments, index * kPointerSize));
2983
  } else {
2984
    Register length = ToRegister(instr->length());
2985
    Register index = ToRegister(instr->index());
2986
    // There are two words between the frame pointer and the last argument.
2987
    // Subtracting from length accounts for one of them, add one more.
2988
    __ subu(length, length, index);
2989
    __ Addu(length, length, Operand(1));
2990
    __ sll(length, length, kPointerSizeLog2);
2991
    __ Addu(at, arguments, Operand(length));
2992
    __ lw(result, MemOperand(at, 0));
2993
  }
2994
}
2995

    
2996

    
2997
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
2998
  Register external_pointer = ToRegister(instr->elements());
2999
  Register key = no_reg;
3000
  ElementsKind elements_kind = instr->elements_kind();
3001
  bool key_is_constant = instr->key()->IsConstantOperand();
3002
  int constant_key = 0;
3003
  if (key_is_constant) {
3004
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3005
    if (constant_key & 0xF0000000) {
3006
      Abort(kArrayIndexConstantValueTooBig);
3007
    }
3008
  } else {
3009
    key = ToRegister(instr->key());
3010
  }
3011
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
3012
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3013
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
3014
  int additional_offset = instr->additional_index() << element_size_shift;
3015

    
3016
  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3017
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3018
    FPURegister result = ToDoubleRegister(instr->result());
3019
    if (key_is_constant) {
3020
      __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
3021
    } else {
3022
      __ sll(scratch0(), key, shift_size);
3023
      __ Addu(scratch0(), scratch0(), external_pointer);
3024
    }
3025
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3026
      __ lwc1(result, MemOperand(scratch0(), additional_offset));
3027
      __ cvt_d_s(result, result);
3028
    } else  {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3029
      __ ldc1(result, MemOperand(scratch0(), additional_offset));
3030
    }
3031
  } else {
3032
    Register result = ToRegister(instr->result());
3033
    MemOperand mem_operand = PrepareKeyedOperand(
3034
        key, external_pointer, key_is_constant, constant_key,
3035
        element_size_shift, shift_size,
3036
        instr->additional_index(), additional_offset);
3037
    switch (elements_kind) {
3038
      case EXTERNAL_BYTE_ELEMENTS:
3039
        __ lb(result, mem_operand);
3040
        break;
3041
      case EXTERNAL_PIXEL_ELEMENTS:
3042
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
3043
        __ lbu(result, mem_operand);
3044
        break;
3045
      case EXTERNAL_SHORT_ELEMENTS:
3046
        __ lh(result, mem_operand);
3047
        break;
3048
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
3049
        __ lhu(result, mem_operand);
3050
        break;
3051
      case EXTERNAL_INT_ELEMENTS:
3052
        __ lw(result, mem_operand);
3053
        break;
3054
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
3055
        __ lw(result, mem_operand);
3056
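        // A loaded value with the sign bit set does not fit in a signed
        // int32, so unless the instruction is allowed to produce a uint32
        // result it must deoptimize.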
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          DeoptimizeIf(Ugreater_equal, instr->environment(),
              result, Operand(0x80000000));
        }
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DoubleRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset =
      FixedDoubleArray::kHeaderSize - kHeapObjectTag +
      (instr->additional_index() << element_size_shift);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key << element_size_shift;
  }
  __ Addu(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ sll(at, key, shift_size);
    __ Addu(scratch, scratch, at);
  }

  __ ldc1(result, MemOperand(scratch));

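  // The hole is encoded as a NaN with a distinguished bit pattern; on this
  // little-endian target it is enough to check the upper word, which sits
  // sizeof(kHoleNanLower32) bytes past the start of the element.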
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
  }
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ lw(result, FieldMemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ And(scratch, result, Operand(kSmiTagMask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_external()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  if (additional_index != 0 && !key_is_constant) {
    additional_index *= 1 << (element_size - shift_size);
    __ Addu(scratch0(), key, Operand(additional_index));
  }

  if (key_is_constant) {
    return MemOperand(base,
                      (constant_key << element_size) + additional_offset);
  }

  if (additional_index == 0) {
    if (shift_size >= 0) {
      __ sll(scratch0(), key, shift_size);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    } else {
      ASSERT_EQ(-1, shift_size);
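      // A shift_size of -1 means the elements are byte-sized while the key
      // is a smi (index << 1); shifting the key right by one both untags it
      // and produces the byte offset.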
      __ srl(scratch0(), key, 1);
      __ Addu(scratch0(), base, scratch0());
      return MemOperand(scratch0());
    }
  }

  if (shift_size >= 0) {
    __ sll(scratch0(), scratch0(), shift_size);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0());
  } else {
    ASSERT_EQ(-1, shift_size);
    __ srl(scratch0(), scratch0(), 1);
    __ Addu(scratch0(), base, scratch0());
    return MemOperand(scratch0());
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->key()).is(a0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register temp = scratch1();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ Subu(result, sp, 2 * kPointerSize);
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
  __ Branch(&done, eq, fp, Operand(elem));

  // Arguments adaptor frame present. Get argument length from there.
  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(result,
        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ lw(scratch,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ lw(scratch,
         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));

  // Do not transform the receiver to object for builtins.
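  // The compiler hints are stored as a smi, so each hint bit is shifted
  // left by kSmiTagSize in the loaded word.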
  int32_t strict_mode_function_mask =
                  1 <<  (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
  int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
  __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
  __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(&global_object, eq, receiver, Operand(scratch));

  // Deoptimize if the receiver is not a JS object.
  __ And(scratch, receiver, Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));

  __ GetObjectType(receiver, scratch, scratch);
  DeoptimizeIf(lt, instr->environment(),
               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ Branch(&receiver_ok);

  __ bind(&global_object);
  __ lw(receiver, GlobalObjectOperand());
  __ lw(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(a0));  // Used for parameter count.
  ASSERT(function.is(a1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(v0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ Move(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ Addu(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
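  // Both branches below fill their delay slot with the sll that computes
  // the byte offset of the next argument, keeping the loop body tight.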
  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
  __ sll(scratch, length, 2);
  __ bind(&loop);
  __ Addu(scratch, elements, scratch);
  __ lw(scratch, MemOperand(scratch));
  __ push(scratch);
  __ Subu(length, length, Operand(1));
  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
  __ sll(scratch, length, 2);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is a0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, at);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    ASSERT(result.is(cp));
  }
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result,
        MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ LoadHeapObject(scratch0(), instr->hydrogen()->pairs());
  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  // The context is the first argument.
  __ Push(cp, scratch0(), scratch1());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global_object());
  Register result = ToRegister(instr->result());
  __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 A1State a1_state) {
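  // A formal parameter count equal to the sentinel marks a function that
  // copes with any actual argument count on its own, so the arguments
  // adaptor can be skipped.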
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (a1_state == A1_UNINITIALIZED) {
      __ LoadHeapObject(a1, function);
    }

    // Change context.
    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));

    // Set a0 to arguments count if adaptation is not needed. Assumes that a0
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ li(a0, Operand(arity));
    }

    // Invoke function.
    __ SetCallKind(t1, call_kind);
    __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
    __ Call(at);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(
        function, expected, count, CALL_FUNCTION, generator, call_kind);
  }
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  __ mov(a0, v0);
  CallKnownFunction(instr->hydrogen()->function(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    A1_UNINITIALIZED);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  ASSERT(instr->context() != NULL);
  ASSERT(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ Move(result, input);
  __ And(at, exponent, Operand(HeapNumber::kSignMask));
  __ Branch(&done, eq, at, Operand(zero_reg));

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(a1) ? a0 : a1;
    Register tmp2 = input.is(a2) ? a0 : a2;
    Register tmp3 = input.is(a3) ? a0 : a3;
    Register tmp4 = input.is(t0) ? a0 : t0;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ Branch(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(v0))
      __ mov(tmp1, v0);
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
  Label done;
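  // The mov below executes in the branch delay slot, so result already
  // holds the input when the early exit is taken for non-negative values.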
  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
  __ mov(result, input);
  __ subu(result, zero_reg, input);
  // Overflow if result is still negative, i.e. 0x80000000.
  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
  __ bind(&done);
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    FPURegister input = ToDoubleRegister(instr->value());
    FPURegister result = ToDoubleRegister(instr->result());
    __ abs_d(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register except_flag = ToRegister(instr->temp());

  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     input,
                     scratch1,
                     double_scratch0(),
                     except_flag);

  // Deopt if the operation did not succeed.
  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    Label done;
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ mfc1(scratch1, input.high());
    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    __ bind(&done);
  }
}


void LCodeGen::DoMathRound(LMathRound* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
  Register scratch = scratch0();
  Label done, check_sign_on_zero;

  // Extract exponent bits.
  __ mfc1(result, input.high());
  __ Ext(scratch,
         result,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
  Label skip1;
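  // A biased exponent of at most kExponentBias - 2 means |input| < 0.5.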
  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
  __ mov(result, zero_reg);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Branch(&check_sign_on_zero);
  } else {
    __ Branch(&done);
  }
  __ bind(&skip1);

  // The following conversion will not work with numbers
  // outside of ]-2^32, 2^32[.
  DeoptimizeIf(ge, instr->environment(), scratch,
               Operand(HeapNumber::kExponentBias + 32));

  // Save the original sign for later comparison.
  __ And(scratch, result, Operand(HeapNumber::kSignMask));

  __ Move(double_scratch0(), 0.5);
  __ add_d(double_scratch0(), input, double_scratch0());

  // Check sign of the result: if the sign changed, the input
  // value was in ]-0.5, 0[ and the result should be -0.
  __ mfc1(result, double_scratch0().high());
  __ Xor(result, result, Operand(scratch));
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // ARM uses 'mi' here, which is 'lt'
    DeoptimizeIf(lt, instr->environment(), result,
                 Operand(zero_reg));
  } else {
    Label skip2;
    // ARM uses 'mi' here, which is 'lt'
    // Negating it results in 'ge'
    __ Branch(&skip2, ge, result, Operand(zero_reg));
    __ mov(result, zero_reg);
    __ Branch(&done);
    __ bind(&skip2);
  }

  Register except_flag = scratch;
  __ EmitFPUTruncate(kRoundToMinusInf,
                     result,
                     double_scratch0(),
                     at,
                     double_scratch1,
                     except_flag);

  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ Branch(&done, ne, result, Operand(zero_reg));
    __ bind(&check_sign_on_zero);
    __ mfc1(scratch, input.high());
    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  }
  __ bind(&done);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ sqrt_d(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister temp = ToDoubleRegister(instr->temp());

  ASSERT(!input.is(result));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ Move(temp, -V8_INFINITY);
  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
  // Set up Infinity in the delay slot.
  // result is overwritten if the branch is not taken.
  __ neg_d(result, temp);

  // Add +0 to convert -0 to +0.
  __ add_d(result, input, kDoubleRegZero);
  __ sqrt_d(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(f4));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(a2));
  ASSERT(ToDoubleRegister(instr->left()).is(f2));
  ASSERT(ToDoubleRegister(instr->result()).is(f0));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(a2, &no_deopt);
    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
    // Load the heap number map into at for the comparison below.
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


void LCodeGen::DoRandom(LRandom* instr) {
  // Assert that the register size is indeed the size of each seed.
  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  // Load native context.
  Register global_object = ToRegister(instr->global_object());
  Register native_context = global_object;
  __ lw(native_context, FieldMemOperand(
          global_object, GlobalObject::kNativeContextOffset));

  // Load state (FixedArray of the native context's random seeds).
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  Register state = native_context;
  __ lw(state, FieldMemOperand(native_context, kRandomSeedOffset));

  // Load state[0].
  Register state0 = ToRegister(instr->scratch());
  __ lw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
  // Load state[1].
  Register state1 = ToRegister(instr->scratch2());
  __ lw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));

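  // Both seed words are advanced with a Marsaglia-style multiply-with-carry
  // step; their bits are combined below into a fresh 32-bit random pattern.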
  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  Register scratch3 = ToRegister(instr->scratch3());
  Register scratch4 = scratch0();
  __ And(scratch3, state0, Operand(0xFFFF));
  __ li(scratch4, Operand(18273));
  __ Mul(scratch3, scratch3, scratch4);
  __ srl(state0, state0, 16);
  __ Addu(state0, scratch3, state0);
  // Save state[0].
  __ sw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ And(scratch3, state1, Operand(0xFFFF));
  __ li(scratch4, Operand(36969));
  __ Mul(scratch3, scratch3, scratch4);
  __ srl(state1, state1, 16);
  __ Addu(state1, scratch3, state1);
  // Save state[1].
  __ sw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  Register random = scratch4;
  __ And(random, state1, Operand(0x3FFFF));
  __ sll(state0, state0, 14);
  __ Addu(random, random, state0);

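  // Convert the raw bits to a double in [0, 1): splice the 32 random bits
  // into the mantissa of 1.0 x 2^20 and subtract 1.0 x 2^20, which leaves
  // random * 2^-32.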
  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
  __ li(scratch3, Operand(0x41300000));
  // Move 0x41300000xxxxxxxx (x = random bits) to FPU.
  DoubleRegister result = ToDoubleRegister(instr->result());
  __ Move(result, random, scratch3);
  // Move 0x4130000000000000 to FPU.
  DoubleRegister scratch5 = double_scratch0();
  __ Move(scratch5, zero_reg, scratch3);
  __ sub_d(result, result, scratch5);
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  DoubleRegister result = ToDoubleRegister(instr->result());
  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DoubleRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, zero_reg);
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathTan(LMathTan* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, zero_reg);
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LMathCos* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, zero_reg);
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LMathSin* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(f4));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, zero_reg);
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(a1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      A1_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(v0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ li(a2, Operand(instr->name()));
  CallCode(ic, mode, instr);
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  CallKnownFunction(instr->hydrogen()->target(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    A1_UNINITIALIZED);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  // No cell in a2 for construct type feedback in optimized code.
  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
  __ li(a2, Operand(undefined_value));
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(a1));
  ASSERT(ToRegister(instr->result()).is(v0));

  __ li(a0, Operand(instr->arity()));
  __ li(a2, Operand(instr->hydrogen()->property_cell()));
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here,
      // look at the first argument.
      __ lw(t1, MemOperand(sp, 0));
      __ Branch(&packed_case, eq, t1, Operand(zero_reg));

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                              override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ Addu(code_object, code_object,
          Operand(Code::kHeaderSize - kHeapObjectTag));
  __ sw(code_object,
        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  __ Addu(result, base, Operand(instr->offset()));
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    if (representation.IsByte()) {
      __ sb(value, operand);
    } else {
      __ sw(value, operand);
    }
    return;
  }

  Handle<Map> transition = instr->transition();

  if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    Register value = ToRegister(instr->value());
    if (!instr->hydrogen()->value()->type().IsHeapObject()) {
      __ And(scratch, value, Operand(kSmiTagMask));
      DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
    }
  } else if (FLAG_track_double_fields && representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (!transition.is_null()) {
    __ li(scratch, Operand(transition));
    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          scratch,
                          temp,
                          GetRAState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  ASSERT(!object.is(value));
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    if (representation.IsByte()) {
      __ sb(value, operand);
    } else {
      __ sw(value, operand);
    }
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    if (representation.IsByte()) {
      __ sb(value, operand);
    } else {
      __ sw(value, operand);
    }
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetRAState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


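// Emits the failure path for a bounds check: with --debug-code, a check that
// was statically eliminated is verified with a runtime stop instead of being
// dropped; otherwise a failing check deoptimizes.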
void LCodeGen::ApplyCheckIf(Condition condition,
                            LBoundsCheck* check,
                            Register src1,
                            const Operand& src2) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ Branch(&done, NegateCondition(condition), src1, src2);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(condition, check->environment(), src1, src2);
  }
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
  if (instr->index()->IsConstantOperand()) {
    int constant_index =
        ToInteger32(LConstantOperand::cast(instr->index()));
    if (instr->hydrogen()->length()->representation().IsSmi()) {
      __ li(at, Operand(Smi::FromInt(constant_index)));
    } else {
      __ li(at, Operand(constant_index));
    }
    ApplyCheckIf(condition,
                 instr,
                 at,
                 Operand(ToRegister(instr->length())));
  } else {
    ApplyCheckIf(condition,
                 instr,
                 ToRegister(instr->index()),
                 Operand(ToRegister(instr->length())));
  }
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    Register address = scratch0();
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Addu(address, external_pointer,
                Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ sll(address, key, shift_size);
      __ Addu(address, external_pointer, address);
    }

    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
4242
      __ swc1(double_scratch0(), MemOperand(address, additional_offset));
4243
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
4244
      __ sdc1(value, MemOperand(address, additional_offset));
4245
    }
4246
  } else {
4247
    Register value(ToRegister(instr->value()));
4248
    MemOperand mem_operand = PrepareKeyedOperand(
4249
        key, external_pointer, key_is_constant, constant_key,
4250
        element_size_shift, shift_size,
4251
        instr->additional_index(), additional_offset);
4252
    switch (elements_kind) {
4253
      case EXTERNAL_PIXEL_ELEMENTS:
4254
      case EXTERNAL_BYTE_ELEMENTS:
4255
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
4256
        __ sb(value, mem_operand);
4257
        break;
4258
      case EXTERNAL_SHORT_ELEMENTS:
4259
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
4260
        __ sh(value, mem_operand);
4261
        break;
4262
      case EXTERNAL_INT_ELEMENTS:
4263
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
4264
        __ sw(value, mem_operand);
4265
        break;
4266
      case EXTERNAL_FLOAT_ELEMENTS:
4267
      case EXTERNAL_DOUBLE_ELEMENTS:
4268
      case FAST_DOUBLE_ELEMENTS:
4269
      case FAST_ELEMENTS:
4270
      case FAST_SMI_ELEMENTS:
4271
      case FAST_HOLEY_DOUBLE_ELEMENTS:
4272
      case FAST_HOLEY_ELEMENTS:
4273
      case FAST_HOLEY_SMI_ELEMENTS:
4274
      case DICTIONARY_ELEMENTS:
4275
      case NON_STRICT_ARGUMENTS_ELEMENTS:
4276
        UNREACHABLE();
4277
        break;
4278
    }
4279
  }
4280
}
4281

    
4282

    
4283
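// Note: fast double arrays reserve one NaN bit pattern to encode "the hole",
// so an arbitrary NaN written unchanged could later be mistaken for a hole.
// When NeedsCanonicalization() is set, the store below replaces any NaN with
// the single canonical non-hole NaN before writing to the backing store.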
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DoubleRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DoubleRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();
  Label not_nan, done;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ Addu(scratch, elements,
            Operand((constant_key << element_size_shift) +
                    FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ Addu(scratch, elements,
            Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
    __ sll(at, ToRegister(instr->key()), shift_size);
    __ Addu(scratch, scratch, at);
  }

  if (instr->NeedsCanonicalization()) {
    Label is_nan;
    // Check for NaN. All NaNs must be canonicalized.
    __ BranchF(NULL, &is_nan, eq, value, value);
    __ Branch(&not_nan);

    // Only load the canonical NaN if the comparison above found a NaN.
    __ bind(&is_nan);
    __ Move(double_scratch,
            FixedDoubleArray::canonical_not_the_hole_nan_as_double());
    __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
        element_size_shift));
    __ Branch(&done);
  }

  __ bind(&not_nan);
  __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
      element_size_shift));
  __ bind(&done);
}

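// Note: storing a tagged value into a FixedArray may create a pointer the
// garbage collector has to know about, so when NeedsWriteBarrier() is set the
// code below follows the store with RecordWrite, which updates the remembered
// set and keeps the incremental marker informed. The smi check is omitted
// when the value is statically known to be a heap object.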
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
      __ addu(scratch, elements, scratch);
    } else {
      __ sll(scratch, key, kPointerSizeLog2);
      __ addu(scratch, elements, scratch);
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ sw(value, FieldMemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by elements kind: external array, fast double array, or fast
  // tagged array.
  if (instr->is_external()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a2));
  ASSERT(ToRegister(instr->key()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, GetRAState(), kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ mov(a0, object_reg);
    __ li(a1, Operand(to_map));
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
                                     ne, &no_memento_found);
  DeoptimizeIf(al, instr->environment());
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(instr->hydrogen()->flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);
  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ sll(scratch, char_code, kPointerSizeLog2);
  __ Addu(result, result, scratch);
  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ lw(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* output = instr->result();
  ASSERT(output->IsRegister());
  Register scratch = scratch0();

  __ SmiTagCheckOverflow(ToRegister(output), ToRegister(input), scratch);
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  FPURegister dbl_scratch = double_scratch0();
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());
  Register overflow = scratch0();

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTagCheckOverflow(dst, src, overflow);
  __ BranchOnOverflow(deferred->entry(), overflow);
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  __ Branch(deferred->entry(), hi, reg, Operand(Smi::kMaxValue));
  __ SmiTag(reg, reg);
  __ bind(deferred->exit());
}

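// Deferred path shared by NumberTagI and NumberTagU: the value did not fit
// in a smi, so it is boxed in a new HeapNumber instead. In the signed case
// the input register may already hold the overflowed tag result; note below
// how the original value is recovered: overflow means bits 30 and 31 of the
// input disagreed, so untagging with an arithmetic shift and flipping bit 31
// restores it (e.g. 0x40000000 tags to 0x80000000, untags to 0xC0000000, and
// the xor yields 0x40000000 again).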
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  DoubleRegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  Label done;
  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ Xor(src, src, Operand(0x80000000));
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT);
    __ Move(dst, t1);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ StoreToSafepointRegisterSlot(zero_reg, dst);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Move(dst, v0);
  __ Subu(dst, dst, kHeapObjectTag);

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
  __ Addu(dst, dst, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(dst, dst);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DoubleRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address, tag it.
  __ Addu(reg, reg, kHeapObjectTag);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Subu(v0, v0, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(v0, reg);
}

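// Smi representation on 32-bit MIPS: the payload lives in the upper 31 bits
// and the low tag bit is clear (kSmiTag == 0), while heap object pointers
// have the low bit set (kHeapObjectTag == 1). Tagging is a left shift by one
// and untagging an arithmetic right shift by one, so 5 is represented as 10
// and -3 as -6.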
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, the value of scratch won't be zero.
    __ And(scratch, input, Operand(kHeapObjectTag));
    __ SmiUntag(result, input);
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
  } else {
    __ SmiUntag(result, input);
  }
}

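// Converts a tagged value in input_reg to a double in result_reg. In
// NUMBER_CANDIDATE_IS_SMI mode the input is known to be a smi and is simply
// untagged and converted; in NUMBER_CANDIDATE_IS_ANY_TAGGED mode it may also
// be a HeapNumber (whose value is loaded directly) or, when permitted,
// undefined (which is converted to NaN).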
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DoubleRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, env, scratch, Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ mfc1(at, result_reg.low());
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfc1(scratch, result_reg.high());
      DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, env, input_reg, Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
      __ Branch(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ mtc1(scratch, result_reg);
  __ cvt_d_w(result_reg, result_reg);
  __ bind(&done);
}

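// Deferred path for TaggedToI when the input is not a smi. For truncating
// conversions (the semantics of the JS bitwise operators) a HeapNumber is
// truncated modulo 2^32, the oddballs undefined and false become 0 and true
// becomes 1, and anything else deoptimizes. Non-truncating conversions
// accept only heap numbers whose value converts to an int32 exactly.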
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // Keep a copy of the input before the map dispatch: the oddball checks
    // below compare against scratch2 even on the non-HeapNumber path.
    __ mov(scratch2, input_reg);
    __ Branch(&no_heap_number, ne, scratch1, Operand(at));  // HeapNumber map?
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ Branch(&done);

    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&check_bools, ne, input_reg, Operand(at));
    ASSERT(ToRegister(instr->result()).is(input_reg));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.

    __ bind(&check_bools);
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(&check_false, ne, scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ li(input_reg, Operand(1));  // In delay slot.

    __ bind(&check_false);
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
    __ Branch(USE_DELAY_SLOT, &done);
    __ mov(input_reg, zero_reg);  // In delay slot.
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));

    // Load the double value.
    __ ldc1(double_scratch,
            FieldMemOperand(input_reg, HeapNumber::kValueOffset));

    Register except_flag = scratch2;
    __ EmitFPUTruncate(kRoundToZero,
                       input_reg,
                       double_scratch,
                       scratch1,
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed.
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfc1(scratch1, double_scratch.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    }
  }
  __ bind(&done);
}

void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Let the deferred code handle the HeapObject case.
    __ JumpIfNotSmi(input_reg, deferred->entry());

    // Smi to int32 conversion.
    __ SmiUntag(input_reg);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DoubleRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfc1(scratch1, double_input.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = LCodeGen::scratch0();
  DoubleRegister double_input = ToDoubleRegister(instr->value());

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    Register except_flag = LCodeGen::scratch1();

    __ EmitFPUTruncate(kRoundToMinusInf,
                       result_reg,
                       double_input,
                       scratch1,
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfc1(scratch1, double_input.high());
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
  DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ And(at, ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ And(at, ToRegister(input), Operand(kSmiTagMask));
    DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
  }
}

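// Instance type checks come in two forms. An interval check compares the
// instance type against [first, last], degenerating to a single equality
// test when first == last. A mask/tag check tests (type & mask) == tag; when
// the mask is a single bit the tag must be zero or that same bit, so one And
// plus a zero/non-zero comparison is enough.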
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ GetObjectType(input, scratch, scratch);

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
    } else {
      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ And(at, scratch, mask);
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
          at, Operand(zero_reg));
    } else {
      __ And(scratch, scratch, Operand(mask));
      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
    }
  }
}

void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(Handle<Object>(cell)));
    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr->environment(), reg,
                 Operand(at));
  } else {
    DeoptimizeIf(ne, instr->environment(), reg,
                 Operand(object));
  }
}

void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    __ mov(cp, zero_reg);
    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, scratch0());
  }
  __ And(at, scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;
  Register map_reg = scratch0();
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
  }
  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
  if (instr->hydrogen()->has_migration_target()) {
    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
  } else {
    DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number.
  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr->environment(), input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number.
  __ bind(&heap_number);
  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
                                             HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
  __ jmp(&done);

  __ bind(&is_smi);
  __ ClampUint8(result_reg, scratch);

  __ bind(&done);
}

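// Inline allocation: bump-pointer allocation in the selected space, with a
// deferred runtime call as fallback when the space is exhausted. When
// MustPrefillWithFiller() is set, the loop below walks the new object
// backwards writing the one-pointer filler map into every word, which keeps
// the heap safely iterable until the caller initializes the real fields.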
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size,
                result,
                scratch,
                scratch2,
                deferred->entry(),
                flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size));
    } else {
      scratch = ToRegister(instr->size());
    }
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Subu(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ Addu(at, result, Operand(scratch));
    __ sw(scratch2, MemOperand(at));
    __ Subu(scratch, scratch, Operand(kPointerSize));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
    __ Addu(result, result, Operand(kHeapObjectTag));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
                            instr->context());
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
                            instr->context());
  } else {
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
                            instr->context());
  }
  __ StoreToSafepointRegisterSlot(v0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(a0));
  ASSERT(ToRegister(instr->result()).is(v0));
  __ push(a0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // t3 = literals array.
  // a1 = regexp literal.
  // a0 = regexp literal clone.
  // a2 and t0-t2 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ LoadHeapObject(t3, instr->hydrogen()->literals());
  __ lw(a1, FieldMemOperand(t3, literal_offset));
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&materialized, ne, a1, Operand(at));

  // Create regexp literal using runtime function.
  // Result will be in v0.
  __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ li(t1, Operand(instr->hydrogen()->pattern()));
  __ li(t0, Operand(instr->hydrogen()->flags()));
  __ Push(t3, t2, t1, t0);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(a1, v0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ li(a0, Operand(Smi::FromInt(size)));
  __ Push(a1, a0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(a1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ lw(a3, FieldMemOperand(a1, i));
    __ lw(a2, FieldMemOperand(a1, i + kPointerSize));
    __ sw(a3, FieldMemOperand(v0, i));
    __ sw(a2, FieldMemOperand(v0, i + kPointerSize));
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ lw(a3, FieldMemOperand(a1, size - kPointerSize));
    __ sw(a3, FieldMemOperand(v0, size - kPointerSize));
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ li(a2, Operand(instr->hydrogen()->shared_info()));
    __ li(a1, Operand(pretenure ? factory()->true_value()
                                : factory()->false_value()));
    __ Push(cp, a2, a1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  ASSERT(ToRegister(instr->result()).is(v0));
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Register cmp1 = no_reg;
  Operand cmp2 = Operand(no_reg);

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal(),
                                                  cmp1,
                                                  cmp2);

  ASSERT(cmp1.is_valid());
  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());

  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name,
                                 Register& cmp1,
                                 Operand& cmp2) {
  // This function utilizes the delay slot heavily. This is used to load
  // values that are always usable without depending on the type of the input
  // register.
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    cmp1 = input;
    cmp2 = Operand(at);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    __ Branch(USE_DELAY_SLOT, false_label,
              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
    // input is an object so we can load the BitFieldOffset even if we take the
    // other branch.
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, input, scratch);
    cmp1 = scratch;
    cmp2 = Operand(SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ LoadRoot(at, Heap::kTrueValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    __ LoadRoot(at, Heap::kFalseValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ LoadRoot(at, Heap::kNullValueRootIndex);
    cmp1 = at;
    cmp2 = Operand(input);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    // The first instruction of JumpIfSmi is an And - it is safe in the delay
    // slot.
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ GetObjectType(input, scratch, input);
    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
    cmp1 = input;
    cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      __ LoadRoot(at, Heap::kNullValueRootIndex);
      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
    }
    Register map = input;
    __ GetObjectType(input, map, scratch);
    __ Branch(false_label,
              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ Branch(USE_DELAY_SLOT, false_label,
              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
    // map is still valid, so the BitField can be loaded in delay slot.
    // Check for undetectable objects => false.
    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
    __ And(at, at, 1 << Map::kIsUndetectable);
    cmp1 = at;
    cmp2 = Operand(zero_reg);
    final_branch_condition = eq;

  } else {
    cmp1 = at;
    cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
    __ Branch(false_label);
  }

  return final_branch_condition;
}


void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->temp());

  EmitIsConstructCall(temp1, scratch0());

  EmitBranch(instr, eq, temp1,
             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}


void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ Branch(&check_frame_marker, ne, temp2,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
}

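// Lazy deoptimization patches the generated code in place after the fact,
// overwriting the call at the bailout site. Each patch site therefore needs
// at least Deoptimizer::patch_size() bytes of instructions after the
// previous one; this helper pads with nops when two sites would otherwise
// end up too close together.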
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5594
  if (info()->IsStub()) return;
5595
  // Ensure that we have enough space after the previous lazy-bailout
5596
  // instruction for patching the code here.
5597
  int current_pc = masm()->pc_offset();
5598
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5599
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5600
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5601
    while (padding_size > 0) {
5602
      __ nop();
5603
      padding_size -= Assembler::kInstrSize;
5604
    }
5605
  }
5606
}
5607

    
5608

    
5609
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}

void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

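// Deferred part of DoStackCheck below: calls Runtime::kStackGuard with all
// registers saved and records a lazy-deopt safepoint for the call.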
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

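// Emits a stack-limit check either inline at function entry (calling the
// StackCheck builtin) or, for backwards branches, as a jump to the deferred
// runtime call above.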
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(&done, hs, sp, Operand(at));
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(at, Heap::kStackLimitRootIndex);
    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

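// Prepares for a for-in loop: deoptimizes when the receiver is undefined,
// null, a smi or a JS proxy, then uses the enum cache when possible and
// otherwise falls back to Runtime::kGetPropertyNamesFast.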
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));

  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));

  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  ASSERT(object.is(a0));
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  ASSERT(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
  __ bind(&use_cache);
}

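// Loads the enum cache array for the given map, or the empty fixed array
// when the map's EnumLength is zero; deoptimizes if the cache entry at
// instr->idx() is missing (the loaded value is zero).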
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));

  __ bind(&done);
}

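// Deoptimizes if the value's map differs from the expected map (used by
// for-in to detect receiver modification during iteration).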
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
}

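// Loads a property by its encoded smi index: a non-negative index selects
// an in-object field, a negative one a slot in the out-of-object properties
// array (note the offset arithmetic on the negated index below).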
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Addu(scratch, object, scratch);
  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ Branch(&done);

  __ bind(&out_of_object);
  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to the negated out-of-object property index plus 1.
  __ Subu(scratch, result, scratch);
  __ lw(result, FieldMemOperand(scratch,
                                FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal