// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "arm/lithium-codegen-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "hydrogen-osr.h"

namespace v8 {
namespace internal {

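// Wraps a call site so that a safepoint (the record the GC uses to locate
// tagged values live across the call) is emitted right after the call.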
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}

  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->
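// Note: the "__" shorthand is the usual V8 convention; it expands to
// masm()-> so the emitter calls below read like an assembly listing.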

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateDeoptJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r1: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). r5 is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      __ cmp(r5, Operand::Zero());
      int receiver_offset = scope()->num_parameters() * kPointerSize;
      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
      __ str(r2, MemOperand(sp, receiver_offset), ne);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    frame_is_built_ = true;
    info_->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
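    // In debug builds, fill the reserved slots with a recognizable zap
    // value so uses of uninitialized spill slots stand out. r0 and r1 are
    // saved around the loop; the 2 * kPointerSize offset in the store
    // skips those two saved registers at the top of the stack.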
    if (FLAG_debug_code) {
      __ sub(sp,  sp, Operand(slots * kPointerSize));
      __ push(r0);
      __ push(r1);
      __ add(r0, sp, Operand(slots *  kPointerSize));
      __ mov(r1, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ sub(r0, r0, Operand(kPointerSize));
      __ str(r1, MemOperand(r0, 2 * kPointerSize));
      __ cmp(r0, sp);
      __ b(ne, &loop);
      __ pop(r1);
      __ pop(r0);
    } else {
      __ sub(sp,  sp, Operand(slots * kPointerSize));
    }
  }

  if (info()->saves_caller_doubles()) {
    Comment(";;; Save clobbered callee double registers");
    int count = 0;
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    while (!save_iterator.Done()) {
      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is in r1.
    __ push(r1);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both r0 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ ldr(r0, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextOperand(cp, var->index());
        __ str(r0, target);
        // Update the write barrier. This clobbers r3 and r0.
        __ RecordWriteContextSlot(
            cp,
            target.offset(),
            r0,
            r3,
            GetLinkRegisterState(),
            kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so cp still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ sub(sp, sp, Operand(slots * kPointerSize));
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        __ pop(ip);
        __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
        frame_is_built_ = false;
      }
      __ jmp(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateDeoptJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24bit signed
  // immediate of a branch instruction.
  // To simplify we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32bit data after it.
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
      deopt_jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (deopt_jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  Label table_start;
  __ bind(&table_start);
  Label needs_frame;
  for (int i = 0; i < deopt_jump_table_.length(); i++) {
    __ bind(&deopt_jump_table_[i].label);
    Address entry = deopt_jump_table_[i].address;
    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (deopt_jump_table_[i].needs_frame) {
      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ b(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
        __ push(scratch0());
        __ add(fp, sp, Operand(2 * kPointerSize));
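        // The next two instructions simulate a call: in ARM mode, reading
        // pc yields the address of the current instruction plus 8, so the
        // mov below sets lr to the instruction just past "mov pc, ip"
        // before jumping to the deopt entry held in ip.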
        __ mov(lr, Operand(pc), LeaveCC, al);
        __ mov(pc, ip);
      }
    } else {
      __ mov(lr, Operand(pc), LeaveCC, al);
      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
    }
    masm()->CheckConstPool(false, false);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
  return DwVfpRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      ASSERT(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot() || op->IsArgument()) {
    __ ldr(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
                                               SwVfpRegister flt_scratch,
                                               DwVfpRegister dbl_scratch) {
  if (op->IsDoubleRegister()) {
    return ToDoubleRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      ASSERT(literal->IsNumber());
      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
      __ vmov(flt_scratch, ip);
      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
      return dbl_scratch;
    } else if (r.IsDouble()) {
      Abort(kUnsupportedDoubleImmediate);
    } else if (r.IsTagged()) {
      Abort(kUnsupportedTaggedImmediate);
    }
  } else if (op->IsStackSlot() || op->IsArgument()) {
    // TODO(regis): Why is vldr not taking a MemOperand?
    // __ vldr(dbl_scratch, ToMemOperand(op));
    MemOperand mem_op = ToMemOperand(op);
    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
    return dbl_scratch;
  }
  UNREACHABLE();
  return dbl_scratch;
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      ASSERT(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      ASSERT(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    ASSERT(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  ASSERT(!op->IsRegister());
  ASSERT(!op->IsDoubleRegister());
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  return MemOperand(fp, StackSlotOffset(op->index()));
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  ASSERT(op->IsDoubleStackSlot());
  return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr,
                        TargetAddressStorageMode storage_mode) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               TargetAddressStorageMode storage_mode) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  ASSERT(instr != NULL);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ ldr(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  ASSERT(FLAG_deopt_every_n_times < 2);  // Other values not supported on ARM.
  if (FLAG_deopt_every_n_times == 1 &&
      !info()->IsStub() &&
      info()->opt_count() == id) {
    ASSERT(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    return;
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", condition);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (condition == al && frame_is_built_) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (deopt_jump_table_.is_empty() ||
        (deopt_jump_table_.last().address != entry) ||
        (deopt_jump_table_.last().bailout_type != bailout_type) ||
        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      deopt_jump_table_.Add(table_entry, zone());
    }
    __ b(condition, &deopt_jump_table_.last().label);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(condition, environment, bailout_type);
}


void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  ZoneList<Handle<JSObject> > objects(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
      if (it.rinfo()->target_object()->IsMap()) {
        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
        maps.Add(map, zone());
      } else if (it.rinfo()->target_object()->IsJSObject()) {
        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
        objects.Add(object, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded objects after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
  for (int i = 0; i < objects.length(); i++) {
    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegistersAndDoubles(
    LPointerMap* pointers,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(
      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      __ ldr(r0, MemOperand(sp, 0));
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    Register result_reg = ToRegister(instr->result());

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

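    // For a power-of-two divisor d, x % d is x & (d - 1) for x >= 0. For
    // negative x the code computes -((-x) & (d - 1)) instead, so the
    // result keeps the sign of the dividend (e.g. -7 % 4 == -3), matching
    // the semantics of JavaScript's % operator.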
    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ cmp(left_reg, Operand::Zero());
      __ b(pl, &left_is_not_negative);
      __ rsb(result_reg, left_reg, Operand::Zero());
      __ and_(result_reg, result_reg, Operand(divisor - 1));
      __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      }
      __ b(&done);
    }

    __ bind(&left_is_not_negative);
    __ and_(result_reg, left_reg, Operand(divisor - 1));
    __ bind(&done);

  } else if (hmod->fixed_right_arg().has_value) {
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    int32_t divisor = hmod->fixed_right_arg().value;
    ASSERT(IsPowerOf2(divisor));

    // Check if our assumption of a fixed right operand still holds.
    __ cmp(right_reg, Operand(divisor));
    DeoptimizeIf(ne, instr->environment());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ cmp(left_reg, Operand::Zero());
      __ b(pl, &left_is_not_negative);
      __ rsb(result_reg, left_reg, Operand::Zero());
      __ and_(result_reg, result_reg, Operand(divisor - 1));
      __ rsb(result_reg, result_reg, Operand::Zero(), SetCC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      }
      __ b(&done);
    }

    __ bind(&left_is_not_negative);
    __ and_(result_reg, left_reg, Operand(divisor - 1));
    __ bind(&done);

  } else if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    Label done;
    // Check for x % 0, sdiv might signal an exception. We have to deopt in this
    // case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr->environment());
      } else {
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1

    __ sdiv(result_reg, left_reg, right_reg);
    __ mls(result_reg, result_reg, right_reg, left_reg);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr->environment());
    }
    __ bind(&done);

  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    ASSERT(!scratch.is(left_reg));
    ASSERT(!scratch.is(right_reg));
    ASSERT(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    ASSERT(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    ASSERT(!quotient.is(dividend));
    ASSERT(!quotient.is(divisor));

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return a
    // NaN.
    if (right->CanBeZero()) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    __ Move(result_reg, left_reg);
    // Load the arguments in VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comment seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::EmitSignedIntegerDivisionByConstant(
    Register result,
    Register dividend,
    int32_t divisor,
    Register remainder,
    Register scratch,
    LEnvironment* environment) {
  ASSERT(!AreAliased(dividend, scratch, ip));
  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));

  uint32_t divisor_abs = abs(divisor);

  int32_t power_of_2_factor =
    CompilerIntrinsics::CountTrailingZeros(divisor_abs);

  switch (divisor_abs) {
    case 0:
      DeoptimizeIf(al, environment);
      return;

    case 1:
      if (divisor > 0) {
        __ Move(result, dividend);
      } else {
        __ rsb(result, dividend, Operand::Zero(), SetCC);
        DeoptimizeIf(vs, environment);
      }
      // Compute the remainder.
      __ mov(remainder, Operand::Zero());
      return;

    default:
      if (IsPowerOf2(divisor_abs)) {
        // Branch and condition free code for integer division by a power
        // of two.
        int32_t power = WhichPowerOf2(divisor_abs);
        if (power > 1) {
          __ mov(scratch, Operand(dividend, ASR, power - 1));
        }
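        // An arithmetic shift alone would round negative dividends toward
        // negative infinity. Adding divisor_abs - 1 (extracted from the
        // dividend's sign bits) first biases the dividend so that the
        // shift below truncates toward zero instead.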
        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
        __ mov(result, Operand(scratch, ASR, power));
        // Negate if necessary.
        // We don't need to check for overflow because the case '-1' is
        // handled separately.
        if (divisor < 0) {
          ASSERT(divisor != -1);
          __ rsb(result, result, Operand::Zero());
        }
        // Compute the remainder.
        if (divisor > 0) {
          __ sub(remainder, dividend, Operand(result, LSL, power));
        } else {
          __ add(remainder, dividend, Operand(result, LSL, power));
        }
        return;
      } else {
        // Use magic numbers for a few specific divisors.
        // Details and proofs can be found in:
        // - Hacker's Delight, Henry S. Warren, Jr.
        // - The PowerPC Compiler Writer’s Guide
        // and probably many others.
        //
        // We handle
        //   <divisor with magic numbers> * <power of 2>
        // but not
        //   <divisor with magic numbers> * <other divisor with magic numbers>
        DivMagicNumbers magic_numbers =
          DivMagicNumberFor(divisor_abs >> power_of_2_factor);
        const int32_t M = magic_numbers.M;
        const int32_t s = magic_numbers.s + power_of_2_factor;

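        // The magic multiplier M approximates 2^(32 + s) / divisor_abs:
        // the high word of the 64-bit product dividend * M, shifted right
        // by s, gives the quotient, and adding the sign bit (dividend
        // LSR 31) corrects the rounding for negative dividends so the
        // division truncates toward zero. See Hacker's Delight, ch. 10.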
        __ mov(ip, Operand(M));
        __ smull(ip, scratch, dividend, ip);
        if (M < 0) {
          __ add(scratch, scratch, Operand(dividend));
        }
        if (s > 0) {
          __ mov(scratch, Operand(scratch, ASR, s));
        }
        __ add(result, scratch, Operand(dividend, LSR, 31));
        if (divisor < 0) __ rsb(result, result, Operand::Zero());
        // Compute the remainder.
        __ mov(ip, Operand(divisor));
        // This sequence could be replaced with 'mls' when
        // it gets implemented.
        __ mul(scratch, result, ip);
        __ sub(remainder, dividend, scratch);
      }
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  if (instr->hydrogen()->HasPowerOf2Divisor()) {
    const Register dividend = ToRegister(instr->left());
    const Register result = ToRegister(instr->result());
    int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ cmp(dividend, Operand::Zero());
        DeoptimizeIf(eq, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmp(dividend, Operand(kMinInt));
        DeoptimizeIf(eq, instr->environment());
      }
      test_value = - divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
1355
        __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
1356
        __ mov(result, Operand(result, ASR, power));
1357
        if (divisor > 0) __ rsb(result, result, Operand::Zero(), LeaveCC, lt);
1358
        if (divisor < 0) __ rsb(result, result, Operand::Zero(), LeaveCC, gt);
1359
        return;  // Don't fall through to "__ rsb" below.
1360
      } else {
1361
        // Deoptimize if remainder is not 0.
1362
        __ tst(dividend, Operand(test_value));
1363
        DeoptimizeIf(ne, instr->environment());
1364
        __ mov(result, Operand(dividend, ASR, power));
1365
        if (divisor < 0) __ rsb(result, result, Operand(0));
1366
      }
1367
    } else {
1368
      if (divisor < 0) {
1369
        __ rsb(result, dividend, Operand(0));
1370
      } else {
1371
        __ Move(result, dividend);
1372
      }
1373
    }
1374

    
1375
    return;
1376
  }
1377

    
1378
  const Register left = ToRegister(instr->left());
1379
  const Register right = ToRegister(instr->right());
1380
  const Register result = ToRegister(instr->result());
1381

    
1382
  // Check for x / 0.
1383
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1384
    __ cmp(right, Operand::Zero());
1385
    DeoptimizeIf(eq, instr->environment());
1386
  }
1387

    
1388
  // Check for (0 / -x) that will produce negative zero.
1389
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1390
    Label positive;
1391
    if (!instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
1392
      // Do the test only if it hadn't be done above.
1393
      __ cmp(right, Operand::Zero());
1394
    }
1395
    __ b(pl, &positive);
1396
    __ cmp(left, Operand::Zero());
1397
    DeoptimizeIf(eq, instr->environment());
1398
    __ bind(&positive);
1399
  }
1400

    
1401
  // Check for (kMinInt / -1).
1402
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1403
    Label left_not_min_int;
1404
    __ cmp(left, Operand(kMinInt));
1405
    __ b(ne, &left_not_min_int);
1406
    __ cmp(right, Operand(-1));
1407
    DeoptimizeIf(eq, instr->environment());
1408
    __ bind(&left_not_min_int);
1409
  }
1410

    
1411
  if (CpuFeatures::IsSupported(SUDIV)) {
1412
    CpuFeatureScope scope(masm(), SUDIV);
1413
    __ sdiv(result, left, right);
1414

    
1415
    if (!instr->hydrogen()->CheckFlag(
1416
        HInstruction::kAllUsesTruncatingToInt32)) {
1417
      // Compute remainder and deopt if it's not zero.
1418
      const Register remainder = scratch0();
1419
      __ mls(remainder, result, right, left);
      __ cmp(remainder, Operand::Zero());
      DeoptimizeIf(ne, instr->environment());
    }
  } else {
    const DoubleRegister vleft = ToDoubleRegister(instr->temp());
    const DoubleRegister vright = double_scratch0();
    __ vmov(double_scratch0().low(), left);
    __ vcvt_f64_s32(vleft, double_scratch0().low());
    __ vmov(double_scratch0().low(), right);
    __ vcvt_f64_s32(vright, double_scratch0().low());
    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
    __ vcvt_s32_f64(double_scratch0().low(), vleft);
    __ vmov(result, double_scratch0().low());

    if (!instr->hydrogen()->CheckFlag(
        HInstruction::kAllUsesTruncatingToInt32)) {
      // Deopt if exact conversion to integer was not possible.
      // Use vright as scratch register.
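      // Every int32 is exactly representable as a double, so converting the
      // truncated quotient back and comparing it against the full-precision
      // quotient detects any non-integral result.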
      __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
      __ VFPCompareAndSetFlags(vleft, double_scratch0());
      DeoptimizeIf(ne, instr->environment());
    }
  }
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DwVfpRegister addend = ToDoubleRegister(instr->addend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(addend.is(ToDoubleRegister(instr->result())));
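
  // vmla: addend = addend + multiplier * multiplicand.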
  __ vmla(addend, multiplier, multiplicand);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());

  // This is computed in-place.
  ASSERT(minuend.is(ToDoubleRegister(instr->result())));

  __ vmls(minuend, multiplier, multiplicand);
}


void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  const Register result = ToRegister(instr->result());
  const Register left = ToRegister(instr->left());
  const Register remainder = ToRegister(instr->temp());
  const Register scratch = scratch0();

  if (!CpuFeatures::IsSupported(SUDIV)) {
    // If the CPU doesn't support the sdiv instruction, we only optimize
    // when we have magic numbers for the divisor. The standard integer
    // division routine is usually slower than transitioning to VFP.
    ASSERT(instr->right()->IsConstantOperand());
    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
    ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
    if (divisor < 0) {
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }
    EmitSignedIntegerDivisionByConstant(result,
                                        left,
                                        divisor,
                                        remainder,
                                        scratch,
                                        instr->environment());
    // We performed a truncating division. Correct the result if necessary.
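    // Floor rounds toward -infinity while the division above truncated
    // toward zero, so subtract 1 when the remainder is non-zero and its sign
    // differs from the divisor's: the teq below only executes when the
    // remainder is non-zero (ne) and sets N iff the signs differ (mi).
    // E.g. 7 / -2: truncated quotient -3, remainder 1, signs differ,
    // giving -4 == floor(-3.5).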
    __ cmp(remainder, Operand::Zero());
    __ teq(remainder, Operand(divisor), ne);
    __ sub(result, result, Operand(1), LeaveCC, mi);
  } else {
    CpuFeatureScope scope(masm(), SUDIV);
    const Register right = ToRegister(instr->right());

    // Check for x / 0.
    __ cmp(right, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());

    // Check for (kMinInt / -1).
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      Label left_not_min_int;
      __ cmp(left, Operand(kMinInt));
      __ b(ne, &left_not_min_int);
      __ cmp(right, Operand(-1));
      DeoptimizeIf(eq, instr->environment());
      __ bind(&left_not_min_int);
    }

    // Check for (0 / -x) that will produce negative zero.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(right, Operand::Zero());
      __ cmp(left, Operand::Zero(), mi);
      // "right" can't be zero here because the code would already have
      // been deoptimized. The Z flag is set only if (right < 0) and
      // (left == 0). In this case we need to deoptimize to produce a -0.
      DeoptimizeIf(eq, instr->environment());
    }

    Label done;
    __ sdiv(result, left, right);
    // If both operands have the same sign then we are done.
    __ eor(remainder, left, Operand(right), SetCC);
    __ b(pl, &done);
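    // eor set N from sign(left) ^ sign(right): when the signs agree (pl) the
    // truncated quotient already equals the floored one. Otherwise subtract 1
    // below unless the division was exact. E.g. 7 / -2: sdiv gives -3,
    // remainder 7 - (-3)*(-2) = 1 != 0, so the result becomes -4.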

    // Check if the result needs to be corrected.
    __ mls(remainder, result, right, left);
    __ cmp(remainder, Operand::Zero());
    __ sub(result, result, Operand(1), LeaveCC, ne);

    __ bind(&done);
  }
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result
      // should be -0.
      __ cmp(left, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    switch (constant) {
      case -1:
        if (overflow) {
          __ rsb(result, left, Operand::Zero(), SetCC);
          DeoptimizeIf(vs, instr->environment());
        } else {
          __ rsb(result, left, Operand::Zero());
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
          __ cmp(left, Operand::Zero());
          DeoptimizeIf(mi, instr->environment());
        }
        __ mov(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
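        // Branchless absolute value: mask is 0 for non-negative constants and
        // ~0 for negative ones, so (constant + mask) ^ mask == |constant| as
        // a uint32 (this also handles kMinInt). The decomposition below then
        // turns e.g. x * 5 into x + (x << 2) and x * 7 into (x << 3) - x.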

        if (IsPowerOf2(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ mov(result, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0)  __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ add(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0)  __ rsb(result, result, Operand::Zero());
        } else if (IsPowerOf2(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ rsb(result, left, Operand(left, LSL, shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0)  __ rsb(result, result, Operand::Zero());
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ mul(result, left, ip);
        }
    }

  } else {
    ASSERT(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (overflow) {
      Register scratch = scratch0();
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ smull(result, scratch, result, right);
      } else {
        __ smull(result, scratch, left, right);
      }
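      // smull wrote the full 64-bit product (low half in result, high half in
      // scratch). The product fits in an int32 iff the high half equals the
      // sign extension of the low half (result ASR 31); otherwise deopt.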
      __ cmp(scratch, Operand(result, ASR, 31));
      DeoptimizeIf(ne, instr->environment());
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mul(result, result, right);
      } else {
        __ mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
      __ teq(left, Operand(right));
      __ b(pl, &done);
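      // teq set N from sign(left) ^ sign(right). If the signs agree (pl) the
      // product cannot be -0. Otherwise a zero product means 0 * negative
      // (or negative * 0), i.e. -0, so deoptimize below.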
      // Bail out if the result is minus zero.
      __ cmp(result, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  ASSERT(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot() || right_op->IsArgument()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ and_(result, left, right);
      break;
    case Token::BIT_OR:
      __ orr(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ mvn(result, Operand(left));
      } else {
        __ eor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        __ mov(result, Operand(left, ROR, scratch));
        break;
      case Token::SAR:
        __ mov(result, Operand(left, ASR, scratch));
        break;
      case Token::SHR:
        if (instr->can_deopt()) {
          __ mov(result, Operand(left, LSR, scratch), SetCC);
          DeoptimizeIf(mi, instr->environment());
        } else {
          __ mov(result, Operand(left, LSR, scratch));
        }
        break;
      case Token::SHL:
        __ mov(result, Operand(left, LSL, scratch));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ROR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, ASR, shift_count));
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ mov(result, Operand(left, LSR, shift_count));
        } else {
          if (instr->can_deopt()) {
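            // A zero shift count leaves the value unchanged, but SHR yields
            // an unsigned result: if the sign bit is set, the value does not
            // fit in a signed int32, so deoptimize.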
            __ tst(left, Operand(0x80000000));
            DeoptimizeIf(ne, instr->environment());
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
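            // SmiTag is implemented as an add of the value to itself, which
            // sets the V flag with SetCC; so shift by count - 1 and let
            // SmiTag perform the final doubling, flagging any smi overflow.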
            if (shift_count != 1) {
              __ mov(result, Operand(left, LSL, shift_count - 1));
              __ SmiTag(result, result, SetCC);
            } else {
              __ SmiTag(result, left, SetCC);
            }
            DeoptimizeIf(vs, instr->environment());
          } else {
            __ mov(result, Operand(left, LSL, shift_count));
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}


void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  double v = instr->value();
  __ Vmov(result, v, scratch0());
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), value);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temp());
  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi, return the object.
    __ SmiTst(input);
    __ Move(result, input, eq);
    __ b(eq, &done);
  }

  // If the object is not a value type, return the object.
  __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
  __ Move(result, input, ne);
  __ b(ne, &done);
  __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp());
  Smi* index = instr->index();
  Label runtime, done;
  ASSERT(object.is(result));
  ASSERT(object.is(r0));
  ASSERT(!scratch.is(scratch0()));
  ASSERT(!scratch.is(object));

  __ SmiTst(object);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
  DeoptimizeIf(ne, instr->environment());

  if (index->value() == 0) {
    __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
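      // Fields below kFirstUncachedField are cached in the JSDate object and
      // are only valid while the stamp stored in the object matches the
      // isolate's date cache stamp; on mismatch, fall through to the runtime
      // call below.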
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      __ mov(scratch, Operand(stamp));
      __ ldr(scratch, MemOperand(scratch));
      __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
      __ cmp(scratch, scratch0());
      __ b(ne, &runtime);
      __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
                                             kPointerSize * index->value()));
      __ jmp(&done);
    }
    __ bind(&runtime);
    __ PrepareCallCFunction(2, scratch);
    __ mov(r1, Operand(index));
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    __ bind(&done);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());
  LOperand* index_op = instr->index();
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  String::Encoding encoding = instr->encoding();

  if (FLAG_debug_code) {
    __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ and_(scratch, scratch,
            Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
                            ? one_byte_seq_type : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  if (index_op->IsConstantOperand()) {
    int constant_index = ToInteger32(LConstantOperand::cast(index_op));
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ strb(value,
              FieldMemOperand(string, SeqString::kHeaderSize + constant_index));
    } else {
      __ strh(value,
          FieldMemOperand(string, SeqString::kHeaderSize + constant_index * 2));
    }
  } else {
    Register index = ToRegister(index_op);
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ add(scratch, string, Operand(index));
      __ strb(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
    } else {
      __ add(scratch, string, Operand(index, LSL, 1));
      __ strh(value, FieldMemOperand(scratch, SeqString::kHeaderSize));
    }
  }
}


void LCodeGen::DoThrow(LThrow* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  __ push(input_reg);
  ASSERT(ToRegister(instr->context()).is(cp));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    __ stop("Unreachable code.");
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  SBit set_cond = can_overflow ? SetCC : LeaveCC;

  if (right->IsStackSlot() || right->IsArgument()) {
    Register right_reg = EmitLoadRegister(right, ip);
    __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
  } else {
    ASSERT(right->IsRegister() || right->IsConstantOperand());
    __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
  }

  if (can_overflow) {
    DeoptimizeIf(vs, instr->environment());
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
    Register left_reg = ToRegister(left);
    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
        ? ToOperand(right)
        : Operand(EmitLoadRegister(right, ip));
    Register result_reg = ToRegister(instr->result());
    __ cmp(left_reg, right_op);
    __ Move(result_reg, left_reg, condition);
    __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    DwVfpRegister left_reg = ToDoubleRegister(left);
    DwVfpRegister right_reg = ToDoubleRegister(right);
    DwVfpRegister result_reg = ToDoubleRegister(instr->result());
    Label result_is_nan, return_left, return_right, check_zero, done;
    __ VFPCompareAndSetFlags(left_reg, right_reg);
    if (operation == HMathMinMax::kMathMin) {
      __ b(mi, &return_left);
      __ b(gt, &return_right);
    } else {
      __ b(mi, &return_right);
      __ b(gt, &return_left);
    }
    __ b(vs, &result_is_nan);
    // Left equals right => check for -0.
    __ VFPCompareAndSetFlags(left_reg, 0.0);
    if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
      __ b(ne, &done);  // left == right != 0.
    } else {
      __ b(ne, &return_left);  // left == right != 0.
    }
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // We could use a single 'vorr' instruction here if we had NEON support.
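      // min(a, b) == -max(-a, -b) == -((-a) + (-b)): IEEE addition returns
      // -0 only when both operands are -0, so negating twice makes -0 win
      // the minimum. The vsub computes (-left) - right == (-left) + (-right).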
      __ vneg(left_reg, left_reg);
      __ vsub(result_reg, left_reg, right_reg);
      __ vneg(result_reg, result_reg);
    } else {
      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
      // the decision for vadd is easy because vand is a NEON instruction.
      __ vadd(result_reg, left_reg, right_reg);
    }
    __ b(&done);

    __ bind(&result_is_nan);
    __ vadd(result_reg, left_reg, right_reg);
    __ b(&done);

    __ bind(&return_right);
    __ Move(result_reg, right_reg);
    if (!left_reg.is(result_reg)) {
      __ b(&done);
    }

    __ bind(&return_left);
    __ Move(result_reg, left_reg);

    __ bind(&done);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DwVfpRegister left = ToDoubleRegister(instr->left());
  DwVfpRegister right = ToDoubleRegister(instr->right());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ vadd(result, left, right);
      break;
    case Token::SUB:
      __ vsub(result, left, right);
      break;
    case Token::MUL:
      __ vmul(result, left, right);
      break;
    case Token::DIV:
      __ vdiv(result, left, right);
      break;
    case Token::MOD: {
      // Save r0-r3 on the stack.
      __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());

      __ PrepareCallCFunction(0, 2, scratch0());
      __ SetCallCDoubleArguments(left, right);
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()),
          0, 2);
      // Move the result in the double result register.
      __ GetCFunctionDoubleResult(result);

      // Restore r0-r3.
      __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r1));
  ASSERT(ToRegister(instr->right()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  // Block literal pool emission to ensure nop indicating no inlined smi code
  // is in the correct position.
  Assembler::BlockConstPoolScope block_const_pool(masm());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || condition == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ b(condition, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}


template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
  int false_block = instr->FalseDestination(chunk_);
  __ b(condition, chunk_->GetAssemblyLabel(false_block));
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ stop("LBreak");
}


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32() || r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ cmp(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    DwVfpRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    __ VFPCompareAndSetFlags(reg, 0.0);
    __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
    EmitBranch(instr, ne);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      __ cmp(reg, Operand::Zero());
      EmitBranch(instr, ne);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      EmitBranch(instr, al);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      DwVfpRegister dbl_scratch = double_scratch0();
      __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
      __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
      EmitBranch(instr, ne);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
      __ cmp(ip, Operand::Zero());
      EmitBranch(instr, ne);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ b(eq, instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ b(eq, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ cmp(reg, Operand::Zero());
        __ b(eq, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ SmiTst(reg);
        DeoptimizeIf(eq, instr->environment());
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
          __ tst(ip, Operand(1 << Map::kIsUndetectable));
          __ b(ne, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
        __ b(ge, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ b(ge, &not_string);
        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ cmp(ip, Operand::Zero());
        __ b(ne, instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
        __ b(eq, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        DwVfpRegister dbl_scratch = double_scratch0();
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ b(ne, &not_heap_number);
        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
        __ cmp(r0, r0, vs);  // NaN -> false.
        __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
        __ b(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr->environment());
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cond = TokenToCondition(instr->op(), false);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right operands as doubles and load the
      // resulting flags into the normal status register.
      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
      // If a NaN is involved, i.e. the result is unordered (V set),
      // jump to the false block label.
      __ b(vs, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
        } else {
          __ cmp(ToRegister(left), Operand(value));
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
        } else {
          __ cmp(ToRegister(right), Operand(value));
        }
        // We transposed the operands. Reverse the condition.
        cond = ReverseCondition(cond);
      } else {
        __ cmp(ToRegister(left), ToRegister(right));
      }
    }
    EmitBranch(instr, cond);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  __ cmp(left, Operand(right));
  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ mov(ip, Operand(factory()->the_hole_value()));
    __ cmp(input_reg, ip);
    EmitBranch(instr, eq);
    return;
  }

  DwVfpRegister input_reg = ToDoubleRegister(instr->object());
  __ VFPCompareAndSetFlags(input_reg, input_reg);
  EmitFalseBranch(instr, vc);
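  // The hole is encoded as a particular NaN bit pattern. A self-compare is
  // unordered only for NaNs, so vc (ordered) means the value cannot be the
  // hole and takes the false branch; for NaNs, the upper word is compared
  // against the hole pattern below.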

  Register scratch = scratch0();
  __ VmovHigh(scratch, input_reg);
  __ cmp(scratch, Operand(kHoleNanUpper32));
  EmitBranch(instr, eq);
}


Condition LCodeGen::EmitIsObject(Register input,
                                 Register temp1,
                                 Label* is_not_object,
                                 Label* is_object) {
  Register temp2 = scratch0();
  __ JumpIfSmi(input, is_not_object);

  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
  __ cmp(input, temp2);
  __ b(eq, is_object);

  // Load map.
  __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
  __ tst(temp2, Operand(1 << Map::kIsUndetectable));
  __ b(ne, is_not_object);

  // Load instance type and check that it is in object type range.
  __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
  __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ b(lt, is_not_object);
  __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return le;
}


void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  Condition true_cond =
      EmitIsObject(reg, temp1,
          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}


Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  __ SmiTst(input_reg);
  EmitBranch(instr, eq);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ tst(temp, Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals no smi code inlined.
  __ cmp(r0, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);

  EmitBranch(instr, condition);
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return hs;
  if (from == FIRST_TYPE) return ls;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch,
         FieldMemOperand(input, String::kHashFieldOffset));
  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, eq);
}


// Branches to a label or falls through with the answer in flags.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
    __ b(lt, is_false);
    __ b(eq, is_true);
    __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
    __ b(eq, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
    __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                          FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ b(gt, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ b(ne, is_true);
  } else {
    __ b(ne, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(temp, FieldMemOperand(temp,
                               SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, Operand(class_name));
  // End with the answer in flags.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
      class_name, input, temp, temp2);

  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ cmp(temp, Operand(instr->map()));
  EmitBranch(instr, eq);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->left()).is(r0));  // Object is in r0.
  ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.

  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

  __ cmp(r0, Operand::Zero());
  __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne);
  __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq);
}


void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };

  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  ASSERT(object.is(r0));
  ASSERT(result.is(r0));

  // A Smi is not instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurrences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  Register map = temp;
  __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
  {
    // Block constant pool emission to ensure the positions of instructions are
    // as expected by the patcher. See InstanceofStub::Generate().
    Assembler::BlockConstPoolScope block_const_pool(masm());
    __ bind(deferred->map_check());  // Label for calculating code patching.
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch with
    // the cached map.
    PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
    __ cmp(map, Operand(ip));
    __ b(ne, &cache_miss);
    // We use Factory::the_hole_value() on purpose instead of loading from the
    // root array to force relocation to be able to later patch
    // with true or false.
    __ mov(result, Operand(factory()->the_hole_value()));
  }
  __ b(&done);

  // The inlined call site cache did not match. Check null and string before
  // calling the deferred code.
  __ bind(&cache_miss);
  // Null is not instance of anything.
  __ LoadRoot(ip, Heap::kNullValueRootIndex);
  __ cmp(object, Operand(ip));
  __ b(eq, &false_result);

  // String values are not instances of anything.
  Condition is_string = masm_->IsObjectStringType(object, temp);
  __ b(is_string, &false_result);

  // Go to the deferred code.
  __ b(deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(result, Heap::kFalseValueRootIndex);

  // Here result has either true or false. Deferred code also produces true or
  // false object.
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  Register result = ToRegister(instr->result());
  ASSERT(result.is(r0));

  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kArgsInRegisters);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kCallSiteInlineCheck);
  flags = static_cast<InstanceofStub::Flags>(
      flags | InstanceofStub::kReturnTrueFalseObject);
  InstanceofStub stub(flags);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());

  // Get the temp register reserved by the instruction. This needs to be r4 as
  // its slot in the pushed safepoint register area is used to communicate the
  // offset to the location of the map check.
  Register temp = ToRegister(instr->temp());
  ASSERT(temp.is(r4));
  __ Move(InstanceofStub::right(), instr->function());
  static const int kAdditionalDelta = 5;
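  // delta is the distance, in instructions, from the map check to the return
  // address of the stub call; stored (in bytes) into temp's safepoint slot,
  // it lets the stub locate and patch the inlined cache site above.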
2799
  // Make sure that code size is predicable, since we use specific constants
2800
  // offsets in the code to find embedded values..
2801
  PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
2802
  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2803
  Label before_push_delta;
2804
  __ bind(&before_push_delta);
2805
  __ BlockConstPoolFor(kAdditionalDelta);
2806
  __ mov(temp, Operand(delta * kPointerSize));
2807
  // The mov above can generate one or two instructions. The delta was computed
2808
  // for two instructions, so we need to pad here in case of one instruction.
2809
  if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
2810
    ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
2811
    __ nop();
2812
  }
2813
  __ StoreToSafepointRegisterSlot(temp, temp);
2814
  CallCodeGeneric(stub.GetCode(isolate()),
2815
                  RelocInfo::CODE_TARGET,
2816
                  instr,
2817
                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2818
  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2819
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2820
  // Put the result value into the result register slot and
2821
  // restore all registers.
2822
  __ StoreToSafepointRegisterSlot(result, result);
2823
}
2824

    
2825

    
2826
void LCodeGen::DoCmpT(LCmpT* instr) {
2827
  ASSERT(ToRegister(instr->context()).is(cp));
2828
  Token::Value op = instr->op();
2829

    
2830
  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2831
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
2832
  // This instruction also signals no smi code inlined.
2833
  __ cmp(r0, Operand::Zero());
2834

    
2835
  Condition condition = ComputeCompareCondition(op);
2836
  __ LoadRoot(ToRegister(instr->result()),
2837
              Heap::kTrueValueRootIndex,
2838
              condition);
2839
  __ LoadRoot(ToRegister(instr->result()),
2840
              Heap::kFalseValueRootIndex,
2841
              NegateCondition(condition));
2842
}
2843

    
2844

    
2845
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r0.  We're leaving the code
    // managed by the register allocator and tearing down the frame, so it's
    // safe to write to the context register.
    __ push(r0);
    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    ASSERT(NeedsEagerFrame());
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    int count = 0;
    while (!save_iterator.Done()) {
      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
              MemOperand(sp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    __ mov(sp, fp);
    no_frame_start = masm_->pc_offset();
    __ ldm(ia_w, sp, fp.bit() | lr.bit());
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (sp_delta != 0) {
      __ add(sp, sp, Operand(sp_delta));
    }
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    __ SmiUntag(reg);
    __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
  }

  __ Jump(lr);

  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}


void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
  __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r2, Operand(instr->name()));
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET
                                             : RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}


void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Register cell = scratch0();

  // Load the cell.
  __ mov(cell, Operand(instr->hydrogen()->cell().handle()));

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We use a temp to check the payload (CompareRoot might clobber ip).
    Register payload = ToRegister(instr->temp());
    __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(eq, instr->environment());
  }

  // Store the value.
  __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
  // Cells are always rescanned, so no write barrier here.
}


void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->global_object()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}


void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(result, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
    }
  }
}


void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, target);
    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
    __ cmp(scratch, ip);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr->environment());
    } else {
      __ b(ne, &skip_assignment);
    }
  }

  __ str(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context,
                              target.offset(),
                              value,
                              scratch,
                              GetLinkRegisterState(),
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}


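// Loads a named field along one of three paths: a raw load from external
// (off-heap) memory, an unboxed double load from an in-object field, or a
// tagged load that first fetches the out-of-object properties array when
// the field is not stored in-object.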
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    if (access.representation().IsByte()) {
      __ ldrb(result, operand);
    } else {
      __ ldr(result, operand);
    }
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vldr(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  MemOperand operand = FieldMemOperand(object, offset);
  if (access.representation().IsByte()) {
    __ ldrb(result, operand);
  } else {
    __ ldr(result, operand);
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r0));
  ASSERT(ToRegister(instr->result()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register scratch = scratch0();
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function. Load map into the
  // result register.
  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
  DeoptimizeIf(ne, instr->environment());

  // Make sure that the function has an instance prototype.
  Label non_instance;
  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
  __ b(ne, &non_instance);

  // Get the prototype or initial map from the function.
  __ ldr(result,
         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(result, ip);
  DeoptimizeIf(eq, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
  __ b(ne, &done);

  // Get the prototype from the initial map.
  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
  __ jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  __ bind(&non_instance);
  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}


void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}


void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register to_reg = ToRegister(instr->result());
  Register from_reg = ToRegister(instr->object());
  __ ldr(to_reg, FieldMemOperand(from_reg,
                                 ExternalArray::kExternalPointerOffset));
}


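// Loads one argument out of the caller's (possibly adapted) frame. With a
// frame pointer in `arguments`, argument i of n sits (n - i) + 1 words
// above it: two words (presumably the saved fp and return address)
// separate the frame pointer from the last argument, and subtracting the
// index from the length already accounts for one of them.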
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ ldr(result, MemOperand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Register index = ToRegister(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting the index from the length accounts for one of them; add
    // one more.
    __ sub(length, length, index);
    __ add(length, length, Operand(1));
    __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
  }
}


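// Loads an element from an external (typed) array: the address is
// base + (key << shift_size) + additional_offset, where shift_size is
// reduced by kSmiTagSize if the key is still smi-tagged so the tag is
// absorbed by the scaling. Float32 loads are widened to float64, and
// uint32 loads deoptimize on values outside the signed int32 range unless
// the result is explicitly marked as uint32.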
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    DwVfpRegister result = ToDoubleRegister(instr->result());
    Operand operand = key_is_constant
        ? Operand(constant_key << element_size_shift)
        : Operand(key, LSL, shift_size);
    __ add(scratch0(), external_pointer, operand);
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ vldr(double_scratch0().low(), scratch0(), additional_offset);
      __ vcvt_f64_f32(result, double_scratch0().low());
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ vldr(result, scratch0(), additional_offset);
    }
  } else {
    Register result = ToRegister(instr->result());
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ ldrsb(result, mem_operand);
        break;
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ ldrb(result, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ ldrsh(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ ldrh(result, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ ldr(result, mem_operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ ldr(result, mem_operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          __ cmp(result, Operand(0x80000000));
          DeoptimizeIf(cs, instr->environment());
        }
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


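// Loads a double element from a FixedDoubleArray. The hole in a double
// array is encoded as a NaN with a distinguished bit pattern, so the hole
// check reloads the upper 32 bits of the element just read and deoptimizes
// if they equal kHoleNanUpper32.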
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  bool key_is_constant = instr->key()->IsConstantOperand();
  Register key = no_reg;
  DwVfpRegister result = ToDoubleRegister(instr->result());
  Register scratch = scratch0();

  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);

  int base_offset =
      FixedDoubleArray::kHeaderSize - kHeapObjectTag +
      (instr->additional_index() << element_size_shift);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    base_offset += constant_key << element_size_shift;
  }
  __ add(scratch, elements, Operand(base_offset));

  if (!key_is_constant) {
    key = ToRegister(instr->key());
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, scratch, Operand(key, LSL, shift_size));
  }

  __ vldr(result, scratch, 0);

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
    __ cmp(scratch, Operand(kHoleNanUpper32));
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  if (instr->key()->IsConstantOperand()) {
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    Register key = ToRegister(instr->key());
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ ldr(result, FieldMemOperand(store_base, offset));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      __ SmiTst(result);
      DeoptimizeIf(ne, instr->environment());
    } else {
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
      __ cmp(result, scratch);
      DeoptimizeIf(eq, instr->environment());
    }
  }
}


void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_external()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}


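// Builds a MemOperand for a keyed external-array access. A shift_size of
// -1 arises for a smi-tagged key indexing byte-sized elements; LSR #1 then
// untags the key while addressing. When a nonzero additional_index must be
// combined with a register key, the sum goes through scratch0() first,
// since an ARM addressing mode cannot encode base plus scaled register
// plus immediate all at once.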
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                         Register base,
                                         bool key_is_constant,
                                         int constant_key,
                                         int element_size,
                                         int shift_size,
                                         int additional_index,
                                         int additional_offset) {
  if (additional_index != 0 && !key_is_constant) {
    additional_index *= 1 << (element_size - shift_size);
    __ add(scratch0(), key, Operand(additional_index));
  }

  if (key_is_constant) {
    return MemOperand(base,
                      (constant_key << element_size) + additional_offset);
  }

  if (additional_index == 0) {
    if (shift_size >= 0) {
      return MemOperand(base, key, LSL, shift_size);
    } else {
      ASSERT_EQ(-1, shift_size);
      return MemOperand(base, key, LSR, 1);
    }
  }

  if (shift_size >= 0) {
    return MemOperand(base, scratch0(), LSL, shift_size);
  } else {
    ASSERT_EQ(-1, shift_size);
    return MemOperand(base, scratch0(), LSR, 1);
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->key()).is(r0));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ sub(result, sp, Operand(2 * kPointerSize));
  } else {
    // Check if the calling frame is an arguments adaptor frame.
    Label done, adapted;
    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

    // Result is the frame pointer for the frame if not adapted and for the
    // real frame below the adaptor frame if adapted.
    __ mov(result, fp, LeaveCC, ne);
    __ mov(result, scratch, LeaveCC, eq);
  }
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register elem = ToRegister(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If there is no arguments adaptor frame, the number of arguments is fixed.
  __ cmp(fp, elem);
  __ mov(result, Operand(scope()->num_parameters()));
  __ b(eq, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(result,
         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}


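// Applies the sloppy-mode receiver rules before a call with an arbitrary
// receiver (e.g. via Function.prototype.apply): strict-mode functions and
// builtins take the receiver unchanged, null and undefined become the
// global receiver, and any other non-object receiver causes a deopt.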
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register scratch = scratch0();

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ ldr(scratch,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(scratch,
         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
  __ tst(scratch,
         Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
  __ b(ne, &receiver_ok);

  // Do not transform the receiver to object for builtins.
  __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
  __ b(ne, &receiver_ok);

  // Normal function. Replace undefined or null with global receiver.
  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ cmp(receiver, scratch);
  __ b(eq, &global_object);

  // Deoptimize if the receiver is not a JS object.
  __ SmiTst(receiver);
  DeoptimizeIf(eq, instr->environment());
  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
  DeoptimizeIf(lt, instr->environment());
  __ jmp(&receiver_ok);

  __ bind(&global_object);
  __ ldr(receiver, GlobalObjectOperand());
  __ ldr(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  ASSERT(receiver.is(r0));  // Used for parameter count.
  ASSERT(function.is(r1));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(r0));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, Operand(kArgumentsLimit));
  DeoptimizeIf(hi, instr->environment());

  // Push the receiver and use the register to keep the original
  // number of arguments.
  __ push(receiver);
  __ mov(receiver, length);
  // The arguments are at a one pointer size offset from elements.
  __ add(elements, elements, Operand(1 * kPointerSize));

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ cmp(length, Operand::Zero());
  __ b(eq, &invoke);
  __ bind(&loop);
  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
  __ push(scratch);
  __ sub(length, length, Operand(1), SetCC);
  __ b(ne, &loop);

  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  // The number of arguments is stored in receiver which is r0, as expected
  // by InvokeFunction.
  ParameterCount actual(receiver);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
    Abort(kDoPushArgumentNotImplementedForDoubleType);
  } else {
    Register argument_reg = EmitLoadRegister(argument, ip);
    __ push(argument_reg);
  }
}


void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoContext(LContext* instr) {
  // If there is a non-return use, the context must be moved to a register.
  Register result = ToRegister(instr->result());
  if (info()->IsOptimizing()) {
    __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
  } else {
    // If there is no frame, the context must be in cp.
    ASSERT(result.is(cp));
  }
}


void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result,
         MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}


void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ push(cp);  // The context is the first argument.
  __ Move(scratch0(), instr->hydrogen()->pairs());
  __ push(scratch0());
  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
  __ push(scratch0());
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ ldr(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global = ToRegister(instr->global_object());
  Register result = ToRegister(instr->result());
  __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 R1State r1_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    if (r1_state == R1_UNINITIALIZED) {
      __ Move(r1, function);
    }

    // Change context.
    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

    // Set r0 to arguments count if adaptation is not needed. Assumes that r0
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ mov(r0, Operand(arity));
    }

    // Invoke function.
    __ SetCallKind(r5, call_kind);
    __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
    __ Call(ip);

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(
        function, expected, count, CALL_FUNCTION, generator, call_kind);
  }
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  CallKnownFunction(instr->hydrogen()->function(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    R1_UNINITIALIZED);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  ASSERT(instr->context() != NULL);
  ASSERT(ToRegister(instr->context()).is(cp));
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr->environment());

  Label done;
  Register exponent = scratch0();
  scratch = no_reg;
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it.
  __ tst(exponent, Operand(HeapNumber::kSignMask));
  // Move the input to the result if necessary.
  __ Move(result, input);
  __ b(eq, &done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // Registers were saved at the safepoint, so we can use
    // many scratch registers.
    Register tmp1 = input.is(r1) ? r0 : r1;
    Register tmp2 = input.is(r2) ? r0 : r2;
    Register tmp3 = input.is(r3) ? r0 : r3;
    Register tmp4 = input.is(r4) ? r0 : r4;

    // exponent: floating point exponent value.

    Label allocated, slow;
    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
    __ b(&allocated);

    // Slow case: Call the runtime system to do the number allocation.
    __ bind(&slow);

    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                            instr->context());
    // Set the pointer to the new heap number in tmp.
    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
    // Restore input_reg after call to runtime.
    __ LoadFromSafepointRegisterSlot(input, input);
    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

    __ bind(&allocated);
    // exponent: floating point exponent value.
    // tmp1: allocated heap number.
    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

    __ StoreToSafepointRegisterSlot(tmp1, result);
  }

  __ bind(&done);
}


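// Integer abs(): non-negative inputs pass through, negative inputs are
// negated with a reverse-subtract from zero. The only input that can
// overflow is kMinInt, whose negation does not fit in 32 bits; the rsb
// sets the V flag in that case and we deoptimize on it.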
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ cmp(input, Operand::Zero());
  __ Move(result, input, pl);
  // We can make rsb conditional because the previous cmp instruction
  // will clear the V (overflow) flag and rsb won't set this flag
  // if input is positive.
  __ rsb(result, input, Operand::Zero(), SetCC, mi);
  // Deoptimize on overflow.
  DeoptimizeIf(vs, instr->environment());
}


void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DwVfpRegister input = ToDoubleRegister(instr->value());
    DwVfpRegister result = ToDoubleRegister(instr->result());
    __ vabs(result, input);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LMathFloor* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  Register input_high = scratch0();
  Label done, exact;

  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
  DeoptimizeIf(al, instr->environment());

  __ bind(&exact);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Test for -0.
    __ cmp(result, Operand::Zero());
    __ b(ne, &done);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());
  }
  __ bind(&done);
}


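// Math.round is emitted as floor(input + 0.5) once |input| > 0.5. Inputs
// with |input| <= 0.5 are resolved inline so that +0.5 can produce 1 and
// the [-0.5, -0] range can deoptimize when kBailoutOnMinusZero is set;
// everything else in that range produces 0.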
void LCodeGen::DoMathRound(LMathRound* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
  DwVfpRegister input_plus_dot_five = double_scratch1;
  Register input_high = scratch0();
  DwVfpRegister dot_five = double_scratch0();
  Label convert, done;

  __ Vmov(dot_five, 0.5, scratch0());
  __ vabs(double_scratch1, input);
  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
  // If input is in [-0.5, -0], the result is -0.
  // If input is in [+0, +0.5[, the result is +0.
  // If the input is +0.5, the result is 1.
  __ b(hi, &convert);  // Out of [-0.5, +0.5].
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ VmovHigh(input_high, input);
    __ cmp(input_high, Operand::Zero());
    DeoptimizeIf(mi, instr->environment());  // [-0.5, -0].
  }
  __ VFPCompareAndSetFlags(input, dot_five);
  __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
  // flag kBailoutOnMinusZero.
  __ mov(result, Operand::Zero(), LeaveCC, ne);
  __ b(&done);

  __ bind(&convert);
  __ vadd(input_plus_dot_five, input, dot_five);
  // Reuse dot_five (double_scratch0) as we no longer need this value.
  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                   &done, &done);
  DeoptimizeIf(al, instr->environment());
  __ bind(&done);
}


void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  __ vsqrt(result, input);
}


void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister temp = ToDoubleRegister(instr->temp());

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done;
  __ vmov(temp, -V8_INFINITY, scratch0());
  __ VFPCompareAndSetFlags(input, temp);
  __ vneg(result, temp, eq);
  __ b(&done, eq);

  // Add +0 to convert -0 to +0.
  __ vadd(result, input, kDoubleRegZero);
  __ vsqrt(result, result);
  __ bind(&done);
}


void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(d2));
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(r2));
  ASSERT(ToDoubleRegister(instr->left()).is(d1));
  ASSERT(ToDoubleRegister(instr->result()).is(d3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    Label no_deopt;
    __ JumpIfSmi(r2, &no_deopt);
    __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(r6, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}


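// Produces a pseudo-random double in [0, 1). The two 32-bit seeds stored
// in the native context are advanced with multiply-with-carry style steps
// (multipliers 18273 and 36969), and 32 mixed bits are then dropped into
// the low mantissa word of the double 1.0 x 2^20; subtracting 1.0 x 2^20
// leaves random_bits / 2^32, a value in [0, 1).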
void LCodeGen::DoRandom(LRandom* instr) {
  // Assert that the register size is indeed the size of each seed.
  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == kSeedSize);

  // Load the native context.
  Register global_object = ToRegister(instr->global_object());
  Register native_context = global_object;
  __ ldr(native_context, FieldMemOperand(
          global_object, GlobalObject::kNativeContextOffset));

  // Load state (FixedArray of the native context's random seeds).
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  Register state = native_context;
  __ ldr(state, FieldMemOperand(native_context, kRandomSeedOffset));

  // Load state[0].
  Register state0 = ToRegister(instr->scratch());
  __ ldr(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
  // Load state[1].
  Register state1 = ToRegister(instr->scratch2());
  __ ldr(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  Register scratch3 = ToRegister(instr->scratch3());
  Register scratch4 = scratch0();
  __ and_(scratch3, state0, Operand(0xFFFF));
  __ mov(scratch4, Operand(18273));
  __ mul(scratch3, scratch3, scratch4);
  __ add(state0, scratch3, Operand(state0, LSR, 16));
  // Save state[0].
  __ str(state0, FieldMemOperand(state, ByteArray::kHeaderSize));

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ and_(scratch3, state1, Operand(0xFFFF));
  __ mov(scratch4, Operand(36969));
  __ mul(scratch3, scratch3, scratch4);
  __ add(state1, scratch3, Operand(state1, LSR, 16));
  // Save state[1].
  __ str(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  Register random = scratch4;
  __ and_(random, state1, Operand(0x3FFFF));
  __ add(random, random, Operand(state0, LSL, 14));

  // 0x41300000 is the top half of 1.0 x 2^20 as a double.
  // Create this constant using mov/orr to avoid PC relative load.
  __ mov(scratch3, Operand(0x41000000));
  __ orr(scratch3, scratch3, Operand(0x300000));
  // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
  DwVfpRegister result = ToDoubleRegister(instr->result());
  __ vmov(result, random, scratch3);
  // Move 0x4130000000000000 to VFP.
  __ mov(scratch4, Operand::Zero());
  DwVfpRegister scratch5 = double_scratch0();
  __ vmov(scratch5, scratch4, scratch3);
  __ vsub(result, result, scratch5);
}


void LCodeGen::DoMathExp(LMathExp* instr) {
  DwVfpRegister input = ToDoubleRegister(instr->value());
  DwVfpRegister result = ToDoubleRegister(instr->result());
  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
  DwVfpRegister double_scratch2 = double_scratch0();
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), input, result, double_scratch1, double_scratch2,
      temp1, temp2, scratch0());
}


void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, Operand::Zero());
  TranscendentalCacheStub stub(TranscendentalCache::LOG,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathTan(LMathTan* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, Operand::Zero());
  TranscendentalCacheStub stub(TranscendentalCache::TAN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathCos(LMathCos* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, Operand::Zero());
  TranscendentalCacheStub stub(TranscendentalCache::COS,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoMathSin(LMathSin* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(d2));
  // Set the context register to a GC-safe fake value. Clobbering it is
  // OK because this instruction is marked as a call.
  __ mov(cp, Operand::Zero());
  TranscendentalCacheStub stub(TranscendentalCache::SIN,
                               TranscendentalCacheStub::UNTAGGED);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      R1_CONTAINS_TARGET);
  }
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ mov(r2, Operand(instr->name()));
  CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->function()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->result()).is(r0));

  int arity = instr->arity();
  RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic =
      isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
  __ mov(r2, Operand(instr->name()));
  CallCode(ic, mode, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  CallKnownFunction(instr->hydrogen()->target(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    R1_UNINITIALIZED);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  // No cell in r2 for construct type feedback in optimized code.
  Handle<Object> undefined_value(isolate()->factory()->undefined_value());
  __ mov(r2, Operand(undefined_value));
  CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->constructor()).is(r1));
  ASSERT(ToRegister(instr->result()).is(r0));

  __ mov(r0, Operand(instr->arity()));
  __ mov(r2, Operand(instr->hydrogen()->property_cell()));
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here: look at the first argument.
      __ ldr(r5, MemOperand(sp, 0));
      __ cmp(r5, Operand::Zero());
      __ b(eq, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                              override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register code_object = ToRegister(instr->code_object());
  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(code_object,
         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  __ add(result, base, Operand(instr->offset()));
}


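// Stores to a named field, mirroring DoLoadNamedField's three paths: raw
// external memory, unboxed double, or a tagged in-object / properties-array
// slot. A pending map transition is written first (with its own map write
// barrier if needed), and tagged stores emit a record-write barrier,
// skipping the inline smi check when the value is statically known to be a
// heap object.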
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    if (representation.IsByte()) {
      __ strb(value, operand);
    } else {
      __ str(value, operand);
    }
    return;
  }

  Handle<Map> transition = instr->transition();

  if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    Register value = ToRegister(instr->value());
    if (!instr->hydrogen()->value()->type().IsHeapObject()) {
      __ SmiTst(value);
      DeoptimizeIf(eq, instr->environment());
    }
  } else if (FLAG_track_double_fields && representation.IsDouble()) {
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    DwVfpRegister value = ToDoubleRegister(instr->value());
    __ vstr(value, FieldMemOperand(object, offset));
    return;
  }

  if (!transition.is_null()) {
    __ mov(scratch, Operand(transition));
    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          scratch,
                          temp,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  Register value = ToRegister(instr->value());
  ASSERT(!object.is(value));
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
  if (access.IsInobject()) {
    MemOperand operand = FieldMemOperand(object, offset);
    if (representation.IsByte()) {
      __ strb(value, operand);
    } else {
      __ str(value, operand);
    }
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the object for in-object properties.
      __ RecordWriteField(object,
                          offset,
                          value,
                          scratch,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  } else {
    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
    MemOperand operand = FieldMemOperand(scratch, offset);
    if (representation.IsByte()) {
      __ strb(value, operand);
    } else {
      __ str(value, operand);
    }
    if (instr->hydrogen()->NeedsWriteBarrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWriteField(scratch,
                          offset,
                          value,
                          object,
                          GetLinkRegisterState(),
                          kSaveFPRegs,
                          EMIT_REMEMBERED_SET,
                          check_needed);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label done;
    __ b(NegateCondition(condition), &done);
    __ stop("eliminated bounds check failed");
    __ bind(&done);
  } else {
    DeoptimizeIf(condition, check->environment());
  }
}


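// Compares index against length unsigned and fails through ApplyCheckIf:
// hs (index >= length) is the failure condition normally, hi when equality
// is allowed. If the length is in smi representation, a constant index is
// smi-tagged first so both operands use the same encoding.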
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4263
  if (instr->hydrogen()->skip_check()) return;
4264

    
4265
  if (instr->index()->IsConstantOperand()) {
4266
    int constant_index =
4267
        ToInteger32(LConstantOperand::cast(instr->index()));
4268
    if (instr->hydrogen()->length()->representation().IsSmi()) {
4269
      __ mov(ip, Operand(Smi::FromInt(constant_index)));
4270
    } else {
4271
      __ mov(ip, Operand(constant_index));
4272
    }
4273
    __ cmp(ip, ToRegister(instr->length()));
4274
  } else {
4275
    __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
4276
  }
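  // The comparison is unsigned, so a negative index also fails it: deopt
  // on hs (index >= length) normally, or on hi (index > length) when
  // equality is allowed.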
  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
  ApplyCheckIf(condition, instr);
}


void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  Register external_pointer = ToRegister(instr->elements());
  Register key = no_reg;
  ElementsKind elements_kind = instr->elements_kind();
  bool key_is_constant = instr->key()->IsConstantOperand();
  int constant_key = 0;
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - kSmiTagSize) : element_size_shift;
  int additional_offset = instr->additional_index() << element_size_shift;
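  // A smi key is already shifted left by kSmiTagSize, so its scaling shift
  // is reduced by that amount; additional_offset folds the constant part
  // of the index into the final address.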

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
      elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    Register address = scratch0();
    DwVfpRegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ add(address, external_pointer,
               Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      __ add(address, external_pointer, Operand(key, LSL, shift_size));
    }
    if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
      __ vcvt_f32_f64(double_scratch0().low(), value);
      __ vstr(double_scratch0().low(), address, additional_offset);
    } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
      __ vstr(value, address, additional_offset);
    }
  } else {
    Register value(ToRegister(instr->value()));
    MemOperand mem_operand = PrepareKeyedOperand(
        key, external_pointer, key_is_constant, constant_key,
        element_size_shift, shift_size,
        instr->additional_index(), additional_offset);
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ strb(value, mem_operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ strh(value, mem_operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ str(value, mem_operand);
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  DwVfpRegister value = ToDoubleRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register scratch = scratch0();
  DwVfpRegister double_scratch = double_scratch0();
  bool key_is_constant = instr->key()->IsConstantOperand();

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ add(scratch, elements,
           Operand((constant_key << element_size_shift) +
                   FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - kSmiTagSize) : element_size_shift;
    __ add(scratch, elements,
           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
    __ add(scratch, scratch,
           Operand(ToRegister(instr->key()), LSL, shift_size));
  }

  if (instr->NeedsCanonicalization()) {
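    // Fast double arrays encode the hole as one particular NaN bit pattern,
    // so any NaN stored here must first be collapsed to the canonical NaN.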
    // Force a canonical NaN.
    if (masm()->emit_debug_code()) {
      __ vmrs(ip);
      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
      __ Assert(ne, kDefaultNaNModeNotSet);
    }
    __ VFPCanonicalizeNaN(double_scratch, value);
    __ vstr(double_scratch, scratch,
            instr->additional_index() << element_size_shift);
  } else {
    __ vstr(value, scratch, instr->additional_index() << element_size_shift);
  }
}


void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->elements());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
      : no_reg;
  Register scratch = scratch0();
  Register store_base = scratch;
  int offset = 0;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
                                           instr->additional_index());
    store_base = elements;
  } else {
    // Even though the HStoreKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bounds check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
    } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
  __ str(value, FieldMemOperand(store_base, offset));

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ add(key, store_base, Operand(offset - kHeapObjectTag));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetLinkRegisterState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}


void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  // Dispatch by case: external array, fast double array, or fast array.
  if (instr->is_external()) {
    DoStoreKeyedExternalArray(instr);
  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
  } else {
    DoStoreKeyedFixedArray(instr);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(r2));
  ASSERT(ToRegister(instr->key()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
      : isolate()->builtins()->KeyedStoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}


void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(from_map));
  __ b(ne, &not_applicable);

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(new_map_reg, Operand(to_map));
    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        scratch, GetLinkRegisterState(), kDontSaveFPRegs);
  } else {
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
        this, Safepoint::kWithRegistersAndDoubles);
    __ Move(r0, object_reg);
    __ Move(r1, to_map);
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegistersAndDoubles(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp = ToRegister(instr->temp());
  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
  DeoptimizeIf(eq, instr->environment());
  __ bind(&no_memento_found);
}


void LCodeGen::DoStringAdd(LStringAdd* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  StringAddStub stub(instr->hydrogen()->flags());
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ mov(scratch, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr,
                          instr->context());
  __ AssertSmi(r0);
  __ SmiUntag(r0);
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));
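
  // Char codes up to String::kMaxOneByteCharCode index into the
  // single-character string cache; an undefined entry means the string is
  // not cached yet and must be materialized in the deferred code.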
  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
  __ b(hi, deferred->entry());
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(result, ip);
  __ b(eq, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  SwVfpRegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ldr(scratch, ToMemOperand(input));
    __ vmov(single_scratch, scratch);
  } else {
    __ vmov(single_scratch, ToRegister(input));
  }
  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
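  // SmiTag with SetCC is an add of the value to itself, so the V flag is
  // set exactly when the value does not fit in a smi; the vs condition
  // below then deoptimizes.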
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(vs, instr->environment());
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  SwVfpRegister flt_scratch = double_scratch0().low();
  __ vmov(flt_scratch, ToRegister(input));
  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
}


void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
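    // A uint32 fits in a smi iff it is below 2^30: tagging shifts the
    // value left by one and the result must stay a non-negative 32-bit
    // integer, so testing the top two bits (0xc0000000) decides the check.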
    __ tst(ToRegister(input), Operand(0xc0000000));
    DeoptimizeIf(ne, instr->environment());
  }
  __ SmiTag(ToRegister(output), ToRegister(input));
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      SIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagI* instr_;
  };

  Register src = ToRegister(instr->value());
  Register dst = ToRegister(instr->result());

  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(dst, src, SetCC);
  __ b(vs, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagI(instr_,
                                      instr_->value(),
                                      UNSIGNED_INT32);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
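  // reg holds an untagged uint32, so the unsigned comparison (hi) routes
  // every value above Smi::kMaxValue, including those at or above 2^31,
  // to the deferred heap-number allocation.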
  __ cmp(reg, Operand(Smi::kMaxValue));
  __ b(hi, deferred->entry());
  __ SmiTag(reg, reg);
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
                                    LOperand* value,
                                    IntegerSignedness signedness) {
  Label slow;
  Register src = ToRegister(value);
  Register dst = ToRegister(instr->result());
  LowDwVfpRegister dbl_scratch = double_scratch0();

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

  Label done;
  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    if (dst.is(src)) {
      __ SmiUntag(src, dst);
      __ eor(src, src, Operand(0x80000000));
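      // dst held 2 * src (mod 2^32); the arithmetic shift in SmiUntag
      // restores the low 31 bits, and the eor flips bit 31 back to the
      // sign bit that the overflowing tag shift discarded.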
    }
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
  } else {
    __ vmov(dbl_scratch.low(), src);
    __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
  }

  if (FLAG_inline_new) {
    __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
    __ Move(dst, r5);
    __ b(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ mov(ip, Operand::Zero());
  __ StoreToSafepointRegisterSlot(ip, dst);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Move(dst, r0);
  __ sub(dst, dst, Operand(kHeapObjectTag));
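  // The runtime returns a tagged heap number; stripping the tag here means
  // both the inline and the runtime path leave an untagged address in dst
  // for the vstr below.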

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
  __ add(dst, dst, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(dst, dst);
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = scratch0();
  Register reg = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance.
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
  // Now that we have finished with the object's real address, tag it.
  __ add(reg, reg, Operand(kHeapObjectTag));
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, Operand::Zero());

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ sub(r0, r0, Operand(kHeapObjectTag));
  __ StoreToSafepointRegisterSlot(r0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value()));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  if (instr->needs_check()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(result, input, SetCC);
    DeoptimizeIf(cs, instr->environment());
  } else {
    __ SmiUntag(result, input);
  }
}


void LCodeGen::EmitNumberUntagD(Register input_reg,
                                DwVfpRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();
  SwVfpRegister flt_scratch = double_scratch0().low();
  ASSERT(!result_reg.is(double_scratch0()));
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(scratch, Operand(ip));
    if (can_convert_undefined_to_nan) {
      __ b(ne, &convert);
    } else {
      DeoptimizeIf(ne, env);
    }
    // Load the heap number.
    __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
    if (deoptimize_on_minus_zero) {
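      // -0.0 is the only double whose low word is zero and whose high word
      // equals just the sign bit, so two 32-bit compares identify it.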
      __ VmovLow(scratch, result_reg);
      __ cmp(scratch, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch, result_reg);
      __ cmp(scratch, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(eq, env);
    }
    __ jmp(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
      __ cmp(input_reg, Operand(ip));
      DeoptimizeIf(ne, env);
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
      __ jmp(&done);
    }
  } else {
    __ SmiUntag(scratch, input_reg);
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }
  // Smi to double register conversion.
  __ bind(&load_smi);
  // scratch: untagged value of input_reg
  __ vmov(flt_scratch, scratch);
  __ vcvt_f64_s32(result_reg, flt_scratch);
  __ bind(&done);
}


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  Register input_reg = ToRegister(instr->value());
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->temp());
  LowDwVfpRegister double_scratch = double_scratch0();
  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input was optimistically untagged; revert it.
  // The carry flag is set when we reach this deferred code as we just
  // executed SmiUntag(heap_object, SetCC).
  STATIC_ASSERT(kHeapObjectTag == 1);
  __ adc(scratch2, input_reg, Operand(input_reg));
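  // With kHeapObjectTag == 1, the carry flag holds the tag bit shifted out
  // by SmiUntag, so adc computes (input << 1) | 1 and restores the
  // original tagged pointer into scratch2.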

  // Heap number map check.
  __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch1, Operand(ip));

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    __ b(ne, &no_heap_number);
    __ TruncateHeapNumberToI(input_reg, scratch2);
    __ b(&done);

    // Check for Oddballs. Undefined/False are converted to zero and True to
    // one for truncating conversions.
    __ bind(&no_heap_number);
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_bools);
    __ mov(input_reg, Operand::Zero());
    __ b(&done);

    __ bind(&check_bools);
    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    __ b(ne, &check_false);
    __ mov(input_reg, Operand(1));
    __ b(&done);

    __ bind(&check_false);
    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
    __ cmp(scratch2, Operand(ip));
    DeoptimizeIf(ne, instr->environment());
    __ mov(input_reg, Operand::Zero());
    __ b(&done);
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(ne, instr->environment());

    __ sub(ip, scratch2, Operand(kHeapObjectTag));
    __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
    DeoptimizeIf(ne, instr->environment());

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(input_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_scratch2);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
    }
  }
  __ bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    __ SmiUntag(input_reg, SetCC);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ b(cs, deferred->entry());
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  DwVfpRegister result_reg = ToDoubleRegister(result);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  EmitNumberUntagD(input_reg, result_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
  Register scratch1 = scratch0();
  DwVfpRegister double_input = ToDoubleRegister(instr->value());
  LowDwVfpRegister double_scratch = double_scratch0();

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, double_input);
  } else {
    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
    DeoptimizeIf(ne, instr->environment());
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ VmovHigh(scratch1, double_input);
      __ tst(scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment());
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, SetCC);
  DeoptimizeIf(vs, instr->environment());
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input));
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->IsHeapObject()) {
    LOperand* input = instr->value();
    __ SmiTst(ToRegister(input));
    DeoptimizeIf(eq, instr->environment());
  }
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmp(scratch, Operand(first));

    // If there is only one type in the interval, check for equality.
    if (first == last) {
      DeoptimizeIf(ne, instr->environment());
    } else {
      DeoptimizeIf(lo, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmp(scratch, Operand(last));
        DeoptimizeIf(hi, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      ASSERT(tag == 0 || IsPowerOf2(tag));
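      // With a single-bit mask the tag is either 0 or the mask itself, so
      // one tst decides the check: deopt on ne when tag == 0, on eq when
      // the bit must be set.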
      __ tst(scratch, Operand(mask));
      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
    } else {
      __ and_(scratch, scratch, Operand(mask));
      __ cmp(scratch, Operand(tag));
      DeoptimizeIf(ne, instr->environment());
    }
  }
}


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
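    // New-space objects can move during GC, so the expected value is kept
    // in a Cell and reloaded here rather than embedded in the code.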
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ mov(ip, Operand(Handle<Object>(cell)));
    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
    __ cmp(reg, ip);
  } else {
    __ cmp(reg, Operand(object));
  }
  DeoptimizeIf(ne, instr->environment());
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
    __ push(object);
    __ mov(cp, Operand::Zero());
    __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(r0, scratch0());
  }
  __ tst(scratch0(), Operand(kSmiTagMask));
  DeoptimizeIf(eq, instr->environment());
}


void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;
  Register map_reg = scratch0();

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(map_reg, map, &success);
    __ b(eq, &success);
  }

  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(map_reg, map, &success);
  if (instr->hydrogen()->has_migration_target()) {
    __ b(ne, deferred->entry());
  } else {
    DeoptimizeIf(ne, instr->environment());
  }

  __ bind(&success);
}


void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
}


void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  Register unclamped_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  __ ClampUint8(result_reg, unclamped_reg);
}


void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);

  // Check for a heap number.
  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ cmp(scratch, Operand(factory()->heap_number_map()));
  __ b(eq, &heap_number);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, Operand(factory()->undefined_value()));
  DeoptimizeIf(ne, instr->environment());
  __ mov(result_reg, Operand::Zero());
  __ jmp(&done);

  // Heap number case.
  __ bind(&heap_number);
  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
  __ jmp(&done);

  // Smi case.
  __ bind(&is_smi);
  __ ClampUint8(result_reg, result_reg);

  __ bind(&done);
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register scratch = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size,
                result,
                scratch,
                scratch2,
                deferred->entry(),
                flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(scratch, Operand(size));
    } else {
      scratch = ToRegister(instr->size());
    }
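    // Fill the object from its last word down to offset 0 with the
    // one-pointer filler map; scratch serves as a descending byte offset
    // and result is temporarily untagged for addressing.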
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ sub(result, result, Operand(kHeapObjectTag));
    Label loop;
    __ bind(&loop);
    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    __ str(scratch2, MemOperand(result, scratch));
    __ sub(scratch, scratch, Operand(kPointerSize));
    __ cmp(scratch, Operand(0));
    __ b(ge, &loop);
    __ add(result, result, Operand(kHeapObjectTag));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, Operand(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr,
                            instr->context());
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr,
                            instr->context());
  } else {
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr,
                            instr->context());
  }
  __ StoreToSafepointRegisterSlot(r0, result);
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(r0));
  __ push(r0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  Label materialized;
  // Registers will be used as follows:
  // r6 = literals array.
  // r1 = regexp literal.
  // r0 = regexp literal clone.
  // r2-r5 are used as temporaries.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(r6, instr->hydrogen()->literals());
  __ ldr(r1, FieldMemOperand(r6, literal_offset));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &materialized);

  // Create the regexp literal using the runtime function.
  // The result will be in r0.
  __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ mov(r4, Operand(instr->hydrogen()->pattern()));
  __ mov(r3, Operand(instr->hydrogen()->flags()));
  __ Push(r6, r5, r4, r3);
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(r1, r0);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;

  __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ mov(r0, Operand(Smi::FromInt(size)));
  __ Push(r1, r0);
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(r1);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    __ mov(r2, Operand(instr->hydrogen()->shared_info()));
    __ mov(r1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
    __ Push(cp, r2, r1);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  Register input = ToRegister(instr->value());
  __ push(input);
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
                                                  instr->FalseLabel(chunk_),
                                                  input,
                                                  instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  if (type_name->Equals(heap()->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
    __ cmp(input, Operand(ip));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE);
    __ b(ge, false_label);
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, input, scratch, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ b(eq, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = eq;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ b(eq, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset));
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
    final_branch_condition = ne;

  } else if (type_name->Equals(heap()->function_string())) {
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
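    // The two callable spec object types are JS_FUNCTION_TYPE and
    // JS_FUNCTION_PROXY_TYPE, so checking exactly these two covers every
    // value for which typeof yields "function".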
5553
    __ JumpIfSmi(input, false_label);
5554
    __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE);
5555
    __ b(eq, true_label);
5556
    __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE));
5557
    final_branch_condition = eq;
5558

    
5559
  } else if (type_name->Equals(heap()->object_string())) {
5560
    __ JumpIfSmi(input, false_label);
5561
    if (!FLAG_harmony_typeof) {
5562
      __ CompareRoot(input, Heap::kNullValueRootIndex);
5563
      __ b(eq, true_label);
5564
    }
5565
    __ CompareObjectType(input, input, scratch,
5566
                         FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
5567
    __ b(lt, false_label);
5568
    __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
5569
    __ b(gt, false_label);
5570
    // Check for undetectable objects => false.
5571
    __ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
5572
    __ tst(ip, Operand(1 << Map::kIsUndetectable));
5573
    final_branch_condition = eq;
5574

    
5575
  } else {
5576
    __ b(false_label);
5577
  }
5578

    
5579
  return final_branch_condition;
5580
}
5581

    
5582

    
5583
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
5584
  Register temp1 = ToRegister(instr->temp());
5585

    
5586
  EmitIsConstructCall(temp1, scratch0());
5587
  EmitBranch(instr, eq);
5588
}
5589

    
5590

    
5591
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5592
  ASSERT(!temp1.is(temp2));
5593
  // Get the frame pointer for the calling frame.
5594
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5595

    
5596
  // Skip the arguments adaptor frame if it exists.
5597
  Label check_frame_marker;
5598
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5599
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5600
  __ b(ne, &check_frame_marker);
5601
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5602

    
5603
  // Check the marker in the calling frame.
5604
  __ bind(&check_frame_marker);
5605
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5606
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5607
}
5608

    
5609

    
5610
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5611
  if (info()->IsStub()) return;
5612
  // Ensure that we have enough space after the previous lazy-bailout
5613
  // instruction for patching the code here.
5614
  int current_pc = masm()->pc_offset();
5615
  if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5616
    // Block literal pool emission for duration of padding.
5617
    Assembler::BlockConstPoolScope block_const_pool(masm());
5618
    int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
5619
    ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5620
    while (padding_size > 0) {
5621
      __ nop();
5622
      padding_size -= Assembler::kInstrSize;
5623
    }
5624
  }
5625
}
5626

    
5627

    
5628
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
5629
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
5630
  last_lazy_deopt_pc_ = masm()->pc_offset();
5631
  ASSERT(instr->HasEnvironment());
5632
  LEnvironment* env = instr->environment();
5633
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
5634
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
5635
}
5636

    
5637

    
5638
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
5639
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
5640
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
5641
  // needed return address), even though the implementation of LAZY and EAGER is
5642
  // now identical. When LAZY is eventually completely folded into EAGER, remove
5643
  // the special case below.
5644
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
5645
    type = Deoptimizer::LAZY;
5646
  }
5647

    
5648
  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
5649
  DeoptimizeIf(al, instr->environment(), type);
5650
}
5651

    
5652

    
5653
void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


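// Deferred part of DoStackCheck below: calls the stack-guard runtime
// function with registers saved and records the resulting safepoint.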
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


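// Emits an inline stack-limit check. At function entry the check calls the
// StackCheck builtin directly; at backward branches it jumps to deferred
// code instead.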
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(hs, &done);
    PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
    ASSERT(instr->context()->IsRegister());
    ASSERT(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmp(sp, Operand(ip));
    __ b(lo, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


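// Validates the for-in receiver in r0: deoptimizes for undefined, null,
// smis and proxies, then tries the enum cache before falling back to the
// runtime.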
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
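  // Deoptimize if the object is undefined.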
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r0, ip);
  DeoptimizeIf(eq, instr->environment());

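  // Deoptimize if the object is null.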
  Register null_value = r5;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmp(r0, null_value);
  DeoptimizeIf(eq, instr->environment());

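  // Deoptimize if the object is a smi.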
  __ SmiTst(r0);
  DeoptimizeIf(eq, instr->environment());

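  // Deoptimize for anything up to and including a JS proxy; only ordinary
  // JS objects can use the enum cache.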
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
  DeoptimizeIf(le, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

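  // The enum cache is valid; use the object's map.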
  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r0);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

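  // The runtime is expected to return a map (whose own map is the meta map)
  // when the enum cache may be used; deoptimize otherwise.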
  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r1, ip);
  DeoptimizeIf(ne, instr->environment());
  __ bind(&use_cache);
}


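// Loads the enum cache of the given map into the result register, or the
// empty fixed array if the map's enum length is zero; deoptimizes if the
// requested cache entry is missing.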
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ cmp(result, Operand(Smi::FromInt(0)));
  __ b(ne, &load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ ldr(result,
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ ldr(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmp(result, Operand::Zero());
  DeoptimizeIf(eq, instr->environment());

  __ bind(&done);
}


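// Deoptimizes if the object's map differs from the expected map value.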
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr->environment());
}


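// Loads a field by its index: non-negative indices denote in-object
// properties, negative indices denote entries in the out-of-object
// properties array.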
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  Label out_of_object, done;
  __ cmp(index, Operand::Zero());
  __ b(lt, &out_of_object);

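  // In-object property: the smi index scales to a pointer-size offset from
  // the start of the object.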
  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is the negated out-of-object property index plus 1.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
  __ ldr(result, FieldMemOperand(scratch,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}


#undef __

} }  // namespace v8::internal