// deps/v8/src/arm/macro-assembler-arm.cc @ revision f230a1cf (134 KB)
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#include "v8.h"

#if V8_TARGET_ARCH_ARM

#include "bootstrapper.h"
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
#include "isolate-inl.h"
#include "runtime.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
    : Assembler(arg_isolate, buffer, size),
      generating_stub_(false),
      allow_stub_calls_(true),
      has_frame_(false) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}
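
// Usage sketch (illustrative, not part of the original source): callers
// typically let the Assembler allocate its own buffer by passing a NULL
// buffer pointer, e.g.
//
//   MacroAssembler masm(isolate, NULL, 256);  // 'isolate' and the 256-byte
//                                             // size are assumed example values
//
// The exact buffer-ownership rules live in the Assembler base class.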

    
55

    
56
void MacroAssembler::Jump(Register target, Condition cond) {
57
  bx(target, cond);
58
}
59

    
60

    
61
void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
62
                          Condition cond) {
63
  mov(ip, Operand(target, rmode));
64
  bx(ip, cond);
65
}
66

    
67

    
68
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
69
                          Condition cond) {
70
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
71
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
72
}
73

    
74

    
75
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
76
                          Condition cond) {
77
  ASSERT(RelocInfo::IsCodeTarget(rmode));
78
  // 'code' is always generated ARM code, never THUMB code
79
  AllowDeferredHandleDereference embedding_raw_address;
80
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
81
}
82

    
83

    
84
int MacroAssembler::CallSize(Register target, Condition cond) {
85
  return kInstrSize;
86
}
87

    
88

    
89
void MacroAssembler::Call(Register target, Condition cond) {
90
  // Block constant pool for the call instruction sequence.
91
  BlockConstPoolScope block_const_pool(this);
92
  Label start;
93
  bind(&start);
94
  blx(target, cond);
95
  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
96
}
97

    
98

    
99
int MacroAssembler::CallSize(
100
    Address target, RelocInfo::Mode rmode, Condition cond) {
101
  int size = 2 * kInstrSize;
102
  Instr mov_instr = cond | MOV | LeaveCC;
103
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
104
  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
105
    size += kInstrSize;
106
  }
107
  return size;
108
}
109

    
110

    
111
int MacroAssembler::CallSizeNotPredictableCodeSize(
112
    Address target, RelocInfo::Mode rmode, Condition cond) {
113
  int size = 2 * kInstrSize;
114
  Instr mov_instr = cond | MOV | LeaveCC;
115
  intptr_t immediate = reinterpret_cast<intptr_t>(target);
116
  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
117
    size += kInstrSize;
118
  }
119
  return size;
120
}
121

    
122

    
123
void MacroAssembler::Call(Address target,
124
                          RelocInfo::Mode rmode,
125
                          Condition cond,
126
                          TargetAddressStorageMode mode) {
127
  // Block constant pool for the call instruction sequence.
128
  BlockConstPoolScope block_const_pool(this);
129
  Label start;
130
  bind(&start);
131

    
132
  bool old_predictable_code_size = predictable_code_size();
133
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
134
    set_predictable_code_size(true);
135
  }
136

    
137
  // Call sequence on V7 or later may be:
138
  //  movw  ip, #... @ call address low 16
139
  //  movt  ip, #... @ call address high 16
140
  //  blx   ip
141
  //                      @ return address
142
  // Or for pre-V7 or values that may be back-patched
143
  // to avoid ICache flushes:
144
  //  ldr   ip, [pc, #...] @ call address
145
  //  blx   ip
146
  //                      @ return address
147

    
148
  // Statement positions are expected to be recorded when the target
  // address is loaded. The mov method automatically records positions
  // when pc is the target; since that is not the case here, we have to
  // record them explicitly.
152
  positions_recorder()->WriteRecordedPositions();
153

    
154
  mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
155
  blx(ip, cond);
156

    
157
  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
158
  if (mode == NEVER_INLINE_TARGET_ADDRESS) {
159
    set_predictable_code_size(old_predictable_code_size);
160
  }
161
}
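
// Illustrative sketch (not from the original file): CallSize() lets a caller
// reserve or verify the exact number of bytes the call sequence will occupy
// before emitting it, which the ASSERT_EQ above relies on, e.g.
//
//   int call_bytes = masm->CallSize(target, RelocInfo::CODE_TARGET, al);
//   // ... ensure 'call_bytes' of space, then:
//   masm->Call(target, RelocInfo::CODE_TARGET, al);
//
// 'masm' and 'target' are assumed names for a MacroAssembler* and an Address;
// the remaining parameters are left at their header defaults.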
162

    
163

    
164
int MacroAssembler::CallSize(Handle<Code> code,
165
                             RelocInfo::Mode rmode,
166
                             TypeFeedbackId ast_id,
167
                             Condition cond) {
168
  AllowDeferredHandleDereference using_raw_address;
169
  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
170
}
171

    
172

    
173
void MacroAssembler::Call(Handle<Code> code,
174
                          RelocInfo::Mode rmode,
175
                          TypeFeedbackId ast_id,
176
                          Condition cond,
177
                          TargetAddressStorageMode mode) {
178
  Label start;
179
  bind(&start);
180
  ASSERT(RelocInfo::IsCodeTarget(rmode));
181
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
182
    SetRecordedAstId(ast_id);
183
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
184
  }
185
  // 'code' is always generated ARM code, never THUMB code
186
  AllowDeferredHandleDereference embedding_raw_address;
187
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
188
}
189

    
190

    
191
void MacroAssembler::Ret(Condition cond) {
192
  bx(lr, cond);
193
}
194

    
195

    
196
void MacroAssembler::Drop(int count, Condition cond) {
197
  if (count > 0) {
198
    add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
199
  }
200
}
201

    
202

    
203
void MacroAssembler::Ret(int drop, Condition cond) {
204
  Drop(drop, cond);
205
  Ret(cond);
206
}
207

    
208

    
209
void MacroAssembler::Swap(Register reg1,
210
                          Register reg2,
211
                          Register scratch,
212
                          Condition cond) {
213
  if (scratch.is(no_reg)) {
214
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
215
    eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
216
    eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
217
  } else {
218
    mov(scratch, reg1, LeaveCC, cond);
219
    mov(reg1, reg2, LeaveCC, cond);
220
    mov(reg2, scratch, LeaveCC, cond);
221
  }
222
}
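
// Usage sketch (illustrative): with no scratch register available, Swap falls
// back to the classic three-XOR exchange implemented above, e.g.
//
//   __ Swap(r0, r1, no_reg);   // three eor instructions, no temporary needed
//   __ Swap(r0, r1, r2);       // three mov instructions via the scratch r2
//
// The `__` shorthand assumes the usual ACCESS_MASM(masm) macro.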
223

    
224

    
225
void MacroAssembler::Call(Label* target) {
226
  bl(target);
227
}
228

    
229

    
230
void MacroAssembler::Push(Handle<Object> handle) {
231
  mov(ip, Operand(handle));
232
  push(ip);
233
}
234

    
235

    
236
void MacroAssembler::Move(Register dst, Handle<Object> value) {
237
  AllowDeferredHandleDereference smi_check;
238
  if (value->IsSmi()) {
239
    mov(dst, Operand(value));
240
  } else {
241
    ASSERT(value->IsHeapObject());
242
    if (isolate()->heap()->InNewSpace(*value)) {
243
      Handle<Cell> cell = isolate()->factory()->NewCell(value);
244
      mov(dst, Operand(cell));
245
      ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
246
    } else {
247
      mov(dst, Operand(value));
248
    }
249
  }
250
}
251

    
252

    
253
void MacroAssembler::Move(Register dst, Register src, Condition cond) {
254
  if (!dst.is(src)) {
255
    mov(dst, src, LeaveCC, cond);
256
  }
257
}
258

    
259

    
260
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
261
  if (!dst.is(src)) {
262
    vmov(dst, src);
263
  }
264
}
265

    
266

    
267
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
268
                         Condition cond) {
269
  if (!src2.is_reg() &&
270
      !src2.must_output_reloc_info(this) &&
271
      src2.immediate() == 0) {
272
    mov(dst, Operand::Zero(), LeaveCC, cond);
273
  } else if (!src2.is_single_instruction(this) &&
274
             !src2.must_output_reloc_info(this) &&
275
             CpuFeatures::IsSupported(ARMv7) &&
276
             IsPowerOf2(src2.immediate() + 1)) {
277
    ubfx(dst, src1, 0,
278
        WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
279
  } else {
280
    and_(dst, src1, src2, LeaveCC, cond);
281
  }
282
}
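
// Illustrative example: for immediates of the form 2^n - 1 that cannot be
// encoded in a single instruction, the helper above prefers ubfx over
// materializing the mask, so on ARMv7 something like
//
//   __ And(r0, r1, Operand(0xff));      // 0xff encodes directly: plain and_
//   __ And(r0, r1, Operand(0xfffff));   // may emit: ubfx r0, r1, #0, #20
//
// (The `__` shorthand assumes the usual ACCESS_MASM(masm) macro.)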
283

    
284

    
285
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
286
                          Condition cond) {
287
  ASSERT(lsb < 32);
288
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
289
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
290
    and_(dst, src1, Operand(mask), LeaveCC, cond);
291
    if (lsb != 0) {
292
      mov(dst, Operand(dst, LSR, lsb), LeaveCC, cond);
293
    }
294
  } else {
295
    ubfx(dst, src1, lsb, width, cond);
296
  }
297
}
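
// Worked example of the fallback mask above: for lsb == 4 and width == 8,
// mask == (1 << 12) - 1 - ((1 << 4) - 1) == 0xFFF - 0xF == 0xFF0, i.e. bits
// [4..11], and the subsequent LSR by 4 right-aligns the extracted field,
// matching what a single ARMv7 ubfx produces.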
298

    
299

    
300
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
301
                          Condition cond) {
302
  ASSERT(lsb < 32);
303
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
304
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
305
    and_(dst, src1, Operand(mask), LeaveCC, cond);
306
    int shift_up = 32 - lsb - width;
307
    int shift_down = lsb + shift_up;
308
    if (shift_up != 0) {
309
      mov(dst, Operand(dst, LSL, shift_up), LeaveCC, cond);
310
    }
311
    if (shift_down != 0) {
312
      mov(dst, Operand(dst, ASR, shift_down), LeaveCC, cond);
313
    }
314
  } else {
315
    sbfx(dst, src1, lsb, width, cond);
316
  }
317
}
318

    
319

    
320
void MacroAssembler::Bfi(Register dst,
321
                         Register src,
322
                         Register scratch,
323
                         int lsb,
324
                         int width,
325
                         Condition cond) {
326
  ASSERT(0 <= lsb && lsb < 32);
327
  ASSERT(0 <= width && width < 32);
328
  ASSERT(lsb + width < 32);
329
  ASSERT(!scratch.is(dst));
330
  if (width == 0) return;
331
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
332
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
333
    bic(dst, dst, Operand(mask));
334
    and_(scratch, src, Operand((1 << width) - 1));
335
    mov(scratch, Operand(scratch, LSL, lsb));
336
    orr(dst, dst, scratch);
337
  } else {
338
    bfi(dst, src, lsb, width, cond);
339
  }
340
}
341

    
342

    
343
void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
344
                         Condition cond) {
345
  ASSERT(lsb < 32);
346
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
347
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
348
    bic(dst, src, Operand(mask));
349
  } else {
350
    Move(dst, src, cond);
351
    bfc(dst, lsb, width, cond);
352
  }
353
}
354

    
355

    
356
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
357
                          Condition cond) {
358
  if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
359
    ASSERT(!dst.is(pc) && !src.rm().is(pc));
360
    ASSERT((satpos >= 0) && (satpos <= 31));
361

    
362
    // These asserts are required to ensure compatibility with the ARMv7
363
    // implementation.
364
    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
365
    ASSERT(src.rs().is(no_reg));
366

    
367
    Label done;
368
    int satval = (1 << satpos) - 1;
369

    
370
    if (cond != al) {
371
      b(NegateCondition(cond), &done);  // Skip saturate if !condition.
372
    }
373
    if (!(src.is_reg() && dst.is(src.rm()))) {
374
      mov(dst, src);
375
    }
376
    tst(dst, Operand(~satval));
377
    b(eq, &done);
378
    mov(dst, Operand::Zero(), LeaveCC, mi);  // 0 if negative.
379
    mov(dst, Operand(satval), LeaveCC, pl);  // satval if positive.
380
    bind(&done);
381
  } else {
382
    usat(dst, satpos, src, cond);
383
  }
384
}
385

    
386

    
387
void MacroAssembler::LoadRoot(Register destination,
388
                              Heap::RootListIndex index,
389
                              Condition cond) {
390
  if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
391
      isolate()->heap()->RootCanBeTreatedAsConstant(index) &&
392
      !predictable_code_size()) {
393
    // The CPU supports fast immediate values, and this root will never
394
    // change. We will load it as a relocatable immediate value.
395
    Handle<Object> root(&isolate()->heap()->roots_array_start()[index]);
396
    mov(destination, Operand(root), LeaveCC, cond);
397
    return;
398
  }
399
  ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
400
}
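
// Usage sketch (illustrative): most call sites load a constant root relative
// to kRootRegister, e.g.
//
//   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
//
// On ARMv7 with movw/movt immediate loads enabled, constant roots may instead
// be emitted as a relocatable immediate, as handled above.
// (`__` assumes ACCESS_MASM(masm).)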
401

    
402

    
403
void MacroAssembler::StoreRoot(Register source,
404
                               Heap::RootListIndex index,
405
                               Condition cond) {
406
  str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
407
}
408

    
409

    
410
void MacroAssembler::InNewSpace(Register object,
411
                                Register scratch,
412
                                Condition cond,
413
                                Label* branch) {
414
  ASSERT(cond == eq || cond == ne);
415
  and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
416
  cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
417
  b(cond, branch);
418
}
419

    
420

    
421
void MacroAssembler::RecordWriteField(
422
    Register object,
423
    int offset,
424
    Register value,
425
    Register dst,
426
    LinkRegisterStatus lr_status,
427
    SaveFPRegsMode save_fp,
428
    RememberedSetAction remembered_set_action,
429
    SmiCheck smi_check) {
430
  // First, check if a write barrier is even needed. The tests below
431
  // catch stores of Smis.
432
  Label done;
433

    
434
  // Skip barrier if writing a smi.
435
  if (smi_check == INLINE_SMI_CHECK) {
436
    JumpIfSmi(value, &done);
437
  }
438

    
439
  // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
441
  ASSERT(IsAligned(offset, kPointerSize));
442

    
443
  add(dst, object, Operand(offset - kHeapObjectTag));
444
  if (emit_debug_code()) {
445
    Label ok;
446
    tst(dst, Operand((1 << kPointerSizeLog2) - 1));
447
    b(eq, &ok);
448
    stop("Unaligned cell in write barrier");
449
    bind(&ok);
450
  }
451

    
452
  RecordWrite(object,
453
              dst,
454
              value,
455
              lr_status,
456
              save_fp,
457
              remembered_set_action,
458
              OMIT_SMI_CHECK);
459

    
460
  bind(&done);
461

    
462
  // Clobber the clobbered input registers when running with the debug-code
  // flag turned on, to provoke errors.
464
  if (emit_debug_code()) {
465
    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
466
    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
467
  }
468
}
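
// Usage sketch (illustrative; the field offset and register names below are
// assumed): the write barrier is emitted right after the store it protects,
// e.g.
//
//   __ str(value, FieldMemOperand(object, offset));
//   __ RecordWriteField(object, offset, value, scratch,
//                       kLRHasBeenSaved, kSaveFPRegs,
//                       EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
//
// where 'offset' is a kPointerSize-aligned field offset and 'scratch' is
// clobbered, as documented above.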
469

    
470

    
471
// Will clobber 4 registers: object, address, scratch, ip.  The
472
// register 'object' contains a heap object pointer.  The heap object
473
// tag is shifted away.
474
void MacroAssembler::RecordWrite(Register object,
475
                                 Register address,
476
                                 Register value,
477
                                 LinkRegisterStatus lr_status,
478
                                 SaveFPRegsMode fp_mode,
479
                                 RememberedSetAction remembered_set_action,
480
                                 SmiCheck smi_check) {
481
  if (emit_debug_code()) {
482
    ldr(ip, MemOperand(address));
483
    cmp(ip, value);
484
    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
485
  }
486

    
487
  Label done;
488

    
489
  if (smi_check == INLINE_SMI_CHECK) {
490
    JumpIfSmi(value, &done);
491
  }
492

    
493
  CheckPageFlag(value,
494
                value,  // Used as scratch.
495
                MemoryChunk::kPointersToHereAreInterestingMask,
496
                eq,
497
                &done);
498
  CheckPageFlag(object,
499
                value,  // Used as scratch.
500
                MemoryChunk::kPointersFromHereAreInterestingMask,
501
                eq,
502
                &done);
503

    
504
  // Record the actual write.
505
  if (lr_status == kLRHasNotBeenSaved) {
506
    push(lr);
507
  }
508
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
509
  CallStub(&stub);
510
  if (lr_status == kLRHasNotBeenSaved) {
511
    pop(lr);
512
  }
513

    
514
  bind(&done);
515

    
516
  // Clobber the clobbered registers when running with the debug-code flag
  // turned on, to provoke errors.
518
  if (emit_debug_code()) {
519
    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
520
    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
521
  }
522
}
523

    
524

    
525
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
526
                                         Register address,
527
                                         Register scratch,
528
                                         SaveFPRegsMode fp_mode,
529
                                         RememberedSetFinalAction and_then) {
530
  Label done;
531
  if (emit_debug_code()) {
532
    Label ok;
533
    JumpIfNotInNewSpace(object, scratch, &ok);
534
    stop("Remembered set pointer is in new space");
535
    bind(&ok);
536
  }
537
  // Load store buffer top.
538
  ExternalReference store_buffer =
539
      ExternalReference::store_buffer_top(isolate());
540
  mov(ip, Operand(store_buffer));
541
  ldr(scratch, MemOperand(ip));
542
  // Store pointer to buffer and increment buffer top.
543
  str(address, MemOperand(scratch, kPointerSize, PostIndex));
544
  // Write back new top of buffer.
545
  str(scratch, MemOperand(ip));
546
  // Check for the end of the buffer, and call the stub if it has been reached.
548
  tst(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
549
  if (and_then == kFallThroughAtEnd) {
550
    b(eq, &done);
551
  } else {
552
    ASSERT(and_then == kReturnAtEnd);
553
    Ret(eq);
554
  }
555
  push(lr);
556
  StoreBufferOverflowStub store_buffer_overflow =
557
      StoreBufferOverflowStub(fp_mode);
558
  CallStub(&store_buffer_overflow);
559
  pop(lr);
560
  bind(&done);
561
  if (and_then == kReturnAtEnd) {
562
    Ret();
563
  }
564
}
565

    
566

    
567
// Push and pop all registers that can hold pointers.
568
void MacroAssembler::PushSafepointRegisters() {
569
  // Safepoints expect a block of contiguous register values starting with r0:
570
  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
571
  // Safepoints expect a block of kNumSafepointRegisters values on the
572
  // stack, so adjust the stack for unsaved registers.
573
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
574
  ASSERT(num_unsaved >= 0);
575
  sub(sp, sp, Operand(num_unsaved * kPointerSize));
576
  stm(db_w, sp, kSafepointSavedRegisters);
577
}
578

    
579

    
580
void MacroAssembler::PopSafepointRegisters() {
581
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
582
  ldm(ia_w, sp, kSafepointSavedRegisters);
583
  add(sp, sp, Operand(num_unsaved * kPointerSize));
584
}
585

    
586

    
587
void MacroAssembler::PushSafepointRegistersAndDoubles() {
588
  // Number of d-regs not known at snapshot time.
589
  ASSERT(!Serializer::enabled());
590
  PushSafepointRegisters();
591
  sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
592
                      kDoubleSize));
593
  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
594
    vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
595
  }
596
}
597

    
598

    
599
void MacroAssembler::PopSafepointRegistersAndDoubles() {
600
  // Number of d-regs not known at snapshot time.
601
  ASSERT(!Serializer::enabled());
602
  for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) {
603
    vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
604
  }
605
  add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() *
606
                      kDoubleSize));
607
  PopSafepointRegisters();
608
}
609

    
610
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
611
                                                             Register dst) {
612
  str(src, SafepointRegistersAndDoublesSlot(dst));
613
}
614

    
615

    
616
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
617
  str(src, SafepointRegisterSlot(dst));
618
}
619

    
620

    
621
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
622
  ldr(dst, SafepointRegisterSlot(src));
623
}
624

    
625

    
626
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
627
  // The registers are pushed starting with the highest encoding,
628
  // which means that lowest encodings are closest to the stack pointer.
629
  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
630
  return reg_code;
631
}
632

    
633

    
634
MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
635
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
636
}
637

    
638

    
639
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
640
  // Number of d-regs not known at snapshot time.
641
  ASSERT(!Serializer::enabled());
642
  // General purpose registers are pushed last on the stack.
643
  int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
644
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
645
  return MemOperand(sp, doubles_size + register_offset);
646
}
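
// Worked example of the layout above: with N allocatable d-registers, the
// doubles occupy the lowest N * kDoubleSize bytes of the safepoint area, so
// the core register with code k lives at
//
//   MemOperand(sp, N * kDoubleSize + k * kPointerSize)
//
// which is exactly what SafepointRegistersAndDoublesSlot() returns.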
647

    
648

    
649
void MacroAssembler::Ldrd(Register dst1, Register dst2,
650
                          const MemOperand& src, Condition cond) {
651
  ASSERT(src.rm().is(no_reg));
652
  ASSERT(!dst1.is(lr));  // r14.
653

    
654
  // V8 does not use this addressing mode, so the fallback code
655
  // below doesn't support it yet.
656
  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
657

    
658
  // Generate two ldr instructions if ldrd is not available.
659
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
660
      (dst1.code() % 2 == 0) && (dst1.code() + 1 == dst2.code())) {
661
    CpuFeatureScope scope(this, ARMv7);
662
    ldrd(dst1, dst2, src, cond);
663
  } else {
664
    if ((src.am() == Offset) || (src.am() == NegOffset)) {
665
      MemOperand src2(src);
666
      src2.set_offset(src2.offset() + 4);
667
      if (dst1.is(src.rn())) {
668
        ldr(dst2, src2, cond);
669
        ldr(dst1, src, cond);
670
      } else {
671
        ldr(dst1, src, cond);
672
        ldr(dst2, src2, cond);
673
      }
674
    } else {  // PostIndex or NegPostIndex.
675
      ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
676
      if (dst1.is(src.rn())) {
677
        ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
678
        ldr(dst1, src, cond);
679
      } else {
680
        MemOperand src2(src);
681
        src2.set_offset(src2.offset() - 4);
682
        ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
683
        ldr(dst2, src2, cond);
684
      }
685
    }
686
  }
687
}
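
// Usage sketch (illustrative): loading a boxed double as two word halves,
// e.g.
//
//   __ Ldrd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
//
// emits a single ldrd on ARMv7 when r0/r1 form an even/odd register pair, and
// falls back to the two-ldr sequence above otherwise.
// (`__` assumes ACCESS_MASM(masm).)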
688

    
689

    
690
void MacroAssembler::Strd(Register src1, Register src2,
691
                          const MemOperand& dst, Condition cond) {
692
  ASSERT(dst.rm().is(no_reg));
693
  ASSERT(!src1.is(lr));  // r14.
694

    
695
  // V8 does not use this addressing mode, so the fallback code
696
  // below doesn't support it yet.
697
  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
698

    
699
  // Generate two str instructions if strd is not available.
700
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
701
      (src1.code() % 2 == 0) && (src1.code() + 1 == src2.code())) {
702
    CpuFeatureScope scope(this, ARMv7);
703
    strd(src1, src2, dst, cond);
704
  } else {
705
    MemOperand dst2(dst);
706
    if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
707
      dst2.set_offset(dst2.offset() + 4);
708
      str(src1, dst, cond);
709
      str(src2, dst2, cond);
710
    } else {  // PostIndex or NegPostIndex.
711
      ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
712
      dst2.set_offset(dst2.offset() - 4);
713
      str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
714
      str(src2, dst2, cond);
715
    }
716
  }
717
}
718

    
719

    
720
void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
721
  // If needed, restore wanted bits of FPSCR.
722
  Label fpscr_done;
723
  vmrs(scratch);
724
  tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
725
  b(ne, &fpscr_done);
726
  orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
727
  vmsr(scratch);
728
  bind(&fpscr_done);
729
}
730

    
731

    
732
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
733
                                        const DwVfpRegister src,
734
                                        const Condition cond) {
735
  vsub(dst, src, kDoubleRegZero, cond);
736
}
737

    
738

    
739
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
740
                                           const DwVfpRegister src2,
741
                                           const Condition cond) {
742
  // Compare and move FPSCR flags to the normal condition flags.
743
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
744
}
745

    
746
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
747
                                           const double src2,
748
                                           const Condition cond) {
749
  // Compare and move FPSCR flags to the normal condition flags.
750
  VFPCompareAndLoadFlags(src1, src2, pc, cond);
751
}
752

    
753

    
754
void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
755
                                            const DwVfpRegister src2,
756
                                            const Register fpscr_flags,
757
                                            const Condition cond) {
758
  // Compare and load FPSCR.
759
  vcmp(src1, src2, cond);
760
  vmrs(fpscr_flags, cond);
761
}
762

    
763
void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
764
                                            const double src2,
765
                                            const Register fpscr_flags,
766
                                            const Condition cond) {
767
  // Compare and load FPSCR.
768
  vcmp(src1, src2, cond);
769
  vmrs(fpscr_flags, cond);
770
}
771

    
772
void MacroAssembler::Vmov(const DwVfpRegister dst,
773
                          const double imm,
774
                          const Register scratch) {
775
  static const DoubleRepresentation minus_zero(-0.0);
776
  static const DoubleRepresentation zero(0.0);
777
  DoubleRepresentation value(imm);
778
  // Handle special values first.
779
  if (value.bits == zero.bits) {
780
    vmov(dst, kDoubleRegZero);
781
  } else if (value.bits == minus_zero.bits) {
782
    vneg(dst, kDoubleRegZero);
783
  } else {
784
    vmov(dst, imm, scratch);
785
  }
786
}
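
// Illustrative example: 0.0 and -0.0 are special-cased above so no scratch
// register or constant load is needed, e.g.
//
//   __ Vmov(d8, -0.0, no_reg);   // becomes: vneg d8, kDoubleRegZero
//
// Other immediates go through the Assembler's vmov, which may use 'scratch'.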
787

    
788

    
789
void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) {
790
  if (src.code() < 16) {
791
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
792
    vmov(dst, loc.high());
793
  } else {
794
    vmov(dst, VmovIndexHi, src);
795
  }
796
}
797

    
798

    
799
void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) {
800
  if (dst.code() < 16) {
801
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
802
    vmov(loc.high(), src);
803
  } else {
804
    vmov(dst, VmovIndexHi, src);
805
  }
806
}
807

    
808

    
809
void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) {
810
  if (src.code() < 16) {
811
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code());
812
    vmov(dst, loc.low());
813
  } else {
814
    vmov(dst, VmovIndexLo, src);
815
  }
816
}
817

    
818

    
819
void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
820
  if (dst.code() < 16) {
821
    const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
822
    vmov(loc.low(), src);
823
  } else {
824
    vmov(dst, VmovIndexLo, src);
825
  }
826
}
827

    
828

    
829
void MacroAssembler::LoadNumber(Register object,
830
                                LowDwVfpRegister dst,
831
                                Register heap_number_map,
832
                                Register scratch,
833
                                Label* not_number) {
834
  Label is_smi, done;
835

    
836
  UntagAndJumpIfSmi(scratch, object, &is_smi);
837
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
838

    
839
  vldr(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
840
  b(&done);
841

    
842
  // Handle loading a double from a smi.
843
  bind(&is_smi);
844
  vmov(dst.high(), scratch);
845
  vcvt_f64_s32(dst, dst.high());
846

    
847
  bind(&done);
848
}
849

    
850

    
851
void MacroAssembler::LoadNumberAsInt32Double(Register object,
852
                                             DwVfpRegister double_dst,
853
                                             Register heap_number_map,
854
                                             Register scratch,
855
                                             LowDwVfpRegister double_scratch,
856
                                             Label* not_int32) {
857
  ASSERT(!scratch.is(object));
858
  ASSERT(!heap_number_map.is(object) && !heap_number_map.is(scratch));
859

    
860
  Label done, obj_is_not_smi;
861

    
862
  UntagAndJumpIfNotSmi(scratch, object, &obj_is_not_smi);
863
  vmov(double_scratch.low(), scratch);
864
  vcvt_f64_s32(double_dst, double_scratch.low());
865
  b(&done);
866

    
867
  bind(&obj_is_not_smi);
868
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_int32);
869

    
870
  // Load the heap number's double value.
872
  vldr(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
873

    
874
  TestDoubleIsInt32(double_dst, double_scratch);
875
  // Jump to not_int32 if the operation did not succeed.
876
  b(ne, not_int32);
877

    
878
  bind(&done);
879
}
880

    
881

    
882
void MacroAssembler::LoadNumberAsInt32(Register object,
883
                                       Register dst,
884
                                       Register heap_number_map,
885
                                       Register scratch,
886
                                       DwVfpRegister double_scratch0,
887
                                       LowDwVfpRegister double_scratch1,
888
                                       Label* not_int32) {
889
  ASSERT(!dst.is(object));
890
  ASSERT(!scratch.is(object));
891

    
892
  Label done, maybe_undefined;
893

    
894
  UntagAndJumpIfSmi(dst, object, &done);
895

    
896
  JumpIfNotHeapNumber(object, heap_number_map, scratch, &maybe_undefined);
897

    
898
  // Object is a heap number.
899
  // Convert the floating point value to a 32-bit integer.
900
  // Load the double value.
901
  vldr(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
902

    
903
  TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
904
  // Jump to not_int32 if the operation did not succeed.
905
  b(ne, not_int32);
906
  b(&done);
907

    
908
  bind(&maybe_undefined);
909
  CompareRoot(object, Heap::kUndefinedValueRootIndex);
910
  b(ne, not_int32);
911
  // |undefined| is truncated to 0.
912
  mov(dst, Operand(Smi::FromInt(0)));
913
  // Fall through.
914

    
915
  bind(&done);
916
}
917

    
918

    
919
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
920
  if (frame_mode == BUILD_STUB_FRAME) {
921
    stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
922
    Push(Smi::FromInt(StackFrame::STUB));
923
    // Adjust FP to point to saved FP.
924
    add(fp, sp, Operand(2 * kPointerSize));
925
  } else {
926
    PredictableCodeSizeScope predictable_code_size_scope(
927
        this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
928
    // The following three instructions must remain together and unmodified
929
    // for code aging to work properly.
930
    if (isolate()->IsCodePreAgingActive()) {
931
      // Pre-age the code.
932
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
933
      add(r0, pc, Operand(-8));
934
      ldr(pc, MemOperand(pc, -4));
935
      dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
936
    } else {
937
      stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
938
      nop(ip.code());
939
      // Adjust FP to point to saved FP.
940
      add(fp, sp, Operand(2 * kPointerSize));
941
    }
942
  }
943
}
944

    
945

    
946
void MacroAssembler::EnterFrame(StackFrame::Type type) {
947
  // r0-r3: preserved
948
  stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
949
  mov(ip, Operand(Smi::FromInt(type)));
950
  push(ip);
951
  mov(ip, Operand(CodeObject()));
952
  push(ip);
953
  add(fp, sp, Operand(3 * kPointerSize));  // Adjust FP to point to saved FP.
954
}
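
// Sketch (illustrative) of the frame built above, relative to the new fp:
//
//   [fp + 4]  caller's lr (return address)
//   [fp]      caller's fp
//   [fp - 4]  context (cp)
//   [fp - 8]  frame type marker (Smi::FromInt(type))
//   [fp - 12] code object            <- sp points here on exit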
955

    
956

    
957
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
958
  // r0: preserved
959
  // r1: preserved
960
  // r2: preserved
961

    
962
  // Drop the execution stack down to the frame pointer and restore
963
  // the caller frame pointer and return address.
964
  mov(sp, fp);
965
  ldm(ia_w, sp, fp.bit() | lr.bit());
966
}
967

    
968

    
969
void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
970
  // Set up the frame structure on the stack.
971
  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
972
  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
973
  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
974
  Push(lr, fp);
975
  mov(fp, Operand(sp));  // Set up new frame pointer.
976
  // Reserve room for saved entry sp and code object.
977
  sub(sp, sp, Operand(2 * kPointerSize));
978
  if (emit_debug_code()) {
979
    mov(ip, Operand::Zero());
980
    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
981
  }
982
  mov(ip, Operand(CodeObject()));
983
  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
984

    
985
  // Save the frame pointer and the context in top.
986
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
987
  str(fp, MemOperand(ip));
988
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
989
  str(cp, MemOperand(ip));
990

    
991
  // Optionally save all double registers.
992
  if (save_doubles) {
993
    SaveFPRegs(sp, ip);
994
    // Note that d0 will be accessible at
995
    //   fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize,
996
    // since the sp slot and code slot were pushed after the fp.
997
  }
998

    
999
  // Reserve space for the return address and the requested stack space, and
  // align the frame in preparation for calling the runtime function.
1001
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
1002
  sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
1003
  if (frame_alignment > 0) {
1004
    ASSERT(IsPowerOf2(frame_alignment));
1005
    and_(sp, sp, Operand(-frame_alignment));
1006
  }
1007

    
1008
  // Set the exit frame sp value to point just before the return address
1009
  // location.
1010
  add(ip, sp, Operand(kPointerSize));
1011
  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
1012
}
1013

    
1014

    
1015
void MacroAssembler::InitializeNewString(Register string,
1016
                                         Register length,
1017
                                         Heap::RootListIndex map_index,
1018
                                         Register scratch1,
1019
                                         Register scratch2) {
1020
  SmiTag(scratch1, length);
1021
  LoadRoot(scratch2, map_index);
1022
  str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1023
  mov(scratch1, Operand(String::kEmptyHashField));
1024
  str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1025
  str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
1026
}
1027

    
1028

    
1029
int MacroAssembler::ActivationFrameAlignment() {
1030
#if V8_HOST_ARCH_ARM
1031
  // Running on the real platform. Use the alignment as mandated by the local
1032
  // environment.
1033
  // Note: This will break if we ever start generating snapshots on one ARM
1034
  // platform for another ARM platform with a different alignment.
1035
  return OS::ActivationFrameAlignment();
1036
#else  // V8_HOST_ARCH_ARM
1037
  // If we are using the simulator then we should always align to the expected
1038
  // alignment. As the simulator is used to generate snapshots we do not know
1039
  // if the target platform will need alignment, so this is controlled from a
1040
  // flag.
1041
  return FLAG_sim_stack_alignment;
1042
#endif  // V8_HOST_ARCH_ARM
1043
}
1044

    
1045

    
1046
void MacroAssembler::LeaveExitFrame(bool save_doubles,
1047
                                    Register argument_count,
1048
                                    bool restore_context) {
1049
  // Optionally restore all double registers.
1050
  if (save_doubles) {
1051
    // Calculate the stack location of the saved doubles and restore them.
1052
    const int offset = 2 * kPointerSize;
1053
    sub(r3, fp,
1054
        Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize));
1055
    RestoreFPRegs(r3, ip);
1056
  }
1057

    
1058
  // Clear top frame.
1059
  mov(r3, Operand::Zero());
1060
  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1061
  str(r3, MemOperand(ip));
1062

    
1063

    
1064
  // Restore current context from top and clear it in debug mode.
1065
  if (restore_context) {
1066
    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1067
    ldr(cp, MemOperand(ip));
1068
  }
1069
#ifdef DEBUG
1070
  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
1071
  str(r3, MemOperand(ip));
1072
#endif
1073

    
1074
  // Tear down the exit frame, pop the arguments, and return.
1075
  mov(sp, Operand(fp));
1076
  ldm(ia_w, sp, fp.bit() | lr.bit());
1077
  if (argument_count.is_valid()) {
1078
    add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
1079
  }
1080
}
1081

    
1082

    
1083
void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
1084
  if (use_eabi_hardfloat()) {
1085
    Move(dst, d0);
1086
  } else {
1087
    vmov(dst, r0, r1);
1088
  }
1089
}
1090

    
1091

    
1092
void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
1093
  // This macro takes the dst register to make the code more readable
1094
  // at the call sites. However, the dst register has to be r5 to
1095
  // follow the calling convention which requires the call type to be
1096
  // in r5.
1097
  ASSERT(dst.is(r5));
1098
  if (call_kind == CALL_AS_FUNCTION) {
1099
    mov(dst, Operand(Smi::FromInt(1)));
1100
  } else {
1101
    mov(dst, Operand(Smi::FromInt(0)));
1102
  }
1103
}
1104

    
1105

    
1106
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
1107
                                    const ParameterCount& actual,
1108
                                    Handle<Code> code_constant,
1109
                                    Register code_reg,
1110
                                    Label* done,
1111
                                    bool* definitely_mismatches,
1112
                                    InvokeFlag flag,
1113
                                    const CallWrapper& call_wrapper,
1114
                                    CallKind call_kind) {
1115
  bool definitely_matches = false;
1116
  *definitely_mismatches = false;
1117
  Label regular_invoke;
1118

    
1119
  // Check whether the expected and actual argument counts match. If not,
  // set up registers according to the contract with ArgumentsAdaptorTrampoline:
  //  r0: actual arguments count
  //  r1: function (passed through to callee)
  //  r2: expected arguments count
  //  r3: callee code entry
1125

    
1126
  // The code below is made a lot easier because the calling code already sets
1127
  // up actual and expected registers according to the contract if values are
1128
  // passed in registers.
1129
  ASSERT(actual.is_immediate() || actual.reg().is(r0));
1130
  ASSERT(expected.is_immediate() || expected.reg().is(r2));
1131
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
1132

    
1133
  if (expected.is_immediate()) {
1134
    ASSERT(actual.is_immediate());
1135
    if (expected.immediate() == actual.immediate()) {
1136
      definitely_matches = true;
1137
    } else {
1138
      mov(r0, Operand(actual.immediate()));
1139
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1140
      if (expected.immediate() == sentinel) {
1141
        // Don't worry about adapting arguments for builtins that
1142
        // don't want that done. Skip adaption code by making it look
1143
        // like we have a match between expected and actual number of
1144
        // arguments.
1145
        definitely_matches = true;
1146
      } else {
1147
        *definitely_mismatches = true;
1148
        mov(r2, Operand(expected.immediate()));
1149
      }
1150
    }
1151
  } else {
1152
    if (actual.is_immediate()) {
1153
      cmp(expected.reg(), Operand(actual.immediate()));
1154
      b(eq, &regular_invoke);
1155
      mov(r0, Operand(actual.immediate()));
1156
    } else {
1157
      cmp(expected.reg(), Operand(actual.reg()));
1158
      b(eq, &regular_invoke);
1159
    }
1160
  }
1161

    
1162
  if (!definitely_matches) {
1163
    if (!code_constant.is_null()) {
1164
      mov(r3, Operand(code_constant));
1165
      add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
1166
    }
1167

    
1168
    Handle<Code> adaptor =
1169
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
1170
    if (flag == CALL_FUNCTION) {
1171
      call_wrapper.BeforeCall(CallSize(adaptor));
1172
      SetCallKind(r5, call_kind);
1173
      Call(adaptor);
1174
      call_wrapper.AfterCall();
1175
      if (!*definitely_mismatches) {
1176
        b(done);
1177
      }
1178
    } else {
1179
      SetCallKind(r5, call_kind);
1180
      Jump(adaptor, RelocInfo::CODE_TARGET);
1181
    }
1182
    bind(&regular_invoke);
1183
  }
1184
}
1185

    
1186

    
1187
void MacroAssembler::InvokeCode(Register code,
1188
                                const ParameterCount& expected,
1189
                                const ParameterCount& actual,
1190
                                InvokeFlag flag,
1191
                                const CallWrapper& call_wrapper,
1192
                                CallKind call_kind) {
1193
  // You can't call a function without a valid frame.
1194
  ASSERT(flag == JUMP_FUNCTION || has_frame());
1195

    
1196
  Label done;
1197
  bool definitely_mismatches = false;
1198
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
1199
                 &done, &definitely_mismatches, flag,
1200
                 call_wrapper, call_kind);
1201
  if (!definitely_mismatches) {
1202
    if (flag == CALL_FUNCTION) {
1203
      call_wrapper.BeforeCall(CallSize(code));
1204
      SetCallKind(r5, call_kind);
1205
      Call(code);
1206
      call_wrapper.AfterCall();
1207
    } else {
1208
      ASSERT(flag == JUMP_FUNCTION);
1209
      SetCallKind(r5, call_kind);
1210
      Jump(code);
1211
    }
1212

    
1213
    // Continue here if InvokePrologue does handle the invocation due to
1214
    // mismatched parameter counts.
1215
    bind(&done);
1216
  }
1217
}
1218

    
1219

    
1220
void MacroAssembler::InvokeCode(Handle<Code> code,
1221
                                const ParameterCount& expected,
1222
                                const ParameterCount& actual,
1223
                                RelocInfo::Mode rmode,
1224
                                InvokeFlag flag,
1225
                                CallKind call_kind) {
1226
  // You can't call a function without a valid frame.
1227
  ASSERT(flag == JUMP_FUNCTION || has_frame());
1228

    
1229
  Label done;
1230
  bool definitely_mismatches = false;
1231
  InvokePrologue(expected, actual, code, no_reg,
1232
                 &done, &definitely_mismatches, flag,
1233
                 NullCallWrapper(), call_kind);
1234
  if (!definitely_mismatches) {
1235
    if (flag == CALL_FUNCTION) {
1236
      SetCallKind(r5, call_kind);
1237
      Call(code, rmode);
1238
    } else {
1239
      SetCallKind(r5, call_kind);
1240
      Jump(code, rmode);
1241
    }
1242

    
1243
    // Continue here if InvokePrologue does handle the invocation due to
1244
    // mismatched parameter counts.
1245
    bind(&done);
1246
  }
1247
}
1248

    
1249

    
1250
void MacroAssembler::InvokeFunction(Register fun,
1251
                                    const ParameterCount& actual,
1252
                                    InvokeFlag flag,
1253
                                    const CallWrapper& call_wrapper,
1254
                                    CallKind call_kind) {
1255
  // You can't call a function without a valid frame.
1256
  ASSERT(flag == JUMP_FUNCTION || has_frame());
1257

    
1258
  // Contract with called JS functions requires that function is passed in r1.
1259
  ASSERT(fun.is(r1));
1260

    
1261
  Register expected_reg = r2;
1262
  Register code_reg = r3;
1263

    
1264
  ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
1265
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1266
  ldr(expected_reg,
1267
      FieldMemOperand(code_reg,
1268
                      SharedFunctionInfo::kFormalParameterCountOffset));
1269
  SmiUntag(expected_reg);
1270
  ldr(code_reg,
1271
      FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1272

    
1273
  ParameterCount expected(expected_reg);
1274
  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
1275
}
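
// Usage sketch (illustrative): with the callee already in r1, a call site
// might look like
//
//   ParameterCount actual(argc);   // 'argc' is an assumed immediate or register
//   __ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper(),
//                     CALL_AS_METHOD);
//
// which loads the expected argument count and code entry from the function
// and dispatches through InvokeCode() above.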
1276

    
1277

    
1278
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1279
                                    const ParameterCount& expected,
1280
                                    const ParameterCount& actual,
1281
                                    InvokeFlag flag,
1282
                                    const CallWrapper& call_wrapper,
1283
                                    CallKind call_kind) {
1284
  // You can't call a function without a valid frame.
1285
  ASSERT(flag == JUMP_FUNCTION || has_frame());
1286

    
1287
  // Get the function and setup the context.
1288
  Move(r1, function);
1289
  ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
1290

    
1291
  // We call indirectly through the code field in the function to
1292
  // allow recompilation to take effect without changing any of the
1293
  // call sites.
1294
  ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
1295
  InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
1296
}
1297

    
1298

    
1299
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1300
                                          Register map,
1301
                                          Register scratch,
1302
                                          Label* fail) {
1303
  ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1304
  IsInstanceJSObjectType(map, scratch, fail);
1305
}
1306

    
1307

    
1308
void MacroAssembler::IsInstanceJSObjectType(Register map,
1309
                                            Register scratch,
1310
                                            Label* fail) {
1311
  ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1312
  cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1313
  b(lt, fail);
1314
  cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1315
  b(gt, fail);
1316
}
1317

    
1318

    
1319
void MacroAssembler::IsObjectJSStringType(Register object,
1320
                                          Register scratch,
1321
                                          Label* fail) {
1322
  ASSERT(kNotStringTag != 0);
1323

    
1324
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1325
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1326
  tst(scratch, Operand(kIsNotStringMask));
1327
  b(ne, fail);
1328
}
1329

    
1330

    
1331
void MacroAssembler::IsObjectNameType(Register object,
1332
                                      Register scratch,
1333
                                      Label* fail) {
1334
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1335
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1336
  cmp(scratch, Operand(LAST_NAME_TYPE));
1337
  b(hi, fail);
1338
}
1339

    
1340

    
1341
#ifdef ENABLE_DEBUGGER_SUPPORT
1342
void MacroAssembler::DebugBreak() {
1343
  mov(r0, Operand::Zero());
1344
  mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1345
  CEntryStub ces(1);
1346
  ASSERT(AllowThisStubCall(&ces));
1347
  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
1348
}
1349
#endif
1350

    
1351

    
1352
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1353
                                    int handler_index) {
1354
  // Adjust this code if not the case.
1355
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1356
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1357
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1358
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1359
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1360
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1361

    
1362
  // For the JSEntry handler, we must preserve r0-r4; r5-r6 are available.
1363
  // We will build up the handler from the bottom by pushing on the stack.
1364
  // Set up the code object (r5) and the state (r6) for pushing.
1365
  unsigned state =
1366
      StackHandler::IndexField::encode(handler_index) |
1367
      StackHandler::KindField::encode(kind);
1368
  mov(r5, Operand(CodeObject()));
1369
  mov(r6, Operand(state));
1370

    
1371
  // Push the frame pointer, context, state, and code object.
1372
  if (kind == StackHandler::JS_ENTRY) {
1373
    mov(cp, Operand(Smi::FromInt(0)));  // Indicates no context.
1374
    mov(ip, Operand::Zero());  // NULL frame pointer.
1375
    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
1376
  } else {
1377
    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
1378
  }
1379

    
1380
  // Link the current handler as the next handler.
1381
  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1382
  ldr(r5, MemOperand(r6));
1383
  push(r5);
1384
  // Set this new handler as the current one.
1385
  str(sp, MemOperand(r6));
1386
}
1387

    
1388

    
1389
void MacroAssembler::PopTryHandler() {
1390
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1391
  pop(r1);
1392
  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1393
  add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1394
  str(r1, MemOperand(ip));
1395
}
1396

    
1397

    
1398
void MacroAssembler::JumpToHandlerEntry() {
1399
  // Compute the handler entry address and jump to it.  The handler table is
1400
  // a fixed array of (smi-tagged) code offsets.
1401
  // r0 = exception, r1 = code object, r2 = state.
1402
  ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
1403
  add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1404
  mov(r2, Operand(r2, LSR, StackHandler::kKindWidth));  // Handler index.
1405
  ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));  // Smi-tagged offset.
1406
  add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
1407
  add(pc, r1, Operand::SmiUntag(r2));  // Jump
1408
}
1409

    
1410

    
1411
void MacroAssembler::Throw(Register value) {
1412
  // Adjust this code if not the case.
1413
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1414
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1415
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1416
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1417
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1418
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1419

    
1420
  // The exception is expected in r0.
1421
  if (!value.is(r0)) {
1422
    mov(r0, value);
1423
  }
1424
  // Drop the stack pointer to the top of the top handler.
1425
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1426
  ldr(sp, MemOperand(r3));
1427
  // Restore the next handler.
1428
  pop(r2);
1429
  str(r2, MemOperand(r3));
1430

    
1431
  // Get the code object (r1) and state (r2).  Restore the context and frame
1432
  // pointer.
1433
  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1434

    
1435
  // If the handler is a JS frame, restore the context to the frame.
1436
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1437
  // or cp.
1438
  tst(cp, cp);
1439
  str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1440

    
1441
  JumpToHandlerEntry();
1442
}
1443

    
1444

    
1445
void MacroAssembler::ThrowUncatchable(Register value) {
1446
  // Adjust this code if not the case.
1447
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1448
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1449
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1450
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1451
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1452
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1453

    
1454
  // The exception is expected in r0.
1455
  if (!value.is(r0)) {
1456
    mov(r0, value);
1457
  }
1458
  // Drop the stack pointer to the top of the top stack handler.
1459
  mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1460
  ldr(sp, MemOperand(r3));
1461

    
1462
  // Unwind the handlers until the ENTRY handler is found.
1463
  Label fetch_next, check_kind;
1464
  jmp(&check_kind);
1465
  bind(&fetch_next);
1466
  ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1467

    
1468
  bind(&check_kind);
1469
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1470
  ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
1471
  tst(r2, Operand(StackHandler::KindField::kMask));
1472
  b(ne, &fetch_next);
1473

    
1474
  // Set the top handler address to the next handler past the top ENTRY
  // handler.
1475
  pop(r2);
1476
  str(r2, MemOperand(r3));
1477
  // Get the code object (r1) and state (r2).  Clear the context and frame
1478
  // pointer (0 was saved in the handler).
1479
  ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
1480

    
1481
  JumpToHandlerEntry();
1482
}
1483

    
1484

    
1485
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1486
                                            Register scratch,
1487
                                            Label* miss) {
1488
  Label same_contexts;
1489

    
1490
  ASSERT(!holder_reg.is(scratch));
1491
  ASSERT(!holder_reg.is(ip));
1492
  ASSERT(!scratch.is(ip));
1493

    
1494
  // Load current lexical context from the stack frame.
1495
  ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1496
  // In debug mode, make sure the lexical context is set.
1497
#ifdef DEBUG
1498
  cmp(scratch, Operand::Zero());
1499
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1500
#endif
1501

    
1502
  // Load the native context of the current context.
1503
  int offset =
1504
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1505
  ldr(scratch, FieldMemOperand(scratch, offset));
1506
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1507

    
1508
  // Check that the context is a native context.
1509
  if (emit_debug_code()) {
1510
    // Cannot use ip as a temporary in this verification code, because
1511
    // ip is clobbered as part of cmp with an object Operand.
1512
    push(holder_reg);  // Temporarily save holder on the stack.
1513
    // Read the first word and compare to the native_context_map.
1514
    ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1515
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1516
    cmp(holder_reg, ip);
1517
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1518
    pop(holder_reg);  // Restore holder.
1519
  }
1520

    
1521
  // Check if both contexts are the same.
1522
  ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1523
  cmp(scratch, Operand(ip));
1524
  b(eq, &same_contexts);
1525

    
1526
  // Check that the context is a native context.
1527
  if (emit_debug_code()) {
1528
    // Cannot use ip as a temporary in this verification code, because
1529
    // ip is clobbered as part of cmp with an object Operand.
1530
    push(holder_reg);  // Temporarily save holder on the stack.
1531
    mov(holder_reg, ip);  // Move ip to its holding place.
1532
    LoadRoot(ip, Heap::kNullValueRootIndex);
1533
    cmp(holder_reg, ip);
1534
    Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1535

    
1536
    ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1537
    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1538
    cmp(holder_reg, ip);
1539
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1540
    // Restoring ip is not needed; it is reloaded below.
1541
    pop(holder_reg);  // Restore holder.
1542
    // Restore ip to holder's context.
1543
    ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1544
  }
1545

    
1546
  // Check that the security token in the calling global object is
1547
  // compatible with the security token in the receiving global
1548
  // object.
1549
  int token_offset = Context::kHeaderSize +
1550
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
1551

    
1552
  ldr(scratch, FieldMemOperand(scratch, token_offset));
1553
  ldr(ip, FieldMemOperand(ip, token_offset));
1554
  cmp(scratch, Operand(ip));
1555
  b(ne, miss);
1556

    
1557
  bind(&same_contexts);
1558
}
1559

    
1560

    
1561
void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1562
  // First of all we assign the hash seed to scratch.
1563
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
1564
  SmiUntag(scratch);
1565

    
1566
  // Xor the original key with the seed.
1567
  eor(t0, t0, Operand(scratch));
1568

    
1569
  // Compute the hash code from the untagged key.  This must be kept in sync
1570
  // with ComputeIntegerHash in utils.h.
1571
  //
1572
  // hash = ~hash + (hash << 15);
1573
  mvn(scratch, Operand(t0));
1574
  add(t0, scratch, Operand(t0, LSL, 15));
1575
  // hash = hash ^ (hash >> 12);
1576
  eor(t0, t0, Operand(t0, LSR, 12));
1577
  // hash = hash + (hash << 2);
1578
  add(t0, t0, Operand(t0, LSL, 2));
1579
  // hash = hash ^ (hash >> 4);
1580
  eor(t0, t0, Operand(t0, LSR, 4));
1581
  // hash = hash * 2057;
1582
  mov(scratch, Operand(t0, LSL, 11));
1583
  add(t0, t0, Operand(t0, LSL, 3));
1584
  add(t0, t0, scratch);
1585
  // hash = hash ^ (hash >> 16);
1586
  eor(t0, t0, Operand(t0, LSR, 16));
1587
}
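
// Editor's note: a scalar C++ rendering of the hash emitted above, added only
// as an illustrative sketch. This helper is not part of the original file and
// is not referenced anywhere; it simply mirrors the commented steps, which
// must stay in sync with ComputeIntegerHash in utils.h.
inline uint32_t SeededIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;               // Xor the key with the seed.
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // Emitted above as hash + (hash << 3) + (hash << 11).
  hash = hash ^ (hash >> 16);
  return hash;
}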
1588

    
1589

    
1590
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
1591
                                              Register elements,
1592
                                              Register key,
1593
                                              Register result,
1594
                                              Register t0,
1595
                                              Register t1,
1596
                                              Register t2) {
1597
  // Register use:
1598
  //
1599
  // elements - holds the slow-case elements of the receiver on entry.
1600
  //            Unchanged unless 'result' is the same register.
1601
  //
1602
  // key      - holds the smi key on entry.
1603
  //            Unchanged unless 'result' is the same register.
1604
  //
1605
  // result   - holds the result on exit if the load succeeded.
1606
  //            Allowed to be the same as 'key' or 'result'.
1607
  //            Unchanged on bailout so 'key' or 'result' can be used
1608
  //            in further computation.
1609
  //
1610
  // Scratch registers:
1611
  //
1612
  // t0 - holds the untagged key on entry and holds the hash once computed.
1613
  //
1614
  // t1 - used to hold the capacity mask of the dictionary
1615
  //
1616
  // t2 - used for the index into the dictionary.
1617
  Label done;
1618

    
1619
  GetNumberHash(t0, t1);
1620

    
1621
  // Compute the capacity mask.
1622
  ldr(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1623
  SmiUntag(t1);
1624
  sub(t1, t1, Operand(1));
1625

    
1626
  // Generate an unrolled loop that performs a few probes before giving up.
1627
  static const int kProbes = 4;
1628
  for (int i = 0; i < kProbes; i++) {
1629
    // Use t2 for index calculations and keep the hash intact in t0.
1630
    mov(t2, t0);
1631
    // Compute the masked index: (hash + i + i * i) & mask.
1632
    if (i > 0) {
1633
      add(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1634
    }
1635
    and_(t2, t2, Operand(t1));
1636

    
1637
    // Scale the index by multiplying by the element size.
1638
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
1639
    add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
1640

    
1641
    // Check if the key is identical to the name.
1642
    add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
1643
    ldr(ip, FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1644
    cmp(key, Operand(ip));
1645
    if (i != kProbes - 1) {
1646
      b(eq, &done);
1647
    } else {
1648
      b(ne, miss);
1649
    }
1650
  }
1651

    
1652
  bind(&done);
1653
  // Check that the value is a normal property.
1654
  // t2: elements + (index * kPointerSize)
1655
  const int kDetailsOffset =
1656
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1657
  ldr(t1, FieldMemOperand(t2, kDetailsOffset));
1658
  tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
1659
  b(ne, miss);
1660

    
1661
  // Get the value at the masked, scaled index and return.
1662
  const int kValueOffset =
1663
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1664
  ldr(result, FieldMemOperand(t2, kValueOffset));
1665
}
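
// Editor's note (illustrative sketch only, not part of the original source):
// per probe i (at most kProbes = 4), the unrolled loop above performs
//   index = (hash + GetProbeOffset(i)) & (capacity - 1)  // "(hash + i + i * i) & mask"
//   entry = elements + (index * kEntrySize) * kPointerSize   // kEntrySize == 3
//   hit if entry[kElementsStartOffset] == key
// and on a hit it checks the details word at kElementsStartOffset +
// 2 * kPointerSize (must be a normal property) before loading the value at
// kElementsStartOffset + kPointerSize into 'result'.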
1666

    
1667

    
1668
void MacroAssembler::Allocate(int object_size,
1669
                              Register result,
1670
                              Register scratch1,
1671
                              Register scratch2,
1672
                              Label* gc_required,
1673
                              AllocationFlags flags) {
1674
  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
1675
  if (!FLAG_inline_new) {
1676
    if (emit_debug_code()) {
1677
      // Trash the registers to simulate an allocation failure.
1678
      mov(result, Operand(0x7091));
1679
      mov(scratch1, Operand(0x7191));
1680
      mov(scratch2, Operand(0x7291));
1681
    }
1682
    jmp(gc_required);
1683
    return;
1684
  }
1685

    
1686
  ASSERT(!result.is(scratch1));
1687
  ASSERT(!result.is(scratch2));
1688
  ASSERT(!scratch1.is(scratch2));
1689
  ASSERT(!scratch1.is(ip));
1690
  ASSERT(!scratch2.is(ip));
1691

    
1692
  // Make object size into bytes.
1693
  if ((flags & SIZE_IN_WORDS) != 0) {
1694
    object_size *= kPointerSize;
1695
  }
1696
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
1697

    
1698
  // Check relative positions of allocation top and limit addresses.
1699
  // The values must be adjacent in memory to allow the use of LDM.
1700
  // Also, assert that the registers are numbered such that the values
1701
  // are loaded in the correct order.
1702
  ExternalReference allocation_top =
1703
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1704
  ExternalReference allocation_limit =
1705
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1706

    
1707
  intptr_t top   =
1708
      reinterpret_cast<intptr_t>(allocation_top.address());
1709
  intptr_t limit =
1710
      reinterpret_cast<intptr_t>(allocation_limit.address());
1711
  ASSERT((limit - top) == kPointerSize);
1712
  ASSERT(result.code() < ip.code());
1713

    
1714
  // Set up allocation top address register.
1715
  Register topaddr = scratch1;
1716
  mov(topaddr, Operand(allocation_top));
1717

    
1718
  // This code stores a temporary value in ip. This is OK, as the code below
1719
  // does not need ip for implicit literal generation.
1720
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1721
    // Load allocation top into result and allocation limit into ip.
1722
    ldm(ia, topaddr, result.bit() | ip.bit());
1723
  } else {
1724
    if (emit_debug_code()) {
1725
      // Assert that result actually contains top on entry. ip is used
1726
      // immediately below so this use of ip does not cause a difference with
1727
      // respect to register content between debug and release mode.
1728
      ldr(ip, MemOperand(topaddr));
1729
      cmp(result, ip);
1730
      Check(eq, kUnexpectedAllocationTop);
1731
    }
1732
    // Load allocation limit into ip. Result already contains allocation top.
1733
    ldr(ip, MemOperand(topaddr, limit - top));
1734
  }
1735

    
1736
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1737
    // Align the next allocation. Storing the filler map without checking top is
1738
    // safe in new-space because the limit of the heap is aligned there.
1739
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1740
    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1741
    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1742
    Label aligned;
1743
    b(eq, &aligned);
1744
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1745
      cmp(result, Operand(ip));
1746
      b(hs, gc_required);
1747
    }
1748
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1749
    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1750
    bind(&aligned);
1751
  }
1752

    
1753
  // Calculate new top and bail out if new space is exhausted. Use result
1754
  // to calculate the new top. We must preserve the ip register at this
1755
  // point, so we cannot just use add().
1756
  ASSERT(object_size > 0);
1757
  Register source = result;
1758
  Condition cond = al;
1759
  int shift = 0;
1760
  while (object_size != 0) {
1761
    if (((object_size >> shift) & 0x03) == 0) {
1762
      shift += 2;
1763
    } else {
1764
      int bits = object_size & (0xff << shift);
1765
      object_size -= bits;
1766
      shift += 8;
1767
      Operand bits_operand(bits);
1768
      ASSERT(bits_operand.is_single_instruction(this));
1769
      add(scratch2, source, bits_operand, SetCC, cond);
1770
      source = scratch2;
1771
      cond = cc;
1772
    }
1773
  }
1774
  b(cs, gc_required);
1775
  cmp(scratch2, Operand(ip));
1776
  b(hi, gc_required);
1777
  str(scratch2, MemOperand(topaddr));
1778

    
1779
  // Tag object if requested.
1780
  if ((flags & TAG_OBJECT) != 0) {
1781
    add(result, result, Operand(kHeapObjectTag));
1782
  }
1783
}
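
// Editor's note: the while loop above splits the constant object size into
// addends that each fit a single ARM add immediate (an 8-bit chunk starting
// at an even bit position), so at most four adds are emitted for a 32-bit
// size. As an illustrative sketch only (this helper is not part of the
// original file and is unused), the same decomposition in plain C++:
inline int DecomposeObjectSizeSketch(int object_size, int* addends) {
  int count = 0;
  int shift = 0;
  while (object_size != 0) {
    if (((object_size >> shift) & 0x03) == 0) {
      shift += 2;  // Skip a zero pair of bits.
    } else {
      int bits = object_size & (0xff << shift);  // Take the next 8-bit chunk.
      object_size -= bits;
      shift += 8;
      addends[count++] = bits;  // Each chunk encodes as one add immediate.
    }
  }
  return count;
}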
1784

    
1785

    
1786
void MacroAssembler::Allocate(Register object_size,
1787
                              Register result,
1788
                              Register scratch1,
1789
                              Register scratch2,
1790
                              Label* gc_required,
1791
                              AllocationFlags flags) {
1792
  if (!FLAG_inline_new) {
1793
    if (emit_debug_code()) {
1794
      // Trash the registers to simulate an allocation failure.
1795
      mov(result, Operand(0x7091));
1796
      mov(scratch1, Operand(0x7191));
1797
      mov(scratch2, Operand(0x7291));
1798
    }
1799
    jmp(gc_required);
1800
    return;
1801
  }
1802

    
1803
  // Assert that the register arguments are different and that none of
1804
  // them are ip. ip is used explicitly in the code generated below.
1805
  ASSERT(!result.is(scratch1));
1806
  ASSERT(!result.is(scratch2));
1807
  ASSERT(!scratch1.is(scratch2));
1808
  ASSERT(!object_size.is(ip));
1809
  ASSERT(!result.is(ip));
1810
  ASSERT(!scratch1.is(ip));
1811
  ASSERT(!scratch2.is(ip));
1812

    
1813
  // Check relative positions of allocation top and limit addresses.
1814
  // The values must be adjacent in memory to allow the use of LDM.
1815
  // Also, assert that the registers are numbered such that the values
1816
  // are loaded in the correct order.
1817
  ExternalReference allocation_top =
1818
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
1819
  ExternalReference allocation_limit =
1820
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1821
  intptr_t top =
1822
      reinterpret_cast<intptr_t>(allocation_top.address());
1823
  intptr_t limit =
1824
      reinterpret_cast<intptr_t>(allocation_limit.address());
1825
  ASSERT((limit - top) == kPointerSize);
1826
  ASSERT(result.code() < ip.code());
1827

    
1828
  // Set up allocation top address.
1829
  Register topaddr = scratch1;
1830
  mov(topaddr, Operand(allocation_top));
1831

    
1832
  // This code stores a temporary value in ip. This is OK, as the code below
1833
  // does not need ip for implicit literal generation.
1834
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
1835
    // Load allocation top into result and allocation limit into ip.
1836
    ldm(ia, topaddr, result.bit() | ip.bit());
1837
  } else {
1838
    if (emit_debug_code()) {
1839
      // Assert that result actually contains top on entry. ip is used
1840
      // immediately below so this use of ip does not cause a difference with
1841
      // respect to register content between debug and release mode.
1842
      ldr(ip, MemOperand(topaddr));
1843
      cmp(result, ip);
1844
      Check(eq, kUnexpectedAllocationTop);
1845
    }
1846
    // Load allocation limit into ip. Result already contains allocation top.
1847
    ldr(ip, MemOperand(topaddr, limit - top));
1848
  }
1849

    
1850
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
1851
    // Align the next allocation. Storing the filler map without checking top is
1852
    // safe in new-space because the limit of the heap is aligned there.
1853
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1854
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1855
    and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
1856
    Label aligned;
1857
    b(eq, &aligned);
1858
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1859
      cmp(result, Operand(ip));
1860
      b(hs, gc_required);
1861
    }
1862
    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1863
    str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex));
1864
    bind(&aligned);
1865
  }
1866

    
1867
  // Calculate new top and bail out if new space is exhausted. Use result
1868
  // to calculate the new top. Object size may be in words so a shift is
1869
  // required to get the number of bytes.
1870
  if ((flags & SIZE_IN_WORDS) != 0) {
1871
    add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1872
  } else {
1873
    add(scratch2, result, Operand(object_size), SetCC);
1874
  }
1875
  b(cs, gc_required);
1876
  cmp(scratch2, Operand(ip));
1877
  b(hi, gc_required);
1878

    
1879
  // Update allocation top. result temporarily holds the new top.
1880
  if (emit_debug_code()) {
1881
    tst(scratch2, Operand(kObjectAlignmentMask));
1882
    Check(eq, kUnalignedAllocationInNewSpace);
1883
  }
1884
  str(scratch2, MemOperand(topaddr));
1885

    
1886
  // Tag object if requested.
1887
  if ((flags & TAG_OBJECT) != 0) {
1888
    add(result, result, Operand(kHeapObjectTag));
1889
  }
1890
}
1891

    
1892

    
1893
void MacroAssembler::UndoAllocationInNewSpace(Register object,
1894
                                              Register scratch) {
1895
  ExternalReference new_space_allocation_top =
1896
      ExternalReference::new_space_allocation_top_address(isolate());
1897

    
1898
  // Make sure the object has no tag before resetting top.
1899
  and_(object, object, Operand(~kHeapObjectTagMask));
1900
#ifdef DEBUG
1901
  // Check that the object being un-allocated is below the current top.
1902
  mov(scratch, Operand(new_space_allocation_top));
1903
  ldr(scratch, MemOperand(scratch));
1904
  cmp(object, scratch);
1905
  Check(lt, kUndoAllocationOfNonAllocatedMemory);
1906
#endif
1907
  // Write the address of the object to un-allocate as the current top.
1908
  mov(scratch, Operand(new_space_allocation_top));
1909
  str(object, MemOperand(scratch));
1910
}
1911

    
1912

    
1913
void MacroAssembler::AllocateTwoByteString(Register result,
1914
                                           Register length,
1915
                                           Register scratch1,
1916
                                           Register scratch2,
1917
                                           Register scratch3,
1918
                                           Label* gc_required) {
1919
  // Calculate the number of bytes needed for the characters in the string while
1920
  // observing object alignment.
1921
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1922
  mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
1923
  add(scratch1, scratch1,
1924
      Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1925
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1926

    
1927
  // Allocate two-byte string in new space.
1928
  Allocate(scratch1,
1929
           result,
1930
           scratch2,
1931
           scratch3,
1932
           gc_required,
1933
           TAG_OBJECT);
1934

    
1935
  // Set the map, length and hash field.
1936
  InitializeNewString(result,
1937
                      length,
1938
                      Heap::kStringMapRootIndex,
1939
                      scratch1,
1940
                      scratch2);
1941
}
1942

    
1943

    
1944
void MacroAssembler::AllocateAsciiString(Register result,
1945
                                         Register length,
1946
                                         Register scratch1,
1947
                                         Register scratch2,
1948
                                         Register scratch3,
1949
                                         Label* gc_required) {
1950
  // Calculate the number of bytes needed for the characters in the string while
1951
  // observing object alignment.
1952
  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1953
  ASSERT(kCharSize == 1);
1954
  add(scratch1, length,
1955
      Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1956
  and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
1957

    
1958
  // Allocate ASCII string in new space.
1959
  Allocate(scratch1,
1960
           result,
1961
           scratch2,
1962
           scratch3,
1963
           gc_required,
1964
           TAG_OBJECT);
1965

    
1966
  // Set the map, length and hash field.
1967
  InitializeNewString(result,
1968
                      length,
1969
                      Heap::kAsciiStringMapRootIndex,
1970
                      scratch1,
1971
                      scratch2);
1972
}
1973

    
1974

    
1975
void MacroAssembler::AllocateTwoByteConsString(Register result,
1976
                                               Register length,
1977
                                               Register scratch1,
1978
                                               Register scratch2,
1979
                                               Label* gc_required) {
1980
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1981
           TAG_OBJECT);
1982

    
1983
  InitializeNewString(result,
1984
                      length,
1985
                      Heap::kConsStringMapRootIndex,
1986
                      scratch1,
1987
                      scratch2);
1988
}
1989

    
1990

    
1991
void MacroAssembler::AllocateAsciiConsString(Register result,
1992
                                             Register length,
1993
                                             Register scratch1,
1994
                                             Register scratch2,
1995
                                             Label* gc_required) {
1996
  Label allocate_new_space, install_map;
1997
  AllocationFlags flags = TAG_OBJECT;
1998

    
1999
  ExternalReference high_promotion_mode = ExternalReference::
2000
      new_space_high_promotion_mode_active_address(isolate());
2001
  mov(scratch1, Operand(high_promotion_mode));
2002
  ldr(scratch1, MemOperand(scratch1, 0));
2003
  cmp(scratch1, Operand::Zero());
2004
  b(eq, &allocate_new_space);
2005

    
2006
  Allocate(ConsString::kSize,
2007
           result,
2008
           scratch1,
2009
           scratch2,
2010
           gc_required,
2011
           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
2012

    
2013
  jmp(&install_map);
2014

    
2015
  bind(&allocate_new_space);
2016
  Allocate(ConsString::kSize,
2017
           result,
2018
           scratch1,
2019
           scratch2,
2020
           gc_required,
2021
           flags);
2022

    
2023
  bind(&install_map);
2024

    
2025
  InitializeNewString(result,
2026
                      length,
2027
                      Heap::kConsAsciiStringMapRootIndex,
2028
                      scratch1,
2029
                      scratch2);
2030
}
2031

    
2032

    
2033
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
2034
                                                 Register length,
2035
                                                 Register scratch1,
2036
                                                 Register scratch2,
2037
                                                 Label* gc_required) {
2038
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2039
           TAG_OBJECT);
2040

    
2041
  InitializeNewString(result,
2042
                      length,
2043
                      Heap::kSlicedStringMapRootIndex,
2044
                      scratch1,
2045
                      scratch2);
2046
}
2047

    
2048

    
2049
void MacroAssembler::AllocateAsciiSlicedString(Register result,
2050
                                               Register length,
2051
                                               Register scratch1,
2052
                                               Register scratch2,
2053
                                               Label* gc_required) {
2054
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
2055
           TAG_OBJECT);
2056

    
2057
  InitializeNewString(result,
2058
                      length,
2059
                      Heap::kSlicedAsciiStringMapRootIndex,
2060
                      scratch1,
2061
                      scratch2);
2062
}
2063

    
2064

    
2065
void MacroAssembler::CompareObjectType(Register object,
2066
                                       Register map,
2067
                                       Register type_reg,
2068
                                       InstanceType type) {
2069
  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2070
  CompareInstanceType(map, type_reg, type);
2071
}
2072

    
2073

    
2074
void MacroAssembler::CompareInstanceType(Register map,
2075
                                         Register type_reg,
2076
                                         InstanceType type) {
2077
  ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
2078
  cmp(type_reg, Operand(type));
2079
}
2080

    
2081

    
2082
void MacroAssembler::CompareRoot(Register obj,
2083
                                 Heap::RootListIndex index) {
2084
  ASSERT(!obj.is(ip));
2085
  LoadRoot(ip, index);
2086
  cmp(obj, ip);
2087
}
2088

    
2089

    
2090
void MacroAssembler::CheckFastElements(Register map,
2091
                                       Register scratch,
2092
                                       Label* fail) {
2093
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2094
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2095
  STATIC_ASSERT(FAST_ELEMENTS == 2);
2096
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2097
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2098
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2099
  b(hi, fail);
2100
}
2101

    
2102

    
2103
void MacroAssembler::CheckFastObjectElements(Register map,
2104
                                             Register scratch,
2105
                                             Label* fail) {
2106
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2107
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2108
  STATIC_ASSERT(FAST_ELEMENTS == 2);
2109
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2110
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2111
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2112
  b(ls, fail);
2113
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
2114
  b(hi, fail);
2115
}
2116

    
2117

    
2118
void MacroAssembler::CheckFastSmiElements(Register map,
2119
                                          Register scratch,
2120
                                          Label* fail) {
2121
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2122
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2123
  ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
2124
  cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
2125
  b(hi, fail);
2126
}
2127

    
2128

    
2129
void MacroAssembler::StoreNumberToDoubleElements(
2130
                                      Register value_reg,
2131
                                      Register key_reg,
2132
                                      Register elements_reg,
2133
                                      Register scratch1,
2134
                                      LowDwVfpRegister double_scratch,
2135
                                      Label* fail,
2136
                                      int elements_offset) {
2137
  Label smi_value, store;
2138

    
2139
  // Handle smi values specially.
2140
  JumpIfSmi(value_reg, &smi_value);
2141

    
2142
  // Ensure that the object is a heap number
2143
  CheckMap(value_reg,
2144
           scratch1,
2145
           isolate()->factory()->heap_number_map(),
2146
           fail,
2147
           DONT_DO_SMI_CHECK);
2148

    
2149
  vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2150
  // Force a canonical NaN.
2151
  if (emit_debug_code()) {
2152
    vmrs(ip);
2153
    tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2154
    Assert(ne, kDefaultNaNModeNotSet);
2155
  }
2156
  VFPCanonicalizeNaN(double_scratch);
2157
  b(&store);
2158

    
2159
  bind(&smi_value);
2160
  SmiToDouble(double_scratch, value_reg);
2161

    
2162
  bind(&store);
2163
  add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2164
  vstr(double_scratch,
2165
       FieldMemOperand(scratch1,
2166
                       FixedDoubleArray::kHeaderSize - elements_offset));
2167
}
2168

    
2169

    
2170
void MacroAssembler::CompareMap(Register obj,
2171
                                Register scratch,
2172
                                Handle<Map> map,
2173
                                Label* early_success) {
2174
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2175
  CompareMap(scratch, map, early_success);
2176
}
2177

    
2178

    
2179
void MacroAssembler::CompareMap(Register obj_map,
2180
                                Handle<Map> map,
2181
                                Label* early_success) {
2182
  cmp(obj_map, Operand(map));
2183
}
2184

    
2185

    
2186
void MacroAssembler::CheckMap(Register obj,
2187
                              Register scratch,
2188
                              Handle<Map> map,
2189
                              Label* fail,
2190
                              SmiCheckType smi_check_type) {
2191
  if (smi_check_type == DO_SMI_CHECK) {
2192
    JumpIfSmi(obj, fail);
2193
  }
2194

    
2195
  Label success;
2196
  CompareMap(obj, scratch, map, &success);
2197
  b(ne, fail);
2198
  bind(&success);
2199
}
2200

    
2201

    
2202
void MacroAssembler::CheckMap(Register obj,
2203
                              Register scratch,
2204
                              Heap::RootListIndex index,
2205
                              Label* fail,
2206
                              SmiCheckType smi_check_type) {
2207
  if (smi_check_type == DO_SMI_CHECK) {
2208
    JumpIfSmi(obj, fail);
2209
  }
2210
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2211
  LoadRoot(ip, index);
2212
  cmp(scratch, ip);
2213
  b(ne, fail);
2214
}
2215

    
2216

    
2217
void MacroAssembler::DispatchMap(Register obj,
2218
                                 Register scratch,
2219
                                 Handle<Map> map,
2220
                                 Handle<Code> success,
2221
                                 SmiCheckType smi_check_type) {
2222
  Label fail;
2223
  if (smi_check_type == DO_SMI_CHECK) {
2224
    JumpIfSmi(obj, &fail);
2225
  }
2226
  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2227
  mov(ip, Operand(map));
2228
  cmp(scratch, ip);
2229
  Jump(success, RelocInfo::CODE_TARGET, eq);
2230
  bind(&fail);
2231
}
2232

    
2233

    
2234
void MacroAssembler::TryGetFunctionPrototype(Register function,
2235
                                             Register result,
2236
                                             Register scratch,
2237
                                             Label* miss,
2238
                                             bool miss_on_bound_function) {
2239
  // Check that the receiver isn't a smi.
2240
  JumpIfSmi(function, miss);
2241

    
2242
  // Check that the function really is a function.  Load map into result reg.
2243
  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2244
  b(ne, miss);
2245

    
2246
  if (miss_on_bound_function) {
2247
    ldr(scratch,
2248
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2249
    ldr(scratch,
2250
        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2251
    tst(scratch,
2252
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
2253
    b(ne, miss);
2254
  }
2255

    
2256
  // Make sure that the function has an instance prototype.
2257
  Label non_instance;
2258
  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2259
  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
2260
  b(ne, &non_instance);
2261

    
2262
  // Get the prototype or initial map from the function.
2263
  ldr(result,
2264
      FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2265

    
2266
  // If the prototype or initial map is the hole, don't return it and
2267
  // simply miss the cache instead. This will allow us to allocate a
2268
  // prototype object on-demand in the runtime system.
2269
  LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2270
  cmp(result, ip);
2271
  b(eq, miss);
2272

    
2273
  // If the function does not have an initial map, we're done.
2274
  Label done;
2275
  CompareObjectType(result, scratch, scratch, MAP_TYPE);
2276
  b(ne, &done);
2277

    
2278
  // Get the prototype from the initial map.
2279
  ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
2280
  jmp(&done);
2281

    
2282
  // Non-instance prototype: Fetch prototype from constructor field
2283
  // in initial map.
2284
  bind(&non_instance);
2285
  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
2286

    
2287
  // All done.
2288
  bind(&done);
2289
}
2290

    
2291

    
2292
void MacroAssembler::CallStub(CodeStub* stub,
2293
                              TypeFeedbackId ast_id,
2294
                              Condition cond) {
2295
  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
2296
  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond);
2297
}
2298

    
2299

    
2300
void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2301
  ASSERT(allow_stub_calls_ ||
2302
         stub->CompilingCallsToThisStubIsGCSafe(isolate()));
2303
  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond);
2304
}
2305

    
2306

    
2307
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2308
  return ref0.address() - ref1.address();
2309
}
2310

    
2311

    
2312
void MacroAssembler::CallApiFunctionAndReturn(
2313
    ExternalReference function,
2314
    Address function_address,
2315
    ExternalReference thunk_ref,
2316
    Register thunk_last_arg,
2317
    int stack_space,
2318
    MemOperand return_value_operand,
2319
    MemOperand* context_restore_operand) {
2320
  ExternalReference next_address =
2321
      ExternalReference::handle_scope_next_address(isolate());
2322
  const int kNextOffset = 0;
2323
  const int kLimitOffset = AddressOffset(
2324
      ExternalReference::handle_scope_limit_address(isolate()),
2325
      next_address);
2326
  const int kLevelOffset = AddressOffset(
2327
      ExternalReference::handle_scope_level_address(isolate()),
2328
      next_address);
2329

    
2330
  ASSERT(!thunk_last_arg.is(r3));
2331

    
2332
  // Allocate HandleScope in callee-save registers.
2333
  mov(r9, Operand(next_address));
2334
  ldr(r4, MemOperand(r9, kNextOffset));
2335
  ldr(r5, MemOperand(r9, kLimitOffset));
2336
  ldr(r6, MemOperand(r9, kLevelOffset));
2337
  add(r6, r6, Operand(1));
2338
  str(r6, MemOperand(r9, kLevelOffset));
2339

    
2340
  if (FLAG_log_timer_events) {
2341
    FrameScope frame(this, StackFrame::MANUAL);
2342
    PushSafepointRegisters();
2343
    PrepareCallCFunction(1, r0);
2344
    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2345
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2346
    PopSafepointRegisters();
2347
  }
2348

    
2349
  Label profiler_disabled;
2350
  Label end_profiler_check;
2351
  bool* is_profiling_flag =
2352
      isolate()->cpu_profiler()->is_profiling_address();
2353
  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
2354
  mov(r3, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
2355
  ldrb(r3, MemOperand(r3, 0));
2356
  cmp(r3, Operand(0));
2357
  b(eq, &profiler_disabled);
2358

    
2359
  // Additional parameter is the address of the actual callback.
2360
  mov(thunk_last_arg, Operand(reinterpret_cast<int32_t>(function_address)));
2361
  mov(r3, Operand(thunk_ref));
2362
  jmp(&end_profiler_check);
2363

    
2364
  bind(&profiler_disabled);
2365
  mov(r3, Operand(function));
2366
  bind(&end_profiler_check);
2367

    
2368
  // Native call returns to the DirectCEntry stub which redirects to the
2369
  // return address pushed on the stack (it could have moved after GC).
2370
  // DirectCEntry stub itself is generated early and never moves.
2371
  DirectCEntryStub stub;
2372
  stub.GenerateCall(this, r3);
2373

    
2374
  if (FLAG_log_timer_events) {
2375
    FrameScope frame(this, StackFrame::MANUAL);
2376
    PushSafepointRegisters();
2377
    PrepareCallCFunction(1, r0);
2378
    mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2379
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2380
    PopSafepointRegisters();
2381
  }
2382

    
2383
  Label promote_scheduled_exception;
2384
  Label exception_handled;
2385
  Label delete_allocated_handles;
2386
  Label leave_exit_frame;
2387
  Label return_value_loaded;
2388

    
2389
  // Load the value from ReturnValue.
2390
  ldr(r0, return_value_operand);
2391
  bind(&return_value_loaded);
2392
  // No more valid handles (the result handle was the last one). Restore
2393
  // previous handle scope.
2394
  str(r4, MemOperand(r9, kNextOffset));
2395
  if (emit_debug_code()) {
2396
    ldr(r1, MemOperand(r9, kLevelOffset));
2397
    cmp(r1, r6);
2398
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2399
  }
2400
  sub(r6, r6, Operand(1));
2401
  str(r6, MemOperand(r9, kLevelOffset));
2402
  ldr(ip, MemOperand(r9, kLimitOffset));
2403
  cmp(r5, ip);
2404
  b(ne, &delete_allocated_handles);
2405

    
2406
  // Check if the function scheduled an exception.
2407
  bind(&leave_exit_frame);
2408
  LoadRoot(r4, Heap::kTheHoleValueRootIndex);
2409
  mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2410
  ldr(r5, MemOperand(ip));
2411
  cmp(r4, r5);
2412
  b(ne, &promote_scheduled_exception);
2413
  bind(&exception_handled);
2414

    
2415
  bool restore_context = context_restore_operand != NULL;
2416
  if (restore_context) {
2417
    ldr(cp, *context_restore_operand);
2418
  }
2419
  // LeaveExitFrame expects unwind space to be in a register.
2420
  mov(r4, Operand(stack_space));
2421
  LeaveExitFrame(false, r4, !restore_context);
2422
  mov(pc, lr);
2423

    
2424
  bind(&promote_scheduled_exception);
2425
  {
2426
    FrameScope frame(this, StackFrame::INTERNAL);
2427
    CallExternalReference(
2428
        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
2429
        0);
2430
  }
2431
  jmp(&exception_handled);
2432

    
2433
  // HandleScope limit has changed. Delete allocated extensions.
2434
  bind(&delete_allocated_handles);
2435
  str(r5, MemOperand(r9, kLimitOffset));
2436
  mov(r4, r0);
2437
  PrepareCallCFunction(1, r5);
2438
  mov(r0, Operand(ExternalReference::isolate_address(isolate())));
2439
  CallCFunction(
2440
      ExternalReference::delete_handle_scope_extensions(isolate()), 1);
2441
  mov(r0, r4);
2442
  jmp(&leave_exit_frame);
2443
}
2444

    
2445

    
2446
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2447
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
2448
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
2449
}
2450

    
2451

    
2452
void MacroAssembler::IllegalOperation(int num_arguments) {
2453
  if (num_arguments > 0) {
2454
    add(sp, sp, Operand(num_arguments * kPointerSize));
2455
  }
2456
  LoadRoot(r0, Heap::kUndefinedValueRootIndex);
2457
}
2458

    
2459

    
2460
void MacroAssembler::IndexFromHash(Register hash, Register index) {
2461
  // If the hash field contains an array index, pick it out. The assert checks
2462
  // that the constants for the maximum number of digits for an array index
2463
  // cached in the hash field and the number of bits reserved for it do not
2464
  // conflict.
2465
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
2466
         (1 << String::kArrayIndexValueBits));
2467
  // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
2468
  // the low kHashShift bits.
2469
  Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
2470
  SmiTag(index, hash);
2471
}
2472

    
2473

    
2474
void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
2475
  if (CpuFeatures::IsSupported(VFP3)) {
2476
    vmov(value.low(), smi);
2477
    vcvt_f64_s32(value, 1);
2478
  } else {
2479
    SmiUntag(ip, smi);
2480
    vmov(value.low(), ip);
2481
    vcvt_f64_s32(value, value.low());
2482
  }
2483
}
2484

    
2485

    
2486
void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
2487
                                       LowDwVfpRegister double_scratch) {
2488
  ASSERT(!double_input.is(double_scratch));
2489
  vcvt_s32_f64(double_scratch.low(), double_input);
2490
  vcvt_f64_s32(double_scratch, double_scratch.low());
2491
  VFPCompareAndSetFlags(double_input, double_scratch);
2492
}
2493

    
2494

    
2495
void MacroAssembler::TryDoubleToInt32Exact(Register result,
2496
                                           DwVfpRegister double_input,
2497
                                           LowDwVfpRegister double_scratch) {
2498
  ASSERT(!double_input.is(double_scratch));
2499
  vcvt_s32_f64(double_scratch.low(), double_input);
2500
  vmov(result, double_scratch.low());
2501
  vcvt_f64_s32(double_scratch, double_scratch.low());
2502
  VFPCompareAndSetFlags(double_input, double_scratch);
2503
}
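
// Editor's note: the two conversions above implement a round-trip exactness
// test. An illustrative C++ equivalent for in-range, non-NaN inputs (a sketch
// that is not part of the original file and is unused; the VFP path also sets
// the condition flags and saturates out-of-range values):
inline bool DoubleIsInt32ExactSketch(double input, int* result) {
  *result = static_cast<int>(input);             // Round toward zero.
  return static_cast<double>(*result) == input;  // Exact iff round-trip equal.
}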
2504

    
2505

    
2506
void MacroAssembler::TryInt32Floor(Register result,
2507
                                   DwVfpRegister double_input,
2508
                                   Register input_high,
2509
                                   LowDwVfpRegister double_scratch,
2510
                                   Label* done,
2511
                                   Label* exact) {
2512
  ASSERT(!result.is(input_high));
2513
  ASSERT(!double_input.is(double_scratch));
2514
  Label negative, exception;
2515

    
2516
  VmovHigh(input_high, double_input);
2517

    
2518
  // Test for NaN and infinities.
2519
  Sbfx(result, input_high,
2520
       HeapNumber::kExponentShift, HeapNumber::kExponentBits);
2521
  cmp(result, Operand(-1));
2522
  b(eq, &exception);
2523
  // Test for values that can be exactly represented as a
2524
  // signed 32-bit integer.
2525
  TryDoubleToInt32Exact(result, double_input, double_scratch);
2526
  // If exact, return (result already fetched).
2527
  b(eq, exact);
2528
  cmp(input_high, Operand::Zero());
2529
  b(mi, &negative);
2530

    
2531
  // Input is in ]+0, +inf[.
2532
  // If result equals 0x7fffffff, the input was out of range or
2533
  // in ]0x7fffffff, 0x80000000[. We ignore this last case, which
2534
  // could fit into an int32; that means we always treat the input as
2535
  // out of range and always take the exception path.
2536
  // If result < 0x7fffffff, go to done, result fetched.
2537
  cmn(result, Operand(1));
2538
  b(mi, &exception);
2539
  b(done);
2540

    
2541
  // Input is in ]-inf, -0[.
2542
  // If x is a non-integer negative number,
2543
  // floor(x) <=> round_to_zero(x) - 1.
2544
  bind(&negative);
2545
  sub(result, result, Operand(1), SetCC);
2546
  // If result is still negative, go to done, result fetched.
2547
  // Else, we had an overflow and we fall through to the exception path.
2548
  b(mi, done);
2549
  bind(&exception);
2550
}
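
// Editor's note: a scalar sketch of the floor strategy used above, valid for
// in-range, non-NaN inputs only (illustrative, not part of the original file
// and unused; the real code routes NaNs, infinities and out-of-range values
// to the exception path):
inline int FloorToInt32Sketch(double input) {
  int truncated = static_cast<int>(input);  // Round toward zero.
  if (static_cast<double>(truncated) == input) return truncated;  // Exact.
  return input > 0 ? truncated : truncated - 1;  // Negative non-integer.
}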
2551

    
2552
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2553
                                                DwVfpRegister double_input,
2554
                                                Label* done) {
2555
  LowDwVfpRegister double_scratch = kScratchDoubleReg;
2556
  vcvt_s32_f64(double_scratch.low(), double_input);
2557
  vmov(result, double_scratch.low());
2558

    
2559
  // If result is not saturated (0x7fffffff or 0x80000000), we are done.
2560
  sub(ip, result, Operand(1));
2561
  cmp(ip, Operand(0x7ffffffe));
2562
  b(lt, done);
2563
}
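
// Editor's note (illustrative, not part of the original source): the sub/cmp
// pair above is a two-instruction test for "result is neither 0x7fffffff nor
// 0x80000000". For those two saturated values, result - 1 is 0x7ffffffe or
// 0x7fffffff, which fails the signed "< 0x7ffffffe" check; every other result
// passes it and branches to done.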
2564

    
2565

    
2566
void MacroAssembler::TruncateDoubleToI(Register result,
2567
                                       DwVfpRegister double_input) {
2568
  Label done;
2569

    
2570
  TryInlineTruncateDoubleToI(result, double_input, &done);
2571

    
2572
  // If we fell through, the inline version didn't succeed; call the stub.
2573
  push(lr);
2574
  sub(sp, sp, Operand(kDoubleSize));  // Put input on stack.
2575
  vstr(double_input, MemOperand(sp, 0));
2576

    
2577
  DoubleToIStub stub(sp, result, 0, true, true);
2578
  CallStub(&stub);
2579

    
2580
  add(sp, sp, Operand(kDoubleSize));
2581
  pop(lr);
2582

    
2583
  bind(&done);
2584
}
2585

    
2586

    
2587
void MacroAssembler::TruncateHeapNumberToI(Register result,
2588
                                           Register object) {
2589
  Label done;
2590
  LowDwVfpRegister double_scratch = kScratchDoubleReg;
2591
  ASSERT(!result.is(object));
2592

    
2593
  vldr(double_scratch,
2594
       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
2595
  TryInlineTruncateDoubleToI(result, double_scratch, &done);
2596

    
2597
  // If we fell through, the inline version didn't succeed; call the stub.
2598
  push(lr);
2599
  DoubleToIStub stub(object,
2600
                     result,
2601
                     HeapNumber::kValueOffset - kHeapObjectTag,
2602
                     true,
2603
                     true);
2604
  CallStub(&stub);
2605
  pop(lr);
2606

    
2607
  bind(&done);
2608
}
2609

    
2610

    
2611
void MacroAssembler::TruncateNumberToI(Register object,
2612
                                       Register result,
2613
                                       Register heap_number_map,
2614
                                       Register scratch1,
2615
                                       Label* not_number) {
2616
  Label done;
2617
  ASSERT(!result.is(object));
2618

    
2619
  UntagAndJumpIfSmi(result, object, &done);
2620
  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2621
  TruncateHeapNumberToI(result, object);
2622

    
2623
  bind(&done);
2624
}
2625

    
2626

    
2627
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
2628
                                         Register src,
2629
                                         int num_least_bits) {
2630
  if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
2631
    ubfx(dst, src, kSmiTagSize, num_least_bits);
2632
  } else {
2633
    SmiUntag(dst, src);
2634
    and_(dst, dst, Operand((1 << num_least_bits) - 1));
2635
  }
2636
}
2637

    
2638

    
2639
void MacroAssembler::GetLeastBitsFromInt32(Register dst,
2640
                                           Register src,
2641
                                           int num_least_bits) {
2642
  and_(dst, src, Operand((1 << num_least_bits) - 1));
2643
}
2644

    
2645

    
2646
void MacroAssembler::CallRuntime(const Runtime::Function* f,
2647
                                 int num_arguments,
2648
                                 SaveFPRegsMode save_doubles) {
2649
  // All parameters are on the stack.  r0 has the return value after call.
2650

    
2651
  // If the expected number of arguments of the runtime function is
2652
  // constant, we check that the actual number of arguments match the
2653
  // expectation.
2654
  if (f->nargs >= 0 && f->nargs != num_arguments) {
2655
    IllegalOperation(num_arguments);
2656
    return;
2657
  }
2658

    
2659
  // TODO(1236192): Most runtime routines don't need the number of
2660
  // arguments passed in because it is constant. At some point we
2661
  // should remove this need and make the runtime routine entry code
2662
  // smarter.
2663
  mov(r0, Operand(num_arguments));
2664
  mov(r1, Operand(ExternalReference(f, isolate())));
2665
  CEntryStub stub(1, save_doubles);
2666
  CallStub(&stub);
2667
}
2668

    
2669

    
2670
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2671
                                           int num_arguments) {
2672
  mov(r0, Operand(num_arguments));
2673
  mov(r1, Operand(ext));
2674

    
2675
  CEntryStub stub(1);
2676
  CallStub(&stub);
2677
}
2678

    
2679

    
2680
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2681
                                               int num_arguments,
2682
                                               int result_size) {
2683
  // TODO(1236192): Most runtime routines don't need the number of
2684
  // arguments passed in because it is constant. At some point we
2685
  // should remove this need and make the runtime routine entry code
2686
  // smarter.
2687
  mov(r0, Operand(num_arguments));
2688
  JumpToExternalReference(ext);
2689
}
2690

    
2691

    
2692
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
2693
                                     int num_arguments,
2694
                                     int result_size) {
2695
  TailCallExternalReference(ExternalReference(fid, isolate()),
2696
                            num_arguments,
2697
                            result_size);
2698
}
2699

    
2700

    
2701
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2702
#if defined(__thumb__)
2703
  // Thumb mode builtin.
2704
  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
2705
#endif
2706
  mov(r1, Operand(builtin));
2707
  CEntryStub stub(1);
2708
  Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
2709
}
2710

    
2711

    
2712
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
2713
                                   InvokeFlag flag,
2714
                                   const CallWrapper& call_wrapper) {
2715
  // You can't call a builtin without a valid frame.
2716
  ASSERT(flag == JUMP_FUNCTION || has_frame());
2717

    
2718
  GetBuiltinEntry(r2, id);
2719
  if (flag == CALL_FUNCTION) {
2720
    call_wrapper.BeforeCall(CallSize(r2));
2721
    SetCallKind(r5, CALL_AS_METHOD);
2722
    Call(r2);
2723
    call_wrapper.AfterCall();
2724
  } else {
2725
    ASSERT(flag == JUMP_FUNCTION);
2726
    SetCallKind(r5, CALL_AS_METHOD);
2727
    Jump(r2);
2728
  }
2729
}
2730

    
2731

    
2732
void MacroAssembler::GetBuiltinFunction(Register target,
2733
                                        Builtins::JavaScript id) {
2734
  // Load the builtins object into target register.
2735
  ldr(target,
2736
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2737
  ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2738
  // Load the JavaScript builtin function from the builtins object.
2739
  ldr(target, FieldMemOperand(target,
2740
                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
2741
}
2742

    
2743

    
2744
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2745
  ASSERT(!target.is(r1));
2746
  GetBuiltinFunction(r1, id);
2747
  // Load the code entry point from the builtins object.
2748
  ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
2749
}
2750

    
2751

    
2752
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2753
                                Register scratch1, Register scratch2) {
2754
  if (FLAG_native_code_counters && counter->Enabled()) {
2755
    mov(scratch1, Operand(value));
2756
    mov(scratch2, Operand(ExternalReference(counter)));
2757
    str(scratch1, MemOperand(scratch2));
2758
  }
2759
}
2760

    
2761

    
2762
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2763
                                      Register scratch1, Register scratch2) {
2764
  ASSERT(value > 0);
2765
  if (FLAG_native_code_counters && counter->Enabled()) {
2766
    mov(scratch2, Operand(ExternalReference(counter)));
2767
    ldr(scratch1, MemOperand(scratch2));
2768
    add(scratch1, scratch1, Operand(value));
2769
    str(scratch1, MemOperand(scratch2));
2770
  }
2771
}
2772

    
2773

    
2774
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2775
                                      Register scratch1, Register scratch2) {
2776
  ASSERT(value > 0);
2777
  if (FLAG_native_code_counters && counter->Enabled()) {
2778
    mov(scratch2, Operand(ExternalReference(counter)));
2779
    ldr(scratch1, MemOperand(scratch2));
2780
    sub(scratch1, scratch1, Operand(value));
2781
    str(scratch1, MemOperand(scratch2));
2782
  }
2783
}
2784

    
2785

    
2786
void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
2787
  if (emit_debug_code())
2788
    Check(cond, reason);
2789
}
2790

    
2791

    
2792
void MacroAssembler::AssertFastElements(Register elements) {
2793
  if (emit_debug_code()) {
2794
    ASSERT(!elements.is(ip));
2795
    Label ok;
2796
    push(elements);
2797
    ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2798
    LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2799
    cmp(elements, ip);
2800
    b(eq, &ok);
2801
    LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2802
    cmp(elements, ip);
2803
    b(eq, &ok);
2804
    LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2805
    cmp(elements, ip);
2806
    b(eq, &ok);
2807
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
2808
    bind(&ok);
2809
    pop(elements);
2810
  }
2811
}
2812

    
2813

    
2814
void MacroAssembler::Check(Condition cond, BailoutReason reason) {
2815
  Label L;
2816
  b(cond, &L);
2817
  Abort(reason);
2818
  // will not return here
2819
  bind(&L);
2820
}
2821

    
2822

    
2823
void MacroAssembler::Abort(BailoutReason reason) {
2824
  Label abort_start;
2825
  bind(&abort_start);
2826
  // We want to pass the msg string like a smi to avoid GC
2827
  // problems; however, msg is not guaranteed to be aligned
2828
  // properly. Instead, we pass an aligned pointer that is
2829
  // a proper v8 smi, but also pass the alignment difference
2830
  // from the real pointer as a smi.
2831
  const char* msg = GetBailoutReason(reason);
2832
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2833
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2834
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2835
#ifdef DEBUG
2836
  if (msg != NULL) {
2837
    RecordComment("Abort message: ");
2838
    RecordComment(msg);
2839
  }
2840

    
2841
  if (FLAG_trap_on_abort) {
2842
    stop(msg);
2843
    return;
2844
  }
2845
#endif
2846

    
2847
  mov(r0, Operand(p0));
2848
  push(r0);
2849
  mov(r0, Operand(Smi::FromInt(p1 - p0)));
2850
  push(r0);
2851
  // Disable stub call restrictions to always allow calls to abort.
2852
  if (!has_frame_) {
2853
    // We don't actually want to generate a pile of code for this, so just
2854
    // claim there is a stack frame, without generating one.
2855
    FrameScope scope(this, StackFrame::NONE);
2856
    CallRuntime(Runtime::kAbort, 2);
2857
  } else {
2858
    CallRuntime(Runtime::kAbort, 2);
2859
  }
2860
  // will not return here
2861
  if (is_const_pool_blocked()) {
2862
    // If the calling code cares about the exact number of
2863
    // instructions generated, we insert padding here to keep the size
2864
    // of the Abort macro constant.
2865
    static const int kExpectedAbortInstructions = 10;
2866
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
2867
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
2868
    while (abort_instructions++ < kExpectedAbortInstructions) {
2869
      nop();
2870
    }
2871
  }
2872
}
2873

    
2874

    
2875
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
  if (context_chain_length > 0) {
    // Move up the chain of contexts to the context containing the slot.
    ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    for (int i = 1; i < context_chain_length; i++) {
      ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
    }
  } else {
    // Slot is in the current function context.  Move it into the
    // destination register in case we store into it (the write barrier
    // cannot be allowed to destroy the context in cp).
    mov(dst, cp);
  }
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  ldr(scratch,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  ldr(scratch,
      MemOperand(scratch,
                 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
  size_t offset = expected_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  ldr(ip, FieldMemOperand(scratch, offset));
  cmp(map_in_out, ip);
  b(ne, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  ldr(map_in_out, FieldMemOperand(scratch, offset));
}


void MacroAssembler::LoadInitialArrayMap(
    Register function_in, Register scratch,
    Register map_out, bool can_have_holes) {
  ASSERT(!function_in.is(map_out));
  Label done;
  ldr(map_out, FieldMemOperand(function_in,
                               JSFunction::kPrototypeOrInitialMapOffset));
  if (!FLAG_smi_only_arrays) {
    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        kind,
                                        map_out,
                                        scratch,
                                        &done);
  } else if (can_have_holes) {
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        FAST_HOLEY_SMI_ELEMENTS,
                                        map_out,
                                        scratch,
                                        &done);
  }
  bind(&done);
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  ldr(function,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  ldr(function, MemOperand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadArrayFunction(Register function) {
  // Load the global or builtins object from the current context.
  ldr(function,
      MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the global context from the global or builtins object.
  ldr(function,
      FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
  // Load the array function from the native context.
  ldr(function,
      MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    b(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}

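// Uses the identity that for x > 0, (x & (x - 1)) == 0 iff x is a power of
// two; zero and negative values are caught by the sign of (x - 1).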
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg,
    Register scratch,
    Label* not_power_of_two_or_zero) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, not_power_of_two_or_zero);
  tst(scratch, reg);
  b(ne, not_power_of_two_or_zero);
}


void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(
    Register reg,
    Register scratch,
    Label* zero_and_neg,
    Label* not_power_of_two) {
  sub(scratch, reg, Operand(1), SetCC);
  b(mi, zero_and_neg);
  tst(scratch, reg);
  b(ne, not_power_of_two);
}

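// The second tst is predicated on eq, so reg2 is only tested when reg1 passed
// its smi check; the final ne branch therefore fires if either check failed.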
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                      Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), eq);
  b(ne, on_not_both_smi);
}


void MacroAssembler::UntagAndJumpIfSmi(
    Register dst, Register src, Label* smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  SmiUntag(dst, src, SetCC);
  b(cc, smi_case);  // Shifter carry is not set for a smi.
}


void MacroAssembler::UntagAndJumpIfNotSmi(
    Register dst, Register src, Label* non_smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  SmiUntag(dst, src, SetCC);
  b(cs, non_smi_case);  // Shifter carry is set for a non-smi.
}

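// Mirror image of JumpIfNotBothSmi: the second tst only executes when reg1 is
// not a smi, so the eq branch fires if at least one of the registers is a smi.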
void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                     Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  tst(reg1, Operand(kSmiTagMask));
  tst(reg2, Operand(kSmiTagMask), ne);
  b(eq, on_either_smi);
}


void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmi);
  }
}


void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(eq, kOperandIsNotSmi);
  }
}


void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmiAndNotAString);
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    tst(object, Operand(kSmiTagMask));
    Check(ne, kOperandIsASmiAndNotAName);
    push(object);
    ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, LAST_NAME_TYPE);
    pop(object);
    Check(le, kOperandIsNotAName);
  }
}


void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, kHeapNumberMapRegisterClobbered);
  }
}


void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                         Register heap_number_map,
                                         Register scratch,
                                         Label* on_not_heap_number) {
  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  cmp(scratch, heap_number_map);
  b(ne, on_not_heap_number);
}


void MacroAssembler::LookupNumberStringCache(Register object,
                                             Register result,
                                             Register scratch1,
                                             Register scratch2,
                                             Register scratch3,
                                             Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
  sub(mask, mask, Operand(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  JumpIfSmi(object, &is_smi);
  CheckMap(object,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           not_found,
           DONT_DO_SMI_CHECK);

  STATIC_ASSERT(8 == kDoubleSize);
  add(scratch1,
      object,
      Operand(HeapNumber::kValueOffset - kHeapObjectTag));
  ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
  eor(scratch1, scratch1, Operand(scratch2));
  and_(scratch1, scratch1, Operand(mask));

  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  add(scratch1,
      number_string_cache,
      Operand(scratch1, LSL, kPointerSizeLog2 + 1));

  Register probe = mask;
  ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  JumpIfSmi(probe, not_found);
  sub(scratch2, object, Operand(kHeapObjectTag));
  vldr(d0, scratch2, HeapNumber::kValueOffset);
  sub(probe, probe, Operand(kHeapObjectTag));
  vldr(d1, probe, HeapNumber::kValueOffset);
  VFPCompareAndSetFlags(d0, d1);
  b(ne, not_found);  // The cache did not contain this value.
  b(&load_result_from_cache);

  bind(&is_smi);
  Register scratch = scratch1;
  and_(scratch, mask, Operand(object, ASR, 1));
  // Calculate address of entry in string cache: each entry consists
  // of two pointer sized fields.
  add(scratch,
      number_string_cache,
      Operand(scratch, LSL, kPointerSizeLog2 + 1));

  // Check if the entry is the smi we are looking for.
  ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
  cmp(object, probe);
  b(ne, not_found);

  // Get the result from the cache.
  bind(&load_result_from_cache);
  ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
  IncrementCounter(isolate()->counters()->number_to_string_native(),
                   1,
                   scratch1,
                   scratch2);
}


void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  // Test that both first and second are sequential ASCII strings.
  // Assume that they are non-smis.
  ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
  ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
  ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                               scratch2,
                                               scratch1,
                                               scratch2,
                                               failure);
}

void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                         Register second,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Label* failure) {
  // Check that neither is a smi.
  and_(scratch1, first, Operand(second));
  JumpIfSmi(scratch1, failure);
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                             second,
                                             scratch1,
                                             scratch2,
                                             failure);
}


void MacroAssembler::JumpIfNotUniqueName(Register reg,
                                         Label* not_unique_name) {
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  Label succeed;
  tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  b(eq, &succeed);
  cmp(reg, Operand(SYMBOL_TYPE));
  b(ne, not_unique_name);

  bind(&succeed);
}


// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
                                        Register scratch1,
                                        Register scratch2,
                                        Register heap_number_map,
                                        Label* gc_required,
                                        TaggingMode tagging_mode) {
  // Allocate an object in the heap for the heap number and tag it as a heap
  // object.
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);

  // Store heap number map in the allocated object.
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  if (tagging_mode == TAG_RESULT) {
    str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
  } else {
    str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
  }
}


void MacroAssembler::AllocateHeapNumberWithValue(Register result,
                                                 DwVfpRegister value,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register heap_number_map,
                                                 Label* gc_required) {
  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
  sub(scratch1, result, Operand(kHeapObjectTag));
  vstr(value, scratch1, HeapNumber::kValueOffset);
}


// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
                                Register src,
                                LowDwVfpRegister double_scratch,
                                int field_count) {
  int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
  for (int i = 0; i < double_count; i++) {
    vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
    vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
  }

  STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
  STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);

  int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
  if (remain != 0) {
    vldr(double_scratch.low(),
         FieldMemOperand(src, (field_count - 1) * kPointerSize));
    vstr(double_scratch.low(),
         FieldMemOperand(dst, (field_count - 1) * kPointerSize));
  }
}


void MacroAssembler::CopyBytes(Register src,
                               Register dst,
                               Register length,
                               Register scratch) {
  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word size chunks.
  cmp(length, Operand(kPointerSize));
  b(le, &byte_loop);

  bind(&align_loop_1);
  tst(src, Operand(kPointerSize - 1));
  b(eq, &word_loop);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  sub(length, length, Operand(1), SetCC);
  b(&align_loop_1);
  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    tst(src, Operand(kPointerSize - 1));
    Assert(eq, kExpectingAlignmentForCopyBytes);
  }
  cmp(length, Operand(kPointerSize));
  b(lt, &byte_loop);
  ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
    str(scratch, MemOperand(dst, kPointerSize, PostIndex));
  } else {
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
    mov(scratch, Operand(scratch, LSR, 8));
    strb(scratch, MemOperand(dst, 1, PostIndex));
  }
  sub(length, length, Operand(kPointerSize));
  b(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  cmp(length, Operand::Zero());
  b(eq, &done);
  bind(&byte_loop_1);
  ldrb(scratch, MemOperand(src, 1, PostIndex));
  strb(scratch, MemOperand(dst, 1, PostIndex));
  sub(length, length, Operand(1), SetCC);
  b(ne, &byte_loop_1);
  bind(&done);
}


void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  b(&entry);
  bind(&loop);
  str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
  bind(&entry);
  cmp(start_offset, end_offset);
  b(lt, &loop);
}

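// Sets the condition flags from the VFP32DREGS feature bit: ne afterwards
// means d16-d31 are available, eq means only d0-d15 exist. SaveFPRegs and
// RestoreFPRegs below rely on these flags for their predicated vstm/vldm.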
void MacroAssembler::CheckFor32DRegs(Register scratch) {
  mov(scratch, Operand(ExternalReference::cpu_features()));
  ldr(scratch, MemOperand(scratch));
  tst(scratch, Operand(1u << VFP32DREGS));
}

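// Stores the d-registers below 'location' (descending, with writeback). When
// only 16 d-registers exist, the slots for d16-d31 are still reserved so the
// resulting frame layout is the same on every CPU.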
void MacroAssembler::SaveFPRegs(Register location, Register scratch) {
  CheckFor32DRegs(scratch);
  vstm(db_w, location, d16, d31, ne);
  sub(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
  vstm(db_w, location, d0, d15);
}


void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
  CheckFor32DRegs(scratch);
  vldm(ia_w, location, d0, d15);
  vldm(ia_w, location, d16, d31, ne);
  add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq);
}

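// A flat ASCII string has the string bit clear, one-byte encoding and a
// sequential representation, so masking the instance type with
// kFlatAsciiStringMask and comparing against kFlatAsciiStringTag checks all
// three properties in one go.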
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
    Register first,
    Register second,
    Register scratch1,
    Register scratch2,
    Label* failure) {
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  and_(scratch1, first, Operand(kFlatAsciiStringMask));
  and_(scratch2, second, Operand(kFlatAsciiStringMask));
  cmp(scratch1, Operand(kFlatAsciiStringTag));
  // Ignore second test if first test failed.
  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
  b(ne, failure);
}


void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
                                                            Register scratch,
                                                            Label* failure) {
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  and_(scratch, type, Operand(kFlatAsciiStringMask));
  cmp(scratch, Operand(kFlatAsciiStringTag));
  b(ne, failure);
}

static const int kRegisterPassedArguments = 4;

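// Worked example: with the soft-float ABI, 2 integer arguments plus 2 doubles
// count as 2 + 2 * 2 = 6 register-sized arguments, of which 6 - 4 = 2 spill to
// the stack. With the hard-float ABI the doubles travel in d0/d1, so nothing
// is spilled.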
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
                                              int num_double_arguments) {
  int stack_passed_words = 0;
  if (use_eabi_hardfloat()) {
    // In the hard floating point calling convention, we can use
    // all double registers to pass doubles.
    if (num_double_arguments > DoubleRegister::NumRegisters()) {
      stack_passed_words +=
          2 * (num_double_arguments - DoubleRegister::NumRegisters());
    }
  } else {
    // In the soft floating point calling convention, every double
    // argument is passed using two registers.
    num_reg_arguments += 2 * num_double_arguments;
  }
  // Up to four simple arguments are passed in registers r0..r3.
  if (num_reg_arguments > kRegisterPassedArguments) {
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
  }
  return stack_passed_words;
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          int num_double_arguments,
                                          Register scratch) {
  int frame_alignment = ActivationFrameAlignment();
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (frame_alignment > kPointerSize) {
    // Make stack end at alignment and make room for num_arguments - 4 words
    // and the original value of sp.
    mov(scratch, sp);
    sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
    ASSERT(IsPowerOf2(frame_alignment));
    and_(sp, sp, Operand(-frame_alignment));
    str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    sub(sp, sp, Operand(stack_passed_arguments * kPointerSize));
  }
}


void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
                                          Register scratch) {
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
}


void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
  if (use_eabi_hardfloat()) {
    Move(d0, dreg);
  } else {
    vmov(r0, r1, dreg);
  }
}


void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
                                             DwVfpRegister dreg2) {
  if (use_eabi_hardfloat()) {
    if (dreg2.is(d0)) {
      ASSERT(!dreg1.is(d1));
      Move(d1, dreg2);
      Move(d0, dreg1);
    } else {
      Move(d0, dreg1);
      Move(d1, dreg2);
    }
  } else {
    vmov(r0, r1, dreg1);
    vmov(r2, r3, dreg2);
  }
}


void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
                                             Register reg) {
  if (use_eabi_hardfloat()) {
    Move(d0, dreg);
    Move(r0, reg);
  } else {
    Move(r2, reg);
    vmov(r0, r1, dreg);
  }
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  mov(ip, Operand(function));
  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_reg_arguments,
                                   int num_double_arguments) {
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunction(Register function,
                                   int num_arguments) {
  CallCFunction(function, num_arguments, 0);
}


void MacroAssembler::CallCFunctionHelper(Register function,
                                         int num_reg_arguments,
                                         int num_double_arguments) {
  ASSERT(has_frame());
  // Make sure that the stack is aligned before calling a C function unless
  // running in the simulator. The simulator has its own alignment check which
  // provides more information.
#if V8_HOST_ARCH_ARM
  if (emit_debug_code()) {
    int frame_alignment = OS::ActivationFrameAlignment();
    int frame_alignment_mask = frame_alignment - 1;
    if (frame_alignment > kPointerSize) {
      ASSERT(IsPowerOf2(frame_alignment));
      Label alignment_as_expected;
      tst(sp, Operand(frame_alignment_mask));
      b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort possibly
      // re-entering here.
      stop("Unexpected alignment");
      bind(&alignment_as_expected);
    }
  }
#endif

  // Just call directly. The function called cannot cause a GC, or
  // allow preemption, so the return address in the link register
  // stays correct.
  Call(function);
  int stack_passed_arguments = CalculateStackPassedWords(
      num_reg_arguments, num_double_arguments);
  if (ActivationFrameAlignment() > kPointerSize) {
    ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
  } else {
    add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
  }
}

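// Given the address of a pc-relative "ldr reg, [pc, #offset]" instruction,
// computes the address of the constant pool entry it loads from. The extra
// 2 * kPointerSize accounts for the ARM pipeline: pc reads as the instruction
// address plus 8.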
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                               Register result) {
  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
  const int32_t kPCRegOffset = 2 * kPointerSize;
  ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    // Check that the instruction is a ldr reg, [pc + offset] .
    and_(result, result, Operand(kLdrPCPattern));
    cmp(result, Operand(kLdrPCPattern));
    Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
    // Result was clobbered. Restore it.
    ldr(result, MemOperand(ldr_location));
  }
  // Get the address of the constant.
  and_(result, result, Operand(kLdrOffsetMask));
  add(result, ldr_location, Operand(result));
  add(result, result, Operand(kPCRegOffset));
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met) {
  Bfc(scratch, object, 0, kPageSizeBits);
  ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
  tst(scratch, Operand(mask));
  b(cc, condition_met);
}


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    mov(scratch, Operand(map));
    ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
    tst(scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
    b(ne, if_deprecated);
  }
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register scratch0,
                                 Register scratch1,
                                 Label* on_black) {
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
}

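// An object's color is encoded in two consecutive marking-bitmap bits
// (first_bit / second_bit). mask_scratch selects the first bit; shifting the
// mask left by one (via the add below) selects the second, with special
// handling when the pair straddles a bitmap cell boundary.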
void MacroAssembler::HasColor(Register object,
                              Register bitmap_scratch,
                              Register mask_scratch,
                              Label* has_color,
                              int first_bit,
                              int second_bit) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));

  GetMarkBits(object, bitmap_scratch, mask_scratch);

  Label other_color, word_boundary;
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(ip, Operand(mask_scratch));
  b(first_bit == 1 ? eq : ne, &other_color);
  // Shift left 1 by adding.
  add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
  b(eq, &word_boundary);
  tst(ip, Operand(mask_scratch));
  b(second_bit == 1 ? ne : eq, has_color);
  jmp(&other_color);

  bind(&word_boundary);
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
  tst(ip, Operand(1));
  b(second_bit == 1 ? ne : eq, has_color);
  bind(&other_color);
}


// Detect some, but not all, common pointer-free objects.  This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(Register value,
                                      Register scratch,
                                      Label* not_data_object) {
  Label is_data_object;
  ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  b(eq, &is_data_object);
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
  tst(scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
  b(ne, not_data_object);
  bind(&is_data_object);
}

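// Splits an address into its marking-bitmap coordinates: bitmap_reg is left
// pointing at the page start offset by the bitmap cell index (callers add
// MemoryChunk::kHeaderSize when loading), and mask_reg holds the single bit
// that selects the object's mark bit within that cell.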
void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
  and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
  Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
  Ubfx(ip, addr_reg, kLowBits, kPageSizeBits - kLowBits);
  add(bitmap_reg, bitmap_reg, Operand(ip, LSL, kPointerSizeLog2));
  mov(ip, Operand(1));
  mov(mask_reg, Operand(ip, LSL, mask_reg));
}


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Register load_scratch,
    Label* value_is_white_and_not_data) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  tst(mask_scratch, load_scratch);
  b(ne, &done);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    // LSL may overflow, making the check conservative.
    tst(load_scratch, Operand(mask_scratch, LSL, 1));
    b(eq, &ok);
    stop("Impossible marking bit pattern");
    bind(&ok);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = load_scratch;  // Holds map while checking type.
  Register length = load_scratch;  // Holds length of object after testing type.
  Label is_data_object;

  // Check for heap-number
  ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  mov(length, Operand(HeapNumber::kSize), LeaveCC, eq);
  b(eq, &is_data_object);

  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  tst(instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
  b(ne, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  tst(instance_type, Operand(kExternalStringTag));
  mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
  b(ne, &is_data_object);

  // Sequential string, either ASCII or UC16.
  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
  // getting the length multiplied by 2.
  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  ldr(ip, FieldMemOperand(value, String::kLengthOffset));
  tst(instance_type, Operand(kStringEncodingMask));
  mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
  add(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, length, Operand(~kObjectAlignmentMask));

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  orr(ip, ip, Operand(mask_scratch));
  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  and_(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
  ldr(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  add(ip, ip, Operand(length));
  str(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  bind(&done);
}


void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
  Usat(output_reg, 8, Operand(input_reg));
}

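// Clamps a double to the 0..255 range: negative inputs and NaN become 0,
// inputs of 255 or more become 255, and in-range values are converted with
// round-to-nearest by temporarily clearing the FPSCR rounding-mode bits.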
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
                                        DwVfpRegister input_reg,
                                        LowDwVfpRegister double_scratch) {
  Label above_zero;
  Label done;
  Label in_bounds;

  VFPCompareAndSetFlags(input_reg, 0.0);
  b(gt, &above_zero);

  // Double value is less than zero, NaN or Inf, return 0.
  mov(result_reg, Operand::Zero());
  b(al, &done);

  // Double value is >= 255, return 255.
  bind(&above_zero);
  Vmov(double_scratch, 255.0, result_reg);
  VFPCompareAndSetFlags(input_reg, double_scratch);
  b(le, &in_bounds);
  mov(result_reg, Operand(255));
  b(al, &done);

  // In 0-255 range, round and truncate.
  bind(&in_bounds);
  // Save FPSCR.
  vmrs(ip);
  // Set rounding mode to round to the nearest integer by clearing bits[23:22].
  bic(result_reg, ip, Operand(kVFPRoundingModeMask));
  vmsr(result_reg);
  vcvt_s32_f64(double_scratch.low(), input_reg, kFPSCRRounding);
  vmov(result_reg, double_scratch.low());
  // Restore FPSCR.
  vmsr(ip);
  bind(&done);
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLength(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  and_(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
}

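// Walks the prototype chain starting from the object in r0 and bails out to
// the runtime if any map lacks a valid enum cache, any non-receiver map has a
// non-empty enum cache, or any object on the chain has elements. Clobbers
// r1-r3 and r6.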
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Register empty_fixed_array_value = r6;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;
  mov(r2, r0);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));

  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
  b(eq, call_runtime);

  jmp(&start);

  bind(&next);
  ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(r3, r1);
  cmp(r3, Operand(Smi::FromInt(0)));
  b(ne, call_runtime);

  bind(&start);

  // Check that there are no elements. Register r2 contains the current JS
  // object we've reached through the prototype chain.
  ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset));
  cmp(r2, empty_fixed_array_value);
  b(ne, call_runtime);

  ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
  cmp(r2, null_value);
  b(ne, &next);
}

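// Checks whether the word immediately following the JSArray (which must lie in
// new space) holds an AllocationMemento map; the caller tests the resulting eq
// condition. Jumps to no_memento_found when the array is outside the part of
// new space where a memento could be observed.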
void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  add(scratch_reg, receiver_reg,
      Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  cmp(scratch_reg, Operand(new_space_start));
  b(lt, no_memento_found);
  mov(ip, Operand(new_space_allocation_top));
  ldr(ip, MemOperand(ip));
  cmp(scratch_reg, ip);
  b(gt, no_memento_found);
  ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  cmp(scratch_reg,
      Operand(isolate()->factory()->allocation_memento_map()));
}


Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2,
                                   Register reg3,
                                   Register reg4,
                                   Register reg5,
                                   Register reg6) {
  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();

  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
    Register candidate = Register::FromAllocationIndex(i);
    if (regs & candidate.bit()) continue;
    return candidate;
  }
  UNREACHABLE();
  return no_reg;
}


#ifdef DEBUG
bool AreAliased(Register reg1,
                Register reg2,
                Register reg3,
                Register reg4,
                Register reg5,
                Register reg6) {
  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
    reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();

  RegList regs = 0;
  if (reg1.is_valid()) regs |= reg1.bit();
  if (reg2.is_valid()) regs |= reg2.bit();
  if (reg3.is_valid()) regs |= reg3.bit();
  if (reg4.is_valid()) regs |= reg4.bit();
  if (reg5.is_valid()) regs |= reg5.bit();
  if (reg6.is_valid()) regs |= reg6.bit();
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif

CodePatcher::CodePatcher(byte* address,
                         int instructions,
                         FlushICache flush_cache)
    : address_(address),
      size_(instructions * Assembler::kInstrSize),
      masm_(NULL, address, size_ + Assembler::kGap),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    CPU::FlushICache(address_, size_);
  }

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void CodePatcher::Emit(Instr instr) {
  masm()->emit(instr);
}


void CodePatcher::Emit(Address addr) {
  masm()->emit(reinterpret_cast<Instr>(addr));
}


void CodePatcher::EmitCondition(Condition cond) {
  Instr instr = Assembler::instr_at(masm_.pc_);
  instr = (instr & ~kCondMask) | cond;
  masm_.emit(instr);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM