deps/v8/src/mips/macro-assembler-mips.cc @ f230a1cf


1
// Copyright 2012 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
4
// met:
5
//
6
//     * Redistributions of source code must retain the above copyright
7
//       notice, this list of conditions and the following disclaimer.
8
//     * Redistributions in binary form must reproduce the above
9
//       copyright notice, this list of conditions and the following
10
//       disclaimer in the documentation and/or other materials provided
11
//       with the distribution.
12
//     * Neither the name of Google Inc. nor the names of its
13
//       contributors may be used to endorse or promote products derived
14
//       from this software without specific prior written permission.
15
//
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27

    
28
#include <limits.h>  // For LONG_MIN, LONG_MAX.
29

    
30
#include "v8.h"
31

    
32
#if V8_TARGET_ARCH_MIPS
33

    
34
#include "bootstrapper.h"
35
#include "codegen.h"
36
#include "cpu-profiler.h"
37
#include "debug.h"
38
#include "isolate-inl.h"
39
#include "runtime.h"
40

    
41
namespace v8 {
42
namespace internal {
43

    
44
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
45
    : Assembler(arg_isolate, buffer, size),
46
      generating_stub_(false),
47
      allow_stub_calls_(true),
48
      has_frame_(false) {
49
  if (isolate() != NULL) {
50
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
51
                                  isolate());
52
  }
53
}
54

    
55

    
56
void MacroAssembler::LoadRoot(Register destination,
57
                              Heap::RootListIndex index) {
58
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
59
}
60

    
61

    
62
void MacroAssembler::LoadRoot(Register destination,
63
                              Heap::RootListIndex index,
64
                              Condition cond,
65
                              Register src1, const Operand& src2) {
66
  Branch(2, NegateCondition(cond), src1, src2);
67
  lw(destination, MemOperand(s6, index << kPointerSizeLog2));
68
}
69

    
70

    
71
void MacroAssembler::StoreRoot(Register source,
72
                               Heap::RootListIndex index) {
73
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
74
}
75

    
76

    
77
void MacroAssembler::StoreRoot(Register source,
78
                               Heap::RootListIndex index,
79
                               Condition cond,
80
                               Register src1, const Operand& src2) {
81
  Branch(2, NegateCondition(cond), src1, src2);
82
  sw(source, MemOperand(s6, index << kPointerSizeLog2));
83
}
84

    
85

    
86
void MacroAssembler::LoadHeapObject(Register result,
87
                                    Handle<HeapObject> object) {
88
  AllowDeferredHandleDereference using_raw_address;
89
  if (isolate()->heap()->InNewSpace(*object)) {
90
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
91
    li(result, Operand(cell));
92
    lw(result, FieldMemOperand(result, Cell::kValueOffset));
93
  } else {
94
    li(result, Operand(object));
95
  }
96
}
97

    
98

    
99
// Push and pop all registers that can hold pointers.
100
void MacroAssembler::PushSafepointRegisters() {
101
  // Safepoints expect a block of kNumSafepointRegisters values on the
102
  // stack, so adjust the stack for unsaved registers.
103
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
104
  ASSERT(num_unsaved >= 0);
105
  if (num_unsaved > 0) {
106
    Subu(sp, sp, Operand(num_unsaved * kPointerSize));
107
  }
108
  MultiPush(kSafepointSavedRegisters);
109
}
110

    
111

    
112
void MacroAssembler::PopSafepointRegisters() {
113
  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
114
  MultiPop(kSafepointSavedRegisters);
115
  if (num_unsaved > 0) {
116
    Addu(sp, sp, Operand(num_unsaved * kPointerSize));
117
  }
118
}
119

    
120

    
121
void MacroAssembler::PushSafepointRegistersAndDoubles() {
122
  PushSafepointRegisters();
123
  Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
124
  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
125
    FPURegister reg = FPURegister::FromAllocationIndex(i);
126
    sdc1(reg, MemOperand(sp, i * kDoubleSize));
127
  }
128
}
129

    
130

    
131
void MacroAssembler::PopSafepointRegistersAndDoubles() {
132
  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
133
    FPURegister reg = FPURegister::FromAllocationIndex(i);
134
    ldc1(reg, MemOperand(sp, i * kDoubleSize));
135
  }
136
  Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
137
  PopSafepointRegisters();
138
}
139

    
140

    
141
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
142
                                                             Register dst) {
143
  sw(src, SafepointRegistersAndDoublesSlot(dst));
144
}
145

    
146

    
147
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
148
  sw(src, SafepointRegisterSlot(dst));
149
}
150

    
151

    
152
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
153
  lw(dst, SafepointRegisterSlot(src));
154
}
155

    
156

    
157
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
158
  // The registers are pushed starting with the highest encoding,
159
  // which means that lowest encodings are closest to the stack pointer.
160
  return kSafepointRegisterStackIndexMap[reg_code];
161
}
162

    
163

    
164
MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
165
  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
166
}
167

    
168

    
169
MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
170
  UNIMPLEMENTED_MIPS();
171
  // General purpose registers are pushed last on the stack.
172
  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
173
  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
174
  return MemOperand(sp, doubles_size + register_offset);
175
}
176

    
177

    
178
void MacroAssembler::InNewSpace(Register object,
179
                                Register scratch,
180
                                Condition cc,
181
                                Label* branch) {
182
  ASSERT(cc == eq || cc == ne);
183
  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
184
  Branch(branch, cc, scratch,
185
         Operand(ExternalReference::new_space_start(isolate())));
186
}
187
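// Illustrative sketch, not part of the original file: the InNewSpace() check
// above reduces to this predicate, assuming new space is a single
// power-of-two-aligned reservation described by a start address and a mask
// (cf. ExternalReference::new_space_start / new_space_mask).
static inline bool SketchInNewSpace(uintptr_t object_address,
                                    uintptr_t new_space_start,
                                    uintptr_t new_space_mask) {
  // And(scratch, object, mask); Branch(branch, eq/ne, scratch, start).
  return (object_address & new_space_mask) == new_space_start;
}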

    
188

    
189
void MacroAssembler::RecordWriteField(
190
    Register object,
191
    int offset,
192
    Register value,
193
    Register dst,
194
    RAStatus ra_status,
195
    SaveFPRegsMode save_fp,
196
    RememberedSetAction remembered_set_action,
197
    SmiCheck smi_check) {
198
  ASSERT(!AreAliased(value, dst, t8, object));
199
  // First, check if a write barrier is even needed. The tests below
200
  // catch stores of Smis.
201
  Label done;
202

    
203
  // Skip barrier if writing a smi.
204
  if (smi_check == INLINE_SMI_CHECK) {
205
    JumpIfSmi(value, &done);
206
  }
207

    
208
  // Although the object register is tagged, the offset is relative to the start
209
  // of the object, so the offset must be a multiple of kPointerSize.
210
  ASSERT(IsAligned(offset, kPointerSize));
211

    
212
  Addu(dst, object, Operand(offset - kHeapObjectTag));
213
  if (emit_debug_code()) {
214
    Label ok;
215
    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
216
    Branch(&ok, eq, t8, Operand(zero_reg));
217
    stop("Unaligned cell in write barrier");
218
    bind(&ok);
219
  }
220

    
221
  RecordWrite(object,
222
              dst,
223
              value,
224
              ra_status,
225
              save_fp,
226
              remembered_set_action,
227
              OMIT_SMI_CHECK);
228

    
229
  bind(&done);
230

    
231
  // Clobber clobbered input registers when running with the debug-code flag
232
  // turned on to provoke errors.
233
  if (emit_debug_code()) {
234
    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
235
    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
236
  }
237
}
238

    
239

    
240
// Will clobber 4 registers: object, address, scratch, ip.  The
241
// register 'object' contains a heap object pointer.  The heap object
242
// tag is shifted away.
243
void MacroAssembler::RecordWrite(Register object,
244
                                 Register address,
245
                                 Register value,
246
                                 RAStatus ra_status,
247
                                 SaveFPRegsMode fp_mode,
248
                                 RememberedSetAction remembered_set_action,
249
                                 SmiCheck smi_check) {
250
  ASSERT(!AreAliased(object, address, value, t8));
251
  ASSERT(!AreAliased(object, address, value, t9));
252

    
253
  if (emit_debug_code()) {
254
    lw(at, MemOperand(address));
255
    Assert(
256
        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
257
  }
258

    
259
  Label done;
260

    
261
  if (smi_check == INLINE_SMI_CHECK) {
262
    ASSERT_EQ(0, kSmiTag);
263
    JumpIfSmi(value, &done);
264
  }
265

    
266
  CheckPageFlag(value,
267
                value,  // Used as scratch.
268
                MemoryChunk::kPointersToHereAreInterestingMask,
269
                eq,
270
                &done);
271
  CheckPageFlag(object,
272
                value,  // Used as scratch.
273
                MemoryChunk::kPointersFromHereAreInterestingMask,
274
                eq,
275
                &done);
276

    
277
  // Record the actual write.
278
  if (ra_status == kRAHasNotBeenSaved) {
279
    push(ra);
280
  }
281
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
282
  CallStub(&stub);
283
  if (ra_status == kRAHasNotBeenSaved) {
284
    pop(ra);
285
  }
286

    
287
  bind(&done);
288

    
289
  // Clobber clobbered registers when running with the debug-code flag
290
  // turned on to provoke errors.
291
  if (emit_debug_code()) {
292
    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
293
    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
294
  }
295
}
296

    
297

    
298
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
299
                                         Register address,
300
                                         Register scratch,
301
                                         SaveFPRegsMode fp_mode,
302
                                         RememberedSetFinalAction and_then) {
303
  Label done;
304
  if (emit_debug_code()) {
305
    Label ok;
306
    JumpIfNotInNewSpace(object, scratch, &ok);
307
    stop("Remembered set pointer is in new space");
308
    bind(&ok);
309
  }
310
  // Load store buffer top.
311
  ExternalReference store_buffer =
312
      ExternalReference::store_buffer_top(isolate());
313
  li(t8, Operand(store_buffer));
314
  lw(scratch, MemOperand(t8));
315
  // Store pointer to buffer and increment buffer top.
316
  sw(address, MemOperand(scratch));
317
  Addu(scratch, scratch, kPointerSize);
318
  // Write back new top of buffer.
319
  sw(scratch, MemOperand(t8));
320
  // Call stub on end of buffer.
321
  // Check for end of buffer.
322
  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
323
  if (and_then == kFallThroughAtEnd) {
324
    Branch(&done, eq, t8, Operand(zero_reg));
325
  } else {
326
    ASSERT(and_then == kReturnAtEnd);
327
    Ret(eq, t8, Operand(zero_reg));
328
  }
329
  push(ra);
330
  StoreBufferOverflowStub store_buffer_overflow =
331
      StoreBufferOverflowStub(fp_mode);
332
  CallStub(&store_buffer_overflow);
333
  pop(ra);
334
  bind(&done);
335
  if (and_then == kReturnAtEnd) {
336
    Ret();
337
  }
338
}
339
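// Illustrative sketch, not part of the original file: the sequence above
// appends 'address' to the store buffer and then tests an overflow bit in the
// new top pointer. The pointer-to-top cell and the overflow bit value here
// are assumptions standing in for store_buffer_top and
// StoreBuffer::kStoreBufferOverflowBit.
static inline bool SketchStoreBufferAppend(uintptr_t** top_cell,
                                           uintptr_t slot_address,
                                           uintptr_t overflow_bit) {
  uintptr_t* top = *top_cell;   // lw(scratch, MemOperand(t8))
  *top++ = slot_address;        // sw(address, ...); Addu(scratch, ..., kPointerSize)
  *top_cell = top;              // sw(scratch, MemOperand(t8))
  // And(t8, scratch, overflow_bit): a nonzero result means the buffer is full
  // and the StoreBufferOverflowStub must be called.
  return (reinterpret_cast<uintptr_t>(top) & overflow_bit) != 0;
}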

    
340

    
341
// -----------------------------------------------------------------------------
342
// Allocation support.
343

    
344

    
345
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
346
                                            Register scratch,
347
                                            Label* miss) {
348
  Label same_contexts;
349

    
350
  ASSERT(!holder_reg.is(scratch));
351
  ASSERT(!holder_reg.is(at));
352
  ASSERT(!scratch.is(at));
353

    
354
  // Load current lexical context from the stack frame.
355
  lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
356
  // In debug mode, make sure the lexical context is set.
357
#ifdef DEBUG
358
  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
359
      scratch, Operand(zero_reg));
360
#endif
361

    
362
  // Load the native context of the current context.
363
  int offset =
364
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
365
  lw(scratch, FieldMemOperand(scratch, offset));
366
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
367

    
368
  // Check the context is a native context.
369
  if (emit_debug_code()) {
370
    push(holder_reg);  // Temporarily save holder on the stack.
371
    // Read the first word and compare to the native_context_map.
372
    lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
373
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
374
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
375
          holder_reg, Operand(at));
376
    pop(holder_reg);  // Restore holder.
377
  }
378

    
379
  // Check if both contexts are the same.
380
  lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
381
  Branch(&same_contexts, eq, scratch, Operand(at));
382

    
383
  // Check the context is a native context.
384
  if (emit_debug_code()) {
385
    push(holder_reg);  // Temporarily save holder on the stack.
386
    mov(holder_reg, at);  // Move at to its holding place.
387
    LoadRoot(at, Heap::kNullValueRootIndex);
388
    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
389
          holder_reg, Operand(at));
390

    
391
    lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
392
    LoadRoot(at, Heap::kNativeContextMapRootIndex);
393
    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
394
          holder_reg, Operand(at));
395
    // Restoring at is not needed; at is reloaded below.
396
    pop(holder_reg);  // Restore holder.
397
    // Restore at to holder's context.
398
    lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
399
  }
400

    
401
  // Check that the security token in the calling global object is
402
  // compatible with the security token in the receiving global
403
  // object.
404
  int token_offset = Context::kHeaderSize +
405
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
406

    
407
  lw(scratch, FieldMemOperand(scratch, token_offset));
408
  lw(at, FieldMemOperand(at, token_offset));
409
  Branch(miss, ne, scratch, Operand(at));
410

    
411
  bind(&same_contexts);
412
}
413

    
414

    
415
void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
416
  // First of all we assign the hash seed to scratch.
417
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
418
  SmiUntag(scratch);
419

    
420
  // Xor original key with a seed.
421
  xor_(reg0, reg0, scratch);
422

    
423
  // Compute the hash code from the untagged key.  This must be kept in sync
424
  // with ComputeIntegerHash in utils.h.
425
  //
426
  // hash = ~hash + (hash << 15);
427
  nor(scratch, reg0, zero_reg);
428
  sll(at, reg0, 15);
429
  addu(reg0, scratch, at);
430

    
431
  // hash = hash ^ (hash >> 12);
432
  srl(at, reg0, 12);
433
  xor_(reg0, reg0, at);
434

    
435
  // hash = hash + (hash << 2);
436
  sll(at, reg0, 2);
437
  addu(reg0, reg0, at);
438

    
439
  // hash = hash ^ (hash >> 4);
440
  srl(at, reg0, 4);
441
  xor_(reg0, reg0, at);
442

    
443
  // hash = hash * 2057;
444
  sll(scratch, reg0, 11);
445
  sll(at, reg0, 3);
446
  addu(reg0, reg0, at);
447
  addu(reg0, reg0, scratch);
448

    
449
  // hash = hash ^ (hash >> 16);
450
  srl(at, reg0, 16);
451
  xor_(reg0, reg0, at);
452
}
453
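// Illustrative sketch, not part of the original file: the shift/add sequence
// above computes the following integer hash (the comments say it must stay in
// sync with ComputeIntegerHash in utils.h) after xor-ing the key with the
// heap's hash seed.
static inline uint32_t SketchComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // xor_(reg0, reg0, scratch)
  hash = ~hash + (hash << 15);  // nor / sll / addu
  hash = hash ^ (hash >> 12);   // srl / xor_
  hash = hash + (hash << 2);    // sll / addu
  hash = hash ^ (hash >> 4);    // srl / xor_
  hash = hash * 2057;           // hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);   // srl / xor_
  return hash;
}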

    
454

    
455
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
456
                                              Register elements,
457
                                              Register key,
458
                                              Register result,
459
                                              Register reg0,
460
                                              Register reg1,
461
                                              Register reg2) {
462
  // Register use:
463
  //
464
  // elements - holds the slow-case elements of the receiver on entry.
465
  //            Unchanged unless 'result' is the same register.
466
  //
467
  // key      - holds the smi key on entry.
468
  //            Unchanged unless 'result' is the same register.
469
  //
470
  //
471
  // result   - holds the result on exit if the load succeeded.
472
  //            Allowed to be the same as 'elements' or 'key'.
473
  //            Unchanged on bailout so 'elements' and 'key' can be used
474
  //            in further computation.
475
  //
476
  // Scratch registers:
477
  //
478
  // reg0 - holds the untagged key on entry and holds the hash once computed.
479
  //
480
  // reg1 - Used to hold the capacity mask of the dictionary.
481
  //
482
  // reg2 - Used for the index into the dictionary.
483
  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
484
  Label done;
485

    
486
  GetNumberHash(reg0, reg1);
487

    
488
  // Compute the capacity mask.
489
  lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
490
  sra(reg1, reg1, kSmiTagSize);
491
  Subu(reg1, reg1, Operand(1));
492

    
493
  // Generate an unrolled loop that performs a few probes before giving up.
494
  static const int kProbes = 4;
495
  for (int i = 0; i < kProbes; i++) {
496
    // Use reg2 for index calculations and keep the hash intact in reg0.
497
    mov(reg2, reg0);
498
    // Compute the masked index: (hash + i + i * i) & mask.
499
    if (i > 0) {
500
      Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
501
    }
502
    and_(reg2, reg2, reg1);
503

    
504
    // Scale the index by multiplying by the element size.
505
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
506
    sll(at, reg2, 1);  // 2x.
507
    addu(reg2, reg2, at);  // reg2 = reg2 * 3.
508

    
509
    // Check if the key is identical to the name.
510
    sll(at, reg2, kPointerSizeLog2);
511
    addu(reg2, elements, at);
512

    
513
    lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
514
    if (i != kProbes - 1) {
515
      Branch(&done, eq, key, Operand(at));
516
    } else {
517
      Branch(miss, ne, key, Operand(at));
518
    }
519
  }
520

    
521
  bind(&done);
522
  // Check that the value is a normal property.
523
  // reg2: elements + (index * kPointerSize).
524
  const int kDetailsOffset =
525
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
526
  lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
527
  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
528
  Branch(miss, ne, at, Operand(zero_reg));
529

    
530
  // Get the value at the masked, scaled index and return.
531
  const int kValueOffset =
532
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
533
  lw(result, FieldMemOperand(reg2, kValueOffset));
534
}
535
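// Illustrative sketch, not part of the original file: each unrolled probe
// above computes a masked index into a power-of-two sized dictionary and
// scales it by the entry size (kEntrySize == 3 words). The probe offset
// mirrors the comment "(hash + i + i * i) & mask"; the exact value of
// SeededNumberDictionary::GetProbeOffset(i) is taken from the dictionary
// implementation and is not restated here.
static inline uint32_t SketchProbeEntryIndex(uint32_t hash,
                                             uint32_t probe_offset,
                                             uint32_t capacity_mask) {
  uint32_t index = (hash + probe_offset) & capacity_mask;  // Addu + and_
  // sll(at, reg2, 1); addu(reg2, reg2, at)  =>  reg2 * 3.
  return index * 3;
}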

    
536

    
537
// ---------------------------------------------------------------------------
538
// Instruction macros.
539

    
540
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
541
  if (rt.is_reg()) {
542
    addu(rd, rs, rt.rm());
543
  } else {
544
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
545
      addiu(rd, rs, rt.imm32_);
546
    } else {
547
      // li handles the relocation.
548
      ASSERT(!rs.is(at));
549
      li(at, rt);
550
      addu(rd, rs, at);
551
    }
552
  }
553
}
554

    
555

    
556
void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
557
  if (rt.is_reg()) {
558
    subu(rd, rs, rt.rm());
559
  } else {
560
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
561
      addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
562
    } else {
563
      // li handles the relocation.
564
      ASSERT(!rs.is(at));
565
      li(at, rt);
566
      subu(rd, rs, at);
567
    }
568
  }
569
}
570

    
571

    
572
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
573
  if (rt.is_reg()) {
574
    if (kArchVariant == kLoongson) {
575
      mult(rs, rt.rm());
576
      mflo(rd);
577
    } else {
578
      mul(rd, rs, rt.rm());
579
    }
580
  } else {
581
    // li handles the relocation.
582
    ASSERT(!rs.is(at));
583
    li(at, rt);
584
    if (kArchVariant == kLoongson) {
585
      mult(rs, at);
586
      mflo(rd);
587
    } else {
588
      mul(rd, rs, at);
589
    }
590
  }
591
}
592

    
593

    
594
void MacroAssembler::Mult(Register rs, const Operand& rt) {
595
  if (rt.is_reg()) {
596
    mult(rs, rt.rm());
597
  } else {
598
    // li handles the relocation.
599
    ASSERT(!rs.is(at));
600
    li(at, rt);
601
    mult(rs, at);
602
  }
603
}
604

    
605

    
606
void MacroAssembler::Multu(Register rs, const Operand& rt) {
607
  if (rt.is_reg()) {
608
    multu(rs, rt.rm());
609
  } else {
610
    // li handles the relocation.
611
    ASSERT(!rs.is(at));
612
    li(at, rt);
613
    multu(rs, at);
614
  }
615
}
616

    
617

    
618
void MacroAssembler::Div(Register rs, const Operand& rt) {
619
  if (rt.is_reg()) {
620
    div(rs, rt.rm());
621
  } else {
622
    // li handles the relocation.
623
    ASSERT(!rs.is(at));
624
    li(at, rt);
625
    div(rs, at);
626
  }
627
}
628

    
629

    
630
void MacroAssembler::Divu(Register rs, const Operand& rt) {
631
  if (rt.is_reg()) {
632
    divu(rs, rt.rm());
633
  } else {
634
    // li handles the relocation.
635
    ASSERT(!rs.is(at));
636
    li(at, rt);
637
    divu(rs, at);
638
  }
639
}
640

    
641

    
642
void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
643
  if (rt.is_reg()) {
644
    and_(rd, rs, rt.rm());
645
  } else {
646
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
647
      andi(rd, rs, rt.imm32_);
648
    } else {
649
      // li handles the relocation.
650
      ASSERT(!rs.is(at));
651
      li(at, rt);
652
      and_(rd, rs, at);
653
    }
654
  }
655
}
656

    
657

    
658
void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
659
  if (rt.is_reg()) {
660
    or_(rd, rs, rt.rm());
661
  } else {
662
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
663
      ori(rd, rs, rt.imm32_);
664
    } else {
665
      // li handles the relocation.
666
      ASSERT(!rs.is(at));
667
      li(at, rt);
668
      or_(rd, rs, at);
669
    }
670
  }
671
}
672

    
673

    
674
void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
675
  if (rt.is_reg()) {
676
    xor_(rd, rs, rt.rm());
677
  } else {
678
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
679
      xori(rd, rs, rt.imm32_);
680
    } else {
681
      // li handles the relocation.
682
      ASSERT(!rs.is(at));
683
      li(at, rt);
684
      xor_(rd, rs, at);
685
    }
686
  }
687
}
688

    
689

    
690
void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
691
  if (rt.is_reg()) {
692
    nor(rd, rs, rt.rm());
693
  } else {
694
    // li handles the relocation.
695
    ASSERT(!rs.is(at));
696
    li(at, rt);
697
    nor(rd, rs, at);
698
  }
699
}
700

    
701

    
702
void MacroAssembler::Neg(Register rs, const Operand& rt) {
703
  ASSERT(rt.is_reg());
704
  ASSERT(!at.is(rs));
705
  ASSERT(!at.is(rt.rm()));
706
  li(at, -1);
707
  xor_(rs, rt.rm(), at);
708
}
709

    
710

    
711
void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
712
  if (rt.is_reg()) {
713
    slt(rd, rs, rt.rm());
714
  } else {
715
    if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
716
      slti(rd, rs, rt.imm32_);
717
    } else {
718
      // li handles the relocation.
719
      ASSERT(!rs.is(at));
720
      li(at, rt);
721
      slt(rd, rs, at);
722
    }
723
  }
724
}
725

    
726

    
727
void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
728
  if (rt.is_reg()) {
729
    sltu(rd, rs, rt.rm());
730
  } else {
731
    if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
732
      sltiu(rd, rs, rt.imm32_);
733
    } else {
734
      // li handles the relocation.
735
      ASSERT(!rs.is(at));
736
      li(at, rt);
737
      sltu(rd, rs, at);
738
    }
739
  }
740
}
741

    
742

    
743
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
744
  if (kArchVariant == kMips32r2) {
745
    if (rt.is_reg()) {
746
      rotrv(rd, rs, rt.rm());
747
    } else {
748
      rotr(rd, rs, rt.imm32_);
749
    }
750
  } else {
751
    if (rt.is_reg()) {
752
      subu(at, zero_reg, rt.rm());
753
      sllv(at, rs, at);
754
      srlv(rd, rs, rt.rm());
755
      or_(rd, rd, at);
756
    } else {
757
      if (rt.imm32_ == 0) {
758
        srl(rd, rs, 0);
759
      } else {
760
        srl(at, rs, rt.imm32_);
761
        sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
762
        or_(rd, rd, at);
763
      }
764
    }
765
  }
766
}
767
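// Illustrative sketch, not part of the original file: on cores without the
// MIPS32r2 rotate instructions, the fallback above synthesizes a 32-bit
// rotate right from two shifts and an OR, equivalent to this C expression.
static inline uint32_t SketchRotateRight(uint32_t value, uint32_t amount) {
  amount &= 0x1f;
  if (amount == 0) return value;                        // srl(rd, rs, 0)
  return (value >> amount) | (value << (32 - amount));  // srl / sll / or_
}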

    
768

    
769
//------------Pseudo-instructions-------------
770

    
771
void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
772
  ASSERT(!j.is_reg());
773
  BlockTrampolinePoolScope block_trampoline_pool(this);
774
  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
775
    // Normal load of an immediate value which does not need Relocation Info.
776
    if (is_int16(j.imm32_)) {
777
      addiu(rd, zero_reg, j.imm32_);
778
    } else if (!(j.imm32_ & kHiMask)) {
779
      ori(rd, zero_reg, j.imm32_);
780
    } else if (!(j.imm32_ & kImm16Mask)) {
781
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
782
    } else {
783
      lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
784
      ori(rd, rd, (j.imm32_ & kImm16Mask));
785
    }
786
  } else {
787
    if (MustUseReg(j.rmode_)) {
788
      RecordRelocInfo(j.rmode_, j.imm32_);
789
    }
790
    // We always need the same number of instructions as we may need to patch
791
    // this code to load another value which may need 2 instructions to load.
792
    lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
793
    ori(rd, rd, (j.imm32_ & kImm16Mask));
794
  }
795
}
796
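// Illustrative sketch, not part of the original file: when the value needs no
// relocation and OPTIMIZE_SIZE is allowed, li() picks the shortest sequence
// that materializes the 32-bit constant; otherwise it always emits the
// two-instruction lui/ori pair so the site can later be patched with any
// other value.
static inline int SketchLiInstructionCount(int32_t imm, bool force_two_instr) {
  if (force_two_instr) return 2;                 // lui + ori, patchable
  if (imm >= -32768 && imm <= 32767) return 1;   // addiu rd, zero_reg, imm
  uint32_t bits = static_cast<uint32_t>(imm);
  if ((bits & 0xffff0000u) == 0) return 1;       // ori rd, zero_reg, imm
  if ((bits & 0x0000ffffu) == 0) return 1;       // lui rd, imm >> 16
  return 2;                                      // lui + ori
}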

    
797

    
798
void MacroAssembler::MultiPush(RegList regs) {
799
  int16_t num_to_push = NumberOfBitsSet(regs);
800
  int16_t stack_offset = num_to_push * kPointerSize;
801

    
802
  Subu(sp, sp, Operand(stack_offset));
803
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
804
    if ((regs & (1 << i)) != 0) {
805
      stack_offset -= kPointerSize;
806
      sw(ToRegister(i), MemOperand(sp, stack_offset));
807
    }
808
  }
809
}
810

    
811

    
812
void MacroAssembler::MultiPushReversed(RegList regs) {
813
  int16_t num_to_push = NumberOfBitsSet(regs);
814
  int16_t stack_offset = num_to_push * kPointerSize;
815

    
816
  Subu(sp, sp, Operand(stack_offset));
817
  for (int16_t i = 0; i < kNumRegisters; i++) {
818
    if ((regs & (1 << i)) != 0) {
819
      stack_offset -= kPointerSize;
820
      sw(ToRegister(i), MemOperand(sp, stack_offset));
821
    }
822
  }
823
}
824

    
825

    
826
void MacroAssembler::MultiPop(RegList regs) {
827
  int16_t stack_offset = 0;
828

    
829
  for (int16_t i = 0; i < kNumRegisters; i++) {
830
    if ((regs & (1 << i)) != 0) {
831
      lw(ToRegister(i), MemOperand(sp, stack_offset));
832
      stack_offset += kPointerSize;
833
    }
834
  }
835
  addiu(sp, sp, stack_offset);
836
}
837

    
838

    
839
void MacroAssembler::MultiPopReversed(RegList regs) {
840
  int16_t stack_offset = 0;
841

    
842
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
843
    if ((regs & (1 << i)) != 0) {
844
      lw(ToRegister(i), MemOperand(sp, stack_offset));
845
      stack_offset += kPointerSize;
846
    }
847
  }
848
  addiu(sp, sp, stack_offset);
849
}
850

    
851

    
852
void MacroAssembler::MultiPushFPU(RegList regs) {
853
  int16_t num_to_push = NumberOfBitsSet(regs);
854
  int16_t stack_offset = num_to_push * kDoubleSize;
855

    
856
  Subu(sp, sp, Operand(stack_offset));
857
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
858
    if ((regs & (1 << i)) != 0) {
859
      stack_offset -= kDoubleSize;
860
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
861
    }
862
  }
863
}
864

    
865

    
866
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
867
  int16_t num_to_push = NumberOfBitsSet(regs);
868
  int16_t stack_offset = num_to_push * kDoubleSize;
869

    
870
  Subu(sp, sp, Operand(stack_offset));
871
  for (int16_t i = 0; i < kNumRegisters; i++) {
872
    if ((regs & (1 << i)) != 0) {
873
      stack_offset -= kDoubleSize;
874
      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
875
    }
876
  }
877
}
878

    
879

    
880
void MacroAssembler::MultiPopFPU(RegList regs) {
881
  int16_t stack_offset = 0;
882

    
883
  for (int16_t i = 0; i < kNumRegisters; i++) {
884
    if ((regs & (1 << i)) != 0) {
885
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
886
      stack_offset += kDoubleSize;
887
    }
888
  }
889
  addiu(sp, sp, stack_offset);
890
}
891

    
892

    
893
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
894
  int16_t stack_offset = 0;
895

    
896
  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
897
    if ((regs & (1 << i)) != 0) {
898
      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
899
      stack_offset += kDoubleSize;
900
    }
901
  }
902
  addiu(sp, sp, stack_offset);
903
}
904

    
905

    
906
void MacroAssembler::FlushICache(Register address, unsigned instructions) {
907
  RegList saved_regs = kJSCallerSaved | ra.bit();
908
  MultiPush(saved_regs);
909
  AllowExternalCallThatCantCauseGC scope(this);
910

    
911
  // Save to a0 in case address == t0.
912
  Move(a0, address);
913
  PrepareCallCFunction(2, t0);
914

    
915
  li(a1, instructions * kInstrSize);
916
  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
917
  MultiPop(saved_regs);
918
}
919

    
920

    
921
void MacroAssembler::Ext(Register rt,
922
                         Register rs,
923
                         uint16_t pos,
924
                         uint16_t size) {
925
  ASSERT(pos < 32);
926
  ASSERT(pos + size < 33);
927

    
928
  if (kArchVariant == kMips32r2) {
929
    ext_(rt, rs, pos, size);
930
  } else {
931
    // Move rs to rt and shift it left then right to get the
932
    // desired bitfield on the right side and zeroes on the left.
933
    int shift_left = 32 - (pos + size);
934
    sll(rt, rs, shift_left);  // Acts as a move if shift_left == 0.
935

    
936
    int shift_right = 32 - size;
937
    if (shift_right > 0) {
938
      srl(rt, rt, shift_right);
939
    }
940
  }
941
}
942
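// Illustrative sketch, not part of the original file: without the MIPS32r2
// ext_ instruction, the field [pos, pos + size) is isolated by shifting it to
// the top of the word and then back down, zero-filling both sides.
static inline uint32_t SketchExtractBits(uint32_t value, int pos, int size) {
  // Assumes 0 <= pos < 32, 1 <= size, pos + size <= 32, as in the ASSERTs above.
  uint32_t shifted = value << (32 - (pos + size));  // sll: drop bits above the field
  return shifted >> (32 - size);                    // srl: drop bits below the field
}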

    
943

    
944
void MacroAssembler::Ins(Register rt,
945
                         Register rs,
946
                         uint16_t pos,
947
                         uint16_t size) {
948
  ASSERT(pos < 32);
949
  ASSERT(pos + size <= 32);
950
  ASSERT(size != 0);
951

    
952
  if (kArchVariant == kMips32r2) {
953
    ins_(rt, rs, pos, size);
954
  } else {
955
    ASSERT(!rt.is(t8) && !rs.is(t8));
956
    Subu(at, zero_reg, Operand(1));
957
    srl(at, at, 32 - size);
958
    and_(t8, rs, at);
959
    sll(t8, t8, pos);
960
    sll(at, at, pos);
961
    nor(at, at, zero_reg);
962
    and_(at, rt, at);
963
    or_(rt, t8, at);
964
  }
965
}
966
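// Illustrative sketch, not part of the original file: without the MIPS32r2
// ins_ instruction, a size-bit mask is built, the source field is shifted
// into position, and the destination keeps only its bits outside the field.
static inline uint32_t SketchInsertBits(uint32_t dst, uint32_t src,
                                        int pos, int size) {
  // Assumes 0 <= pos < 32, 1 <= size, pos + size <= 32, as ASSERTed above.
  uint32_t mask = 0xffffffffu >> (32 - size);  // Subu(at, zero_reg, 1); srl(at, at, 32 - size)
  uint32_t field = (src & mask) << pos;        // and_(t8, rs, at); sll(t8, t8, pos)
  uint32_t keep = dst & ~(mask << pos);        // sll(at, at, pos); nor; and_(at, rt, at)
  return field | keep;                         // or_(rt, t8, at)
}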

    
967

    
968
void MacroAssembler::Cvt_d_uw(FPURegister fd,
969
                              FPURegister fs,
970
                              FPURegister scratch) {
971
  // Move the data from fs to t8.
972
  mfc1(t8, fs);
973
  Cvt_d_uw(fd, t8, scratch);
974
}
975

    
976

    
977
void MacroAssembler::Cvt_d_uw(FPURegister fd,
978
                              Register rs,
979
                              FPURegister scratch) {
980
  // Convert rs to a FP value in fd (and fd + 1).
981
  // We do this by converting rs minus the MSB to avoid sign conversion,
982
  // then adding 2^31 to the result (if needed).
983

    
984
  ASSERT(!fd.is(scratch));
985
  ASSERT(!rs.is(t9));
986
  ASSERT(!rs.is(at));
987

    
988
  // Save rs's MSB to t9.
989
  Ext(t9, rs, 31, 1);
990
  // Remove rs's MSB.
991
  Ext(at, rs, 0, 31);
992
  // Move the result to fd.
993
  mtc1(at, fd);
994

    
995
  // Convert fd to a real FP value.
996
  cvt_d_w(fd, fd);
997

    
998
  Label conversion_done;
999

    
1000
  // If rs's MSB was 0, it's done.
1001
  // Otherwise we need to add that to the FP register.
1002
  Branch(&conversion_done, eq, t9, Operand(zero_reg));
1003

    
1004
  // Load 2^31 into scratch as its double representation.
1005
  li(at, 0x41E00000);
1006
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1007
  mtc1(zero_reg, scratch);
1008
  // Add it to fd.
1009
  add_d(fd, fd, scratch);
1010

    
1011
  bind(&conversion_done);
1012
}
1013
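// Illustrative sketch, not part of the original file: the routine above
// converts an unsigned 32-bit value to a double by converting the low 31 bits
// with the signed instruction and adding 2^31 back in when the original MSB
// was set (0x41E00000 is the high word of the double 2^31).
static inline double SketchUint32ToDouble(uint32_t value) {
  uint32_t msb = value >> 31;                                 // Ext(t9, rs, 31, 1)
  int32_t low31 = static_cast<int32_t>(value & 0x7fffffffu);  // Ext(at, rs, 0, 31)
  double result = static_cast<double>(low31);                 // mtc1 + cvt_d_w
  if (msb != 0) result += 2147483648.0;                       // add_d(fd, fd, 2^31)
  return result;
}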

    
1014

    
1015
void MacroAssembler::Trunc_uw_d(FPURegister fd,
1016
                                FPURegister fs,
1017
                                FPURegister scratch) {
1018
  Trunc_uw_d(fs, t8, scratch);
1019
  mtc1(t8, fd);
1020
}
1021

    
1022

    
1023
void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1024
  if (kArchVariant == kLoongson && fd.is(fs)) {
1025
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1026
    trunc_w_d(fd, fs);
1027
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1028
  } else {
1029
    trunc_w_d(fd, fs);
1030
  }
1031
}
1032

    
1033

    
1034
void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1035
  if (kArchVariant == kLoongson && fd.is(fs)) {
1036
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1037
    round_w_d(fd, fs);
1038
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1039
  } else {
1040
    round_w_d(fd, fs);
1041
  }
1042
}
1043

    
1044

    
1045
void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1046
  if (kArchVariant == kLoongson && fd.is(fs)) {
1047
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1048
    floor_w_d(fd, fs);
1049
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1050
  } else {
1051
    floor_w_d(fd, fs);
1052
  }
1053
}
1054

    
1055

    
1056
void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1057
  if (kArchVariant == kLoongson && fd.is(fs)) {
1058
    mfc1(t8, FPURegister::from_code(fs.code() + 1));
1059
    ceil_w_d(fd, fs);
1060
    mtc1(t8, FPURegister::from_code(fs.code() + 1));
1061
  } else {
1062
    ceil_w_d(fd, fs);
1063
  }
1064
}
1065

    
1066

    
1067
void MacroAssembler::Trunc_uw_d(FPURegister fd,
1068
                                Register rs,
1069
                                FPURegister scratch) {
1070
  ASSERT(!fd.is(scratch));
1071
  ASSERT(!rs.is(at));
1072

    
1073
  // Load 2^31 into scratch as its double representation.
1074
  li(at, 0x41E00000);
1075
  mtc1(at, FPURegister::from_code(scratch.code() + 1));
1076
  mtc1(zero_reg, scratch);
1077
  // Test if scratch > fd.
1078
  // If fd < 2^31 we can convert it normally.
1079
  Label simple_convert;
1080
  BranchF(&simple_convert, NULL, lt, fd, scratch);
1081

    
1082
  // First we subtract 2^31 from fd, then trunc it to rs
1083
  // and add 2^31 to rs.
1084
  sub_d(scratch, fd, scratch);
1085
  trunc_w_d(scratch, scratch);
1086
  mfc1(rs, scratch);
1087
  Or(rs, rs, 1 << 31);
1088

    
1089
  Label done;
1090
  Branch(&done);
1091
  // Simple conversion.
1092
  bind(&simple_convert);
1093
  trunc_w_d(scratch, fd);
1094
  mfc1(rs, scratch);
1095

    
1096
  bind(&done);
1097
}
1098
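// Illustrative sketch, not part of the original file: the routine above
// truncates a double to an unsigned 32-bit value. Inputs below 2^31 are
// truncated directly; larger inputs are reduced by 2^31, truncated with the
// signed instruction, and the top bit is OR-ed back in. Out-of-range and NaN
// inputs are left to the caller, as in the assembly.
static inline uint32_t SketchDoubleToUint32(double value) {
  const double two_31 = 2147483648.0;  // 2^31, built above from 0x41E00000
  if (value < two_31) {                // BranchF(&simple_convert, NULL, lt, fd, scratch)
    return static_cast<uint32_t>(static_cast<int32_t>(value));  // trunc_w_d; mfc1
  }
  uint32_t low = static_cast<uint32_t>(static_cast<int32_t>(value - two_31));
  return low | 0x80000000u;            // Or(rs, rs, 1 << 31)
}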

    
1099

    
1100
void MacroAssembler::BranchF(Label* target,
1101
                             Label* nan,
1102
                             Condition cc,
1103
                             FPURegister cmp1,
1104
                             FPURegister cmp2,
1105
                             BranchDelaySlot bd) {
1106
  BlockTrampolinePoolScope block_trampoline_pool(this);
1107
  if (cc == al) {
1108
    Branch(bd, target);
1109
    return;
1110
  }
1111

    
1112
  ASSERT(nan || target);
1113
  // Check for unordered (NaN) cases.
1114
  if (nan) {
1115
    c(UN, D, cmp1, cmp2);
1116
    bc1t(nan);
1117
  }
1118

    
1119
  if (target) {
1120
    // Here NaN cases were either handled by this function or are assumed to
1121
    // have been handled by the caller.
1122
    // Unsigned conditions are treated as their signed counterpart.
1123
    switch (cc) {
1124
      case lt:
1125
        c(OLT, D, cmp1, cmp2);
1126
        bc1t(target);
1127
        break;
1128
      case gt:
1129
        c(ULE, D, cmp1, cmp2);
1130
        bc1f(target);
1131
        break;
1132
      case ge:
1133
        c(ULT, D, cmp1, cmp2);
1134
        bc1f(target);
1135
        break;
1136
      case le:
1137
        c(OLE, D, cmp1, cmp2);
1138
        bc1t(target);
1139
        break;
1140
      case eq:
1141
        c(EQ, D, cmp1, cmp2);
1142
        bc1t(target);
1143
        break;
1144
      case ueq:
1145
        c(UEQ, D, cmp1, cmp2);
1146
        bc1t(target);
1147
        break;
1148
      case ne:
1149
        c(EQ, D, cmp1, cmp2);
1150
        bc1f(target);
1151
        break;
1152
      case nue:
1153
        c(UEQ, D, cmp1, cmp2);
1154
        bc1f(target);
1155
        break;
1156
      default:
1157
        CHECK(0);
1158
    };
1159
  }
1160

    
1161
  if (bd == PROTECT) {
1162
    nop();
1163
  }
1164
}
1165

    
1166

    
1167
void MacroAssembler::Move(FPURegister dst, double imm) {
1168
  static const DoubleRepresentation minus_zero(-0.0);
1169
  static const DoubleRepresentation zero(0.0);
1170
  DoubleRepresentation value(imm);
1171
  // Handle special values first.
1172
  bool force_load = dst.is(kDoubleRegZero);
1173
  if (value.bits == zero.bits && !force_load) {
1174
    mov_d(dst, kDoubleRegZero);
1175
  } else if (value.bits == minus_zero.bits && !force_load) {
1176
    neg_d(dst, kDoubleRegZero);
1177
  } else {
1178
    uint32_t lo, hi;
1179
    DoubleAsTwoUInt32(imm, &lo, &hi);
1180
    // Move the low part of the double into the lower of the corresponding FPU
1181
    // register of FPU register pair.
1182
    if (lo != 0) {
1183
      li(at, Operand(lo));
1184
      mtc1(at, dst);
1185
    } else {
1186
      mtc1(zero_reg, dst);
1187
    }
1188
    // Move the high part of the double into the higher of the corresponding FPU
1189
    // register of FPU register pair.
1190
    if (hi != 0) {
1191
      li(at, Operand(hi));
1192
      mtc1(at, dst.high());
1193
    } else {
1194
      mtc1(zero_reg, dst.high());
1195
    }
1196
  }
1197
}
1198
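// Illustrative sketch, not part of the original file: for an arbitrary double
// the code above splits the IEEE-754 bit pattern into two 32-bit halves and
// moves each half into one register of the FPU register pair (cf.
// DoubleAsTwoUInt32). Assumes <string.h> is available for memcpy.
static inline void SketchDoubleAsTwoUInt32(double value,
                                           uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  *lo = static_cast<uint32_t>(bits);        // moved with mtc1(at, dst)
  *hi = static_cast<uint32_t>(bits >> 32);  // moved with mtc1(at, dst.high())
}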

    
1199

    
1200
void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1201
  if (kArchVariant == kLoongson) {
1202
    Label done;
1203
    Branch(&done, ne, rt, Operand(zero_reg));
1204
    mov(rd, rs);
1205
    bind(&done);
1206
  } else {
1207
    movz(rd, rs, rt);
1208
  }
1209
}
1210

    
1211

    
1212
void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
1213
  if (kArchVariant == kLoongson) {
1214
    Label done;
1215
    Branch(&done, eq, rt, Operand(zero_reg));
1216
    mov(rd, rs);
1217
    bind(&done);
1218
  } else {
1219
    movn(rd, rs, rt);
1220
  }
1221
}
1222

    
1223

    
1224
void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1225
  if (kArchVariant == kLoongson) {
1226
    // Tests an FP condition code and then conditionally move rs to rd.
1227
    // We do not currently use any FPU cc bit other than bit 0.
1228
    ASSERT(cc == 0);
1229
    ASSERT(!(rs.is(t8) || rd.is(t8)));
1230
    Label done;
1231
    Register scratch = t8;
1232
    // For testing purposes we need to fetch content of the FCSR register and
1233
    // then test its cc (floating point condition code) bit (for cc = 0, it is
1234
    // the 24th bit of the FCSR).
1235
    cfc1(scratch, FCSR);
1236
    // For the MIPS I, II and III architectures, the contents of scratch are
1237
    // UNPREDICTABLE for the instruction immediately following CFC1.
1238
    nop();
1239
    srl(scratch, scratch, 16);
1240
    andi(scratch, scratch, 0x0080);
1241
    Branch(&done, eq, scratch, Operand(zero_reg));
1242
    mov(rd, rs);
1243
    bind(&done);
1244
  } else {
1245
    movt(rd, rs, cc);
1246
  }
1247
}
1248

    
1249

    
1250
void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1251
  if (kArchVariant == kLoongson) {
1252
    // Tests an FP condition code and then conditionally move rs to rd.
1253
    // We do not currently use any FPU cc bit other than bit 0.
1254
    ASSERT(cc == 0);
1255
    ASSERT(!(rs.is(t8) || rd.is(t8)));
1256
    Label done;
1257
    Register scratch = t8;
1258
    // For testing purposes we need to fetch content of the FCSR register and
1259
    // then test its cc (floating point condition code) bit (for cc = 0, it is
1260
    // the 24th bit of the FCSR).
1261
    cfc1(scratch, FCSR);
1262
    // For the MIPS I, II and III architectures, the contents of scratch are
1263
    // UNPREDICTABLE for the instruction immediately following CFC1.
1264
    nop();
1265
    srl(scratch, scratch, 16);
1266
    andi(scratch, scratch, 0x0080);
1267
    Branch(&done, ne, scratch, Operand(zero_reg));
1268
    mov(rd, rs);
1269
    bind(&done);
1270
  } else {
1271
    movf(rd, rs, cc);
1272
  }
1273
}
1274

    
1275

    
1276
void MacroAssembler::Clz(Register rd, Register rs) {
1277
  if (kArchVariant == kLoongson) {
1278
    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1279
    Register mask = t8;
1280
    Register scratch = t9;
1281
    Label loop, end;
1282
    mov(at, rs);
1283
    mov(rd, zero_reg);
1284
    lui(mask, 0x8000);
1285
    bind(&loop);
1286
    and_(scratch, at, mask);
1287
    Branch(&end, ne, scratch, Operand(zero_reg));
1288
    addiu(rd, rd, 1);
1289
    Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1290
    srl(mask, mask, 1);
1291
    bind(&end);
1292
  } else {
1293
    clz(rd, rs);
1294
  }
1295
}
1296
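// Illustrative sketch, not part of the original file: on Loongson cores the
// loop above counts leading zeros by walking a one-bit mask down from bit 31
// until it overlaps a set bit in the input (shown here for nonzero inputs).
static inline int SketchCountLeadingZeros(uint32_t value) {
  uint32_t mask = 0x80000000u;                // lui(mask, 0x8000)
  int count = 0;                              // mov(rd, zero_reg)
  while (mask != 0 && (value & mask) == 0) {  // and_ + Branch(&end, ne, ...)
    count++;                                  // addiu(rd, rd, 1)
    mask >>= 1;                               // srl(mask, mask, 1) in the delay slot
  }
  return count;
}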

    
1297

    
1298
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1299
                                     Register result,
1300
                                     DoubleRegister double_input,
1301
                                     Register scratch,
1302
                                     DoubleRegister double_scratch,
1303
                                     Register except_flag,
1304
                                     CheckForInexactConversion check_inexact) {
1305
  ASSERT(!result.is(scratch));
1306
  ASSERT(!double_input.is(double_scratch));
1307
  ASSERT(!except_flag.is(scratch));
1308

    
1309
  Label done;
1310

    
1311
  // Clear the except flag (0 = no exception)
1312
  mov(except_flag, zero_reg);
1313

    
1314
  // Test for values that can be exactly represented as a signed 32-bit integer.
1315
  cvt_w_d(double_scratch, double_input);
1316
  mfc1(result, double_scratch);
1317
  cvt_d_w(double_scratch, double_scratch);
1318
  BranchF(&done, NULL, eq, double_input, double_scratch);
1319

    
1320
  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
1321

    
1322
  if (check_inexact == kDontCheckForInexactConversion) {
1323
    // Ignore inexact exceptions.
1324
    except_mask &= ~kFCSRInexactFlagMask;
1325
  }
1326

    
1327
  // Save FCSR.
1328
  cfc1(scratch, FCSR);
1329
  // Disable FPU exceptions.
1330
  ctc1(zero_reg, FCSR);
1331

    
1332
  // Do operation based on rounding mode.
1333
  switch (rounding_mode) {
1334
    case kRoundToNearest:
1335
      Round_w_d(double_scratch, double_input);
1336
      break;
1337
    case kRoundToZero:
1338
      Trunc_w_d(double_scratch, double_input);
1339
      break;
1340
    case kRoundToPlusInf:
1341
      Ceil_w_d(double_scratch, double_input);
1342
      break;
1343
    case kRoundToMinusInf:
1344
      Floor_w_d(double_scratch, double_input);
1345
      break;
1346
  }  // End of switch-statement.
1347

    
1348
  // Retrieve FCSR.
1349
  cfc1(except_flag, FCSR);
1350
  // Restore FCSR.
1351
  ctc1(scratch, FCSR);
1352
  // Move the converted value into the result register.
1353
  mfc1(result, double_scratch);
1354

    
1355
  // Check for fpu exceptions.
1356
  And(except_flag, except_flag, Operand(except_mask));
1357

    
1358
  bind(&done);
1359
}
1360

    
1361

    
1362
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
1363
                                                DoubleRegister double_input,
1364
                                                Label* done) {
1365
  DoubleRegister single_scratch = kLithiumScratchDouble.low();
1366
  Register scratch = at;
1367
  Register scratch2 = t9;
1368

    
1369
  // Clear cumulative exception flags and save the FCSR.
1370
  cfc1(scratch2, FCSR);
1371
  ctc1(zero_reg, FCSR);
1372
  // Try a conversion to a signed integer.
1373
  trunc_w_d(single_scratch, double_input);
1374
  mfc1(result, single_scratch);
1375
  // Retrieve and restore the FCSR.
1376
  cfc1(scratch, FCSR);
1377
  ctc1(scratch2, FCSR);
1378
  // Check for overflow and NaNs.
1379
  And(scratch,
1380
      scratch,
1381
      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
1382
  // If we had no exceptions we are done.
1383
  Branch(done, eq, scratch, Operand(zero_reg));
1384
}
1385

    
1386

    
1387
void MacroAssembler::TruncateDoubleToI(Register result,
1388
                                       DoubleRegister double_input) {
1389
  Label done;
1390

    
1391
  TryInlineTruncateDoubleToI(result, double_input, &done);
1392

    
1393
  // If we fell through then inline version didn't succeed - call stub instead.
1394
  push(ra);
1395
  Subu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
1396
  sdc1(double_input, MemOperand(sp, 0));
1397

    
1398
  DoubleToIStub stub(sp, result, 0, true, true);
1399
  CallStub(&stub);
1400

    
1401
  Addu(sp, sp, Operand(kDoubleSize));
1402
  pop(ra);
1403

    
1404
  bind(&done);
1405
}
1406

    
1407

    
1408
void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1409
  Label done;
1410
  DoubleRegister double_scratch = f12;
1411
  ASSERT(!result.is(object));
1412

    
1413
  ldc1(double_scratch,
1414
       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
1415
  TryInlineTruncateDoubleToI(result, double_scratch, &done);
1416

    
1417
  // If we fell through then inline version didn't succeed - call stub instead.
1418
  push(ra);
1419
  DoubleToIStub stub(object,
1420
                     result,
1421
                     HeapNumber::kValueOffset - kHeapObjectTag,
1422
                     true,
1423
                     true);
1424
  CallStub(&stub);
1425
  pop(ra);
1426

    
1427
  bind(&done);
1428
}
1429

    
1430

    
1431
void MacroAssembler::TruncateNumberToI(Register object,
1432
                                       Register result,
1433
                                       Register heap_number_map,
1434
                                       Register scratch,
1435
                                       Label* not_number) {
1436
  Label done;
1437
  ASSERT(!result.is(object));
1438

    
1439
  UntagAndJumpIfSmi(result, object, &done);
1440
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1441
  TruncateHeapNumberToI(result, object);
1442

    
1443
  bind(&done);
1444
}
1445

    
1446

    
1447
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1448
                                         Register src,
1449
                                         int num_least_bits) {
1450
  Ext(dst, src, kSmiTagSize, num_least_bits);
1451
}
1452

    
1453

    
1454
void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1455
                                           Register src,
1456
                                           int num_least_bits) {
1457
  And(dst, src, Operand((1 << num_least_bits) - 1));
1458
}
1459

    
1460

    
1461
// Emulated conditional branches do not emit a nop in the branch delay slot.
1462
//
1463
// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1464
#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
1465
    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
1466
    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
1467

    
1468

    
1469
void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
1470
  BranchShort(offset, bdslot);
1471
}
1472

    
1473

    
1474
void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
1475
                            const Operand& rt,
1476
                            BranchDelaySlot bdslot) {
1477
  BranchShort(offset, cond, rs, rt, bdslot);
1478
}
1479

    
1480

    
1481
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
1482
  if (L->is_bound()) {
1483
    if (is_near(L)) {
1484
      BranchShort(L, bdslot);
1485
    } else {
1486
      Jr(L, bdslot);
1487
    }
1488
  } else {
1489
    if (is_trampoline_emitted()) {
1490
      Jr(L, bdslot);
1491
    } else {
1492
      BranchShort(L, bdslot);
1493
    }
1494
  }
1495
}
1496

    
1497

    
1498
void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
1499
                            const Operand& rt,
1500
                            BranchDelaySlot bdslot) {
1501
  if (L->is_bound()) {
1502
    if (is_near(L)) {
1503
      BranchShort(L, cond, rs, rt, bdslot);
1504
    } else {
1505
      Label skip;
1506
      Condition neg_cond = NegateCondition(cond);
1507
      BranchShort(&skip, neg_cond, rs, rt);
1508
      Jr(L, bdslot);
1509
      bind(&skip);
1510
    }
1511
  } else {
1512
    if (is_trampoline_emitted()) {
1513
      Label skip;
1514
      Condition neg_cond = NegateCondition(cond);
1515
      BranchShort(&skip, neg_cond, rs, rt);
1516
      Jr(L, bdslot);
1517
      bind(&skip);
1518
    } else {
1519
      BranchShort(L, cond, rs, rt, bdslot);
1520
    }
1521
  }
1522
}
1523

    
1524

    
1525
void MacroAssembler::Branch(Label* L,
1526
                            Condition cond,
1527
                            Register rs,
1528
                            Heap::RootListIndex index,
1529
                            BranchDelaySlot bdslot) {
1530
  LoadRoot(at, index);
1531
  Branch(L, cond, rs, Operand(at), bdslot);
1532
}
1533

    
1534

    
1535
void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
1536
  b(offset);
1537

    
1538
  // Emit a nop in the branch delay slot if required.
1539
  if (bdslot == PROTECT)
1540
    nop();
1541
}
1542

    
1543

    
1544
void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
1545
                                 const Operand& rt,
1546
                                 BranchDelaySlot bdslot) {
1547
  BRANCH_ARGS_CHECK(cond, rs, rt);
1548
  ASSERT(!rs.is(zero_reg));
1549
  Register r2 = no_reg;
1550
  Register scratch = at;
1551

    
1552
  if (rt.is_reg()) {
1553
    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
1554
    // rt.
1555
    BlockTrampolinePoolScope block_trampoline_pool(this);
1556
    r2 = rt.rm_;
1557
    switch (cond) {
1558
      case cc_always:
1559
        b(offset);
1560
        break;
1561
      case eq:
1562
        beq(rs, r2, offset);
1563
        break;
1564
      case ne:
1565
        bne(rs, r2, offset);
1566
        break;
1567
      // Signed comparison.
1568
      case greater:
1569
        if (r2.is(zero_reg)) {
1570
          bgtz(rs, offset);
1571
        } else {
1572
          slt(scratch, r2, rs);
1573
          bne(scratch, zero_reg, offset);
1574
        }
1575
        break;
1576
      case greater_equal:
1577
        if (r2.is(zero_reg)) {
1578
          bgez(rs, offset);
1579
        } else {
1580
          slt(scratch, rs, r2);
1581
          beq(scratch, zero_reg, offset);
1582
        }
1583
        break;
1584
      case less:
1585
        if (r2.is(zero_reg)) {
1586
          bltz(rs, offset);
1587
        } else {
1588
          slt(scratch, rs, r2);
1589
          bne(scratch, zero_reg, offset);
1590
        }
1591
        break;
1592
      case less_equal:
1593
        if (r2.is(zero_reg)) {
1594
          blez(rs, offset);
1595
        } else {
1596
          slt(scratch, r2, rs);
1597
          beq(scratch, zero_reg, offset);
1598
        }
1599
        break;
1600
      // Unsigned comparison.
1601
      case Ugreater:
1602
        if (r2.is(zero_reg)) {
1603
          bgtz(rs, offset);
1604
        } else {
1605
          sltu(scratch, r2, rs);
1606
          bne(scratch, zero_reg, offset);
1607
        }
1608
        break;
1609
      case Ugreater_equal:
1610
        if (r2.is(zero_reg)) {
1611
          bgez(rs, offset);
1612
        } else {
1613
          sltu(scratch, rs, r2);
1614
          beq(scratch, zero_reg, offset);
1615
        }
1616
        break;
1617
      case Uless:
1618
        if (r2.is(zero_reg)) {
1619
          // No code needs to be emitted.
1620
          return;
1621
        } else {
1622
          sltu(scratch, rs, r2);
1623
          bne(scratch, zero_reg, offset);
1624
        }
1625
        break;
1626
      case Uless_equal:
1627
        if (r2.is(zero_reg)) {
1628
          b(offset);
1629
        } else {
1630
          sltu(scratch, r2, rs);
1631
          beq(scratch, zero_reg, offset);
1632
        }
1633
        break;
1634
      default:
1635
        UNREACHABLE();
1636
    }
1637
  } else {
1638
    // Be careful to always use shifted_branch_offset only just before the
1639
    // branch instruction, as the location will be remembered for patching the
1640
    // target.
1641
    BlockTrampolinePoolScope block_trampoline_pool(this);
1642
    switch (cond) {
1643
      case cc_always:
1644
        b(offset);
1645
        break;
1646
      case eq:
1647
        // We don't want any other register but scratch clobbered.
1648
        ASSERT(!scratch.is(rs));
1649
        r2 = scratch;
1650
        li(r2, rt);
1651
        beq(rs, r2, offset);
1652
        break;
1653
      case ne:
1654
        // We don't want any other register but scratch clobbered.
1655
        ASSERT(!scratch.is(rs));
1656
        r2 = scratch;
1657
        li(r2, rt);
1658
        bne(rs, r2, offset);
1659
        break;
1660
      // Signed comparison.
1661
      case greater:
1662
        if (rt.imm32_ == 0) {
1663
          bgtz(rs, offset);
1664
        } else {
1665
          r2 = scratch;
1666
          li(r2, rt);
1667
          slt(scratch, r2, rs);
1668
          bne(scratch, zero_reg, offset);
1669
        }
1670
        break;
1671
      case greater_equal:
1672
        if (rt.imm32_ == 0) {
1673
          bgez(rs, offset);
1674
        } else if (is_int16(rt.imm32_)) {
1675
          slti(scratch, rs, rt.imm32_);
1676
          beq(scratch, zero_reg, offset);
1677
        } else {
1678
          r2 = scratch;
1679
          li(r2, rt);
1680
          slt(scratch, rs, r2);
1681
          beq(scratch, zero_reg, offset);
1682
        }
1683
        break;
1684
      case less:
1685
        if (rt.imm32_ == 0) {
1686
          bltz(rs, offset);
1687
        } else if (is_int16(rt.imm32_)) {
1688
          slti(scratch, rs, rt.imm32_);
1689
          bne(scratch, zero_reg, offset);
1690
        } else {
1691
          r2 = scratch;
1692
          li(r2, rt);
1693
          slt(scratch, rs, r2);
1694
          bne(scratch, zero_reg, offset);
1695
        }
1696
        break;
1697
      case less_equal:
1698
        if (rt.imm32_ == 0) {
1699
          blez(rs, offset);
1700
        } else {
1701
          r2 = scratch;
1702
          li(r2, rt);
1703
          slt(scratch, r2, rs);
1704
          beq(scratch, zero_reg, offset);
1705
        }
1706
        break;
1707
      // Unsigned comparison.
1708
      case Ugreater:
1709
        if (rt.imm32_ == 0) {
1710
          bgtz(rs, offset);
1711
        } else {
1712
          r2 = scratch;
1713
          li(r2, rt);
1714
          sltu(scratch, r2, rs);
1715
          bne(scratch, zero_reg, offset);
1716
        }
1717
        break;
1718
      case Ugreater_equal:
1719
        if (rt.imm32_ == 0) {
1720
          bgez(rs, offset);
1721
        } else if (is_int16(rt.imm32_)) {
1722
          sltiu(scratch, rs, rt.imm32_);
1723
          beq(scratch, zero_reg, offset);
1724
        } else {
1725
          r2 = scratch;
1726
          li(r2, rt);
1727
          sltu(scratch, rs, r2);
1728
          beq(scratch, zero_reg, offset);
1729
        }
1730
        break;
1731
      case Uless:
1732
        if (rt.imm32_ == 0) {
1733
          // No code needs to be emitted.
1734
          return;
1735
        } else if (is_int16(rt.imm32_)) {
1736
          sltiu(scratch, rs, rt.imm32_);
1737
          bne(scratch, zero_reg, offset);
1738
        } else {
1739
          r2 = scratch;
1740
          li(r2, rt);
1741
          sltu(scratch, rs, r2);
1742
          bne(scratch, zero_reg, offset);
1743
        }
1744
        break;
1745
      case Uless_equal:
1746
        if (rt.imm32_ == 0) {
1747
          b(offset);
1748
        } else {
1749
          r2 = scratch;
1750
          li(r2, rt);
1751
          sltu(scratch, r2, rs);
1752
          beq(scratch, zero_reg, offset);
1753
        }
1754
        break;
1755
      default:
1756
        UNREACHABLE();
1757
    }
1758
  }
1759
  // Emit a nop in the branch delay slot if required.
1760
  if (bdslot == PROTECT)
1761
    nop();
1762
}
1763
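// Annotation added for exposition (not part of the original source): MIPS has
// no "branch if greater-or-equal" against an arbitrary operand, so the
// immediate cases above materialize a 0/1 flag and branch against zero_reg.
// For example, (rs >= imm) with a 16-bit immediate becomes
//   slti  at, rs, imm        // at = (rs < imm) ? 1 : 0  (scratch == at here)
//   beq   at, zero_reg, L    // taken exactly when rs >= imm
//   nop                      // delay slot, only when bdslot == PROTECT
// and falls back to li(at, imm) followed by slt when imm does not fit in
// 16 bits.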

    
1764

    
1765
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
1766
  // We use branch_offset as an argument for the branch instructions to be sure
1767
  // it is called just before generating the branch instruction, as needed.
1768

    
1769
  b(shifted_branch_offset(L, false));
1770

    
1771
  // Emit a nop in the branch delay slot if required.
1772
  if (bdslot == PROTECT)
1773
    nop();
1774
}
1775

    
1776

    
1777
void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
1778
                                 const Operand& rt,
1779
                                 BranchDelaySlot bdslot) {
1780
  BRANCH_ARGS_CHECK(cond, rs, rt);
1781

    
1782
  int32_t offset = 0;
1783
  Register r2 = no_reg;
1784
  Register scratch = at;
1785
  if (rt.is_reg()) {
1786
    BlockTrampolinePoolScope block_trampoline_pool(this);
1787
    r2 = rt.rm_;
1788
    // Be careful to always use shifted_branch_offset only just before the
1789
    // branch instruction, as the location will be remembered for patching the
1790
    // target.
1791
    switch (cond) {
1792
      case cc_always:
1793
        offset = shifted_branch_offset(L, false);
1794
        b(offset);
1795
        break;
1796
      case eq:
1797
        offset = shifted_branch_offset(L, false);
1798
        beq(rs, r2, offset);
1799
        break;
1800
      case ne:
1801
        offset = shifted_branch_offset(L, false);
1802
        bne(rs, r2, offset);
1803
        break;
1804
      // Signed comparison.
1805
      case greater:
1806
        if (r2.is(zero_reg)) {
1807
          offset = shifted_branch_offset(L, false);
1808
          bgtz(rs, offset);
1809
        } else {
1810
          slt(scratch, r2, rs);
1811
          offset = shifted_branch_offset(L, false);
1812
          bne(scratch, zero_reg, offset);
1813
        }
1814
        break;
1815
      case greater_equal:
1816
        if (r2.is(zero_reg)) {
1817
          offset = shifted_branch_offset(L, false);
1818
          bgez(rs, offset);
1819
        } else {
1820
          slt(scratch, rs, r2);
1821
          offset = shifted_branch_offset(L, false);
1822
          beq(scratch, zero_reg, offset);
1823
        }
1824
        break;
1825
      case less:
1826
        if (r2.is(zero_reg)) {
1827
          offset = shifted_branch_offset(L, false);
1828
          bltz(rs, offset);
1829
        } else {
1830
          slt(scratch, rs, r2);
1831
          offset = shifted_branch_offset(L, false);
1832
          bne(scratch, zero_reg, offset);
1833
        }
1834
        break;
1835
      case less_equal:
1836
        if (r2.is(zero_reg)) {
1837
          offset = shifted_branch_offset(L, false);
1838
          blez(rs, offset);
1839
        } else {
1840
          slt(scratch, r2, rs);
1841
          offset = shifted_branch_offset(L, false);
1842
          beq(scratch, zero_reg, offset);
1843
        }
1844
        break;
1845
      // Unsigned comparison.
1846
      case Ugreater:
1847
        if (r2.is(zero_reg)) {
1848
          offset = shifted_branch_offset(L, false);
1849
          bgtz(rs, offset);
1850
        } else {
1851
          sltu(scratch, r2, rs);
1852
          offset = shifted_branch_offset(L, false);
1853
          bne(scratch, zero_reg, offset);
1854
        }
1855
        break;
1856
      case Ugreater_equal:
1857
        if (r2.is(zero_reg)) {
1858
          offset = shifted_branch_offset(L, false);
1859
          bgez(rs, offset);
1860
        } else {
1861
          sltu(scratch, rs, r2);
1862
          offset = shifted_branch_offset(L, false);
1863
          beq(scratch, zero_reg, offset);
1864
        }
1865
        break;
1866
      case Uless:
1867
        if (r2.is(zero_reg)) {
1868
          // No code needs to be emitted.
1869
          return;
1870
        } else {
1871
          sltu(scratch, rs, r2);
1872
          offset = shifted_branch_offset(L, false);
1873
          bne(scratch, zero_reg, offset);
1874
        }
1875
        break;
1876
      case Uless_equal:
1877
        if (r2.is(zero_reg)) {
1878
          offset = shifted_branch_offset(L, false);
1879
          b(offset);
1880
        } else {
1881
          sltu(scratch, r2, rs);
1882
          offset = shifted_branch_offset(L, false);
1883
          beq(scratch, zero_reg, offset);
1884
        }
1885
        break;
1886
      default:
1887
        UNREACHABLE();
1888
    }
1889
  } else {
1890
    // Be careful to always use shifted_branch_offset only just before the
1891
    // branch instruction, as the location will be remembered for patching the
1892
    // target.
1893
    BlockTrampolinePoolScope block_trampoline_pool(this);
1894
    switch (cond) {
1895
      case cc_always:
1896
        offset = shifted_branch_offset(L, false);
1897
        b(offset);
1898
        break;
1899
      case eq:
1900
        ASSERT(!scratch.is(rs));
1901
        r2 = scratch;
1902
        li(r2, rt);
1903
        offset = shifted_branch_offset(L, false);
1904
        beq(rs, r2, offset);
1905
        break;
1906
      case ne:
1907
        ASSERT(!scratch.is(rs));
1908
        r2 = scratch;
1909
        li(r2, rt);
1910
        offset = shifted_branch_offset(L, false);
1911
        bne(rs, r2, offset);
1912
        break;
1913
      // Signed comparison.
1914
      case greater:
1915
        if (rt.imm32_ == 0) {
1916
          offset = shifted_branch_offset(L, false);
1917
          bgtz(rs, offset);
1918
        } else {
1919
          ASSERT(!scratch.is(rs));
1920
          r2 = scratch;
1921
          li(r2, rt);
1922
          slt(scratch, r2, rs);
1923
          offset = shifted_branch_offset(L, false);
1924
          bne(scratch, zero_reg, offset);
1925
        }
1926
        break;
1927
      case greater_equal:
1928
        if (rt.imm32_ == 0) {
1929
          offset = shifted_branch_offset(L, false);
1930
          bgez(rs, offset);
1931
        } else if (is_int16(rt.imm32_)) {
1932
          slti(scratch, rs, rt.imm32_);
1933
          offset = shifted_branch_offset(L, false);
1934
          beq(scratch, zero_reg, offset);
1935
        } else {
1936
          ASSERT(!scratch.is(rs));
1937
          r2 = scratch;
1938
          li(r2, rt);
1939
          slt(scratch, rs, r2);
1940
          offset = shifted_branch_offset(L, false);
1941
          beq(scratch, zero_reg, offset);
1942
        }
1943
        break;
1944
      case less:
1945
        if (rt.imm32_ == 0) {
1946
          offset = shifted_branch_offset(L, false);
1947
          bltz(rs, offset);
1948
        } else if (is_int16(rt.imm32_)) {
1949
          slti(scratch, rs, rt.imm32_);
1950
          offset = shifted_branch_offset(L, false);
1951
          bne(scratch, zero_reg, offset);
1952
        } else {
1953
          ASSERT(!scratch.is(rs));
1954
          r2 = scratch;
1955
          li(r2, rt);
1956
          slt(scratch, rs, r2);
1957
          offset = shifted_branch_offset(L, false);
1958
          bne(scratch, zero_reg, offset);
1959
        }
1960
        break;
1961
      case less_equal:
1962
        if (rt.imm32_ == 0) {
1963
          offset = shifted_branch_offset(L, false);
1964
          blez(rs, offset);
1965
        } else {
1966
          ASSERT(!scratch.is(rs));
1967
          r2 = scratch;
1968
          li(r2, rt);
1969
          slt(scratch, r2, rs);
1970
          offset = shifted_branch_offset(L, false);
1971
          beq(scratch, zero_reg, offset);
1972
        }
1973
        break;
1974
      // Unsigned comparison.
1975
      case Ugreater:
1976
        if (rt.imm32_ == 0) {
1977
          offset = shifted_branch_offset(L, false);
1978
          bgtz(rs, offset);
1979
        } else {
1980
          ASSERT(!scratch.is(rs));
1981
          r2 = scratch;
1982
          li(r2, rt);
1983
          sltu(scratch, r2, rs);
1984
          offset = shifted_branch_offset(L, false);
1985
          bne(scratch, zero_reg, offset);
1986
        }
1987
        break;
1988
      case Ugreater_equal:
1989
        if (rt.imm32_ == 0) {
1990
          offset = shifted_branch_offset(L, false);
1991
          bgez(rs, offset);
1992
        } else if (is_int16(rt.imm32_)) {
1993
          sltiu(scratch, rs, rt.imm32_);
1994
          offset = shifted_branch_offset(L, false);
1995
          beq(scratch, zero_reg, offset);
1996
        } else {
1997
          ASSERT(!scratch.is(rs));
1998
          r2 = scratch;
1999
          li(r2, rt);
2000
          sltu(scratch, rs, r2);
2001
          offset = shifted_branch_offset(L, false);
2002
          beq(scratch, zero_reg, offset);
2003
        }
2004
        break;
2005
      case Uless:
2006
        if (rt.imm32_ == 0) {
2007
          // No code needs to be emitted.
2008
          return;
2009
        } else if (is_int16(rt.imm32_)) {
2010
          sltiu(scratch, rs, rt.imm32_);
2011
          offset = shifted_branch_offset(L, false);
2012
          bne(scratch, zero_reg, offset);
2013
        } else {
2014
          ASSERT(!scratch.is(rs));
2015
          r2 = scratch;
2016
          li(r2, rt);
2017
          sltu(scratch, rs, r2);
2018
          offset = shifted_branch_offset(L, false);
2019
          bne(scratch, zero_reg, offset);
2020
        }
2021
        break;
2022
      case Uless_equal:
2023
        if (rt.imm32_ == 0) {
2024
          offset = shifted_branch_offset(L, false);
2025
          b(offset);
2026
        } else {
2027
          ASSERT(!scratch.is(rs));
2028
          r2 = scratch;
2029
          li(r2, rt);
2030
          sltu(scratch, r2, rs);
2031
          offset = shifted_branch_offset(L, false);
2032
          beq(scratch, zero_reg, offset);
2033
        }
2034
        break;
2035
      default:
2036
        UNREACHABLE();
2037
    }
2038
  }
2039
  // Check that the offset actually fits in an int16_t.
2040
  ASSERT(is_int16(offset));
2041
  // Emit a nop in the branch delay slot if required.
2042
  if (bdslot == PROTECT)
2043
    nop();
2044
}
2045
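// Annotation (added note, not part of the original source):
// shifted_branch_offset() is evaluated immediately before each branch above
// because that is the pc the assembler records for patching a still-unbound
// label. The encoded offset is a signed 16-bit count of instructions, so the
// reachable window is about 2^15 instructions * 4 bytes = 128KB either way,
// which is what the ASSERT(is_int16(offset)) at the end of the function above
// re-checks after emission.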

    
2046

    
2047
void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
2048
  BranchAndLinkShort(offset, bdslot);
2049
}
2050

    
2051

    
2052
void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
2053
                                   const Operand& rt,
2054
                                   BranchDelaySlot bdslot) {
2055
  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
2056
}
2057

    
2058

    
2059
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
2060
  if (L->is_bound()) {
2061
    if (is_near(L)) {
2062
      BranchAndLinkShort(L, bdslot);
2063
    } else {
2064
      Jalr(L, bdslot);
2065
    }
2066
  } else {
2067
    if (is_trampoline_emitted()) {
2068
      Jalr(L, bdslot);
2069
    } else {
2070
      BranchAndLinkShort(L, bdslot);
2071
    }
2072
  }
2073
}
2074

    
2075

    
2076
void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
2077
                                   const Operand& rt,
2078
                                   BranchDelaySlot bdslot) {
2079
  if (L->is_bound()) {
2080
    if (is_near(L)) {
2081
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
2082
    } else {
2083
      Label skip;
2084
      Condition neg_cond = NegateCondition(cond);
2085
      BranchShort(&skip, neg_cond, rs, rt);
2086
      Jalr(L, bdslot);
2087
      bind(&skip);
2088
    }
2089
  } else {
2090
    if (is_trampoline_emitted()) {
2091
      Label skip;
2092
      Condition neg_cond = NegateCondition(cond);
2093
      BranchShort(&skip, neg_cond, rs, rt);
2094
      Jalr(L, bdslot);
2095
      bind(&skip);
2096
    } else {
2097
      BranchAndLinkShort(L, cond, rs, rt, bdslot);
2098
    }
2099
  }
2100
}
2101

    
2102

    
2103
// We need to use a bgezal or bltzal, but they can't be used directly with the
2104
// slt instructions. We could use sub or add instead but we would miss overflow
2105
// cases, so we keep slt and add an intermediate third instruction.
2106
void MacroAssembler::BranchAndLinkShort(int16_t offset,
2107
                                        BranchDelaySlot bdslot) {
2108
  bal(offset);
2109

    
2110
  // Emit a nop in the branch delay slot if required.
2111
  if (bdslot == PROTECT)
2112
    nop();
2113
}
2114

    
2115

    
2116
void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
2117
                                        Register rs, const Operand& rt,
2118
                                        BranchDelaySlot bdslot) {
2119
  BRANCH_ARGS_CHECK(cond, rs, rt);
2120
  Register r2 = no_reg;
2121
  Register scratch = at;
2122

    
2123
  if (rt.is_reg()) {
2124
    r2 = rt.rm_;
2125
  } else if (cond != cc_always) {
2126
    r2 = scratch;
2127
    li(r2, rt);
2128
  }
2129

    
2130
  {
2131
    BlockTrampolinePoolScope block_trampoline_pool(this);
2132
    switch (cond) {
2133
      case cc_always:
2134
        bal(offset);
2135
        break;
2136
      case eq:
2137
        bne(rs, r2, 2);
2138
        nop();
2139
        bal(offset);
2140
        break;
2141
      case ne:
2142
        beq(rs, r2, 2);
2143
        nop();
2144
        bal(offset);
2145
        break;
2146

    
2147
      // Signed comparison.
2148
      case greater:
2149
        slt(scratch, r2, rs);
2150
        addiu(scratch, scratch, -1);
2151
        bgezal(scratch, offset);
2152
        break;
2153
      case greater_equal:
2154
        slt(scratch, rs, r2);
2155
        addiu(scratch, scratch, -1);
2156
        bltzal(scratch, offset);
2157
        break;
2158
      case less:
2159
        slt(scratch, rs, r2);
2160
        addiu(scratch, scratch, -1);
2161
        bgezal(scratch, offset);
2162
        break;
2163
      case less_equal:
2164
        slt(scratch, r2, rs);
2165
        addiu(scratch, scratch, -1);
2166
        bltzal(scratch, offset);
2167
        break;
2168

    
2169
      // Unsigned comparison.
2170
      case Ugreater:
2171
        sltu(scratch, r2, rs);
2172
        addiu(scratch, scratch, -1);
2173
        bgezal(scratch, offset);
2174
        break;
2175
      case Ugreater_equal:
2176
        sltu(scratch, rs, r2);
2177
        addiu(scratch, scratch, -1);
2178
        bltzal(scratch, offset);
2179
        break;
2180
      case Uless:
2181
        sltu(scratch, rs, r2);
2182
        addiu(scratch, scratch, -1);
2183
        bgezal(scratch, offset);
2184
        break;
2185
      case Uless_equal:
2186
        sltu(scratch, r2, rs);
2187
        addiu(scratch, scratch, -1);
2188
        bltzal(scratch, offset);
2189
        break;
2190

    
2191
      default:
2192
        UNREACHABLE();
2193
    }
2194
  }
2195
  // Emit a nop in the branch delay slot if required.
2196
  if (bdslot == PROTECT)
2197
    nop();
2198
}
2199
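// Worked example of the trick described above the BranchAndLinkShort overloads
// (annotation only, not part of the original source), for the 'greater' case:
//   slt    at, r2, rs      // at = (rs > r2) ? 1 : 0
//   addiu  at, at, -1      // maps {0, 1} -> {-1, 0}
//   bgezal at, offset      // bgezal tests only the sign, so it links and
//                          // branches exactly when the condition held
// bgezal/bltzal cannot consume the 0/1 result of slt directly, hence the
// intermediate addiu that turns it into a sign.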

    
2200

    
2201
void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
2202
  bal(shifted_branch_offset(L, false));
2203

    
2204
  // Emit a nop in the branch delay slot if required.
2205
  if (bdslot == PROTECT)
2206
    nop();
2207
}
2208

    
2209

    
2210
void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
2211
                                        const Operand& rt,
2212
                                        BranchDelaySlot bdslot) {
2213
  BRANCH_ARGS_CHECK(cond, rs, rt);
2214

    
2215
  int32_t offset = 0;
2216
  Register r2 = no_reg;
2217
  Register scratch = at;
2218
  if (rt.is_reg()) {
2219
    r2 = rt.rm_;
2220
  } else if (cond != cc_always) {
2221
    r2 = scratch;
2222
    li(r2, rt);
2223
  }
2224

    
2225
  {
2226
    BlockTrampolinePoolScope block_trampoline_pool(this);
2227
    switch (cond) {
2228
      case cc_always:
2229
        offset = shifted_branch_offset(L, false);
2230
        bal(offset);
2231
        break;
2232
      case eq:
2233
        bne(rs, r2, 2);
2234
        nop();
2235
        offset = shifted_branch_offset(L, false);
2236
        bal(offset);
2237
        break;
2238
      case ne:
2239
        beq(rs, r2, 2);
2240
        nop();
2241
        offset = shifted_branch_offset(L, false);
2242
        bal(offset);
2243
        break;
2244

    
2245
      // Signed comparison.
2246
      case greater:
2247
        slt(scratch, r2, rs);
2248
        addiu(scratch, scratch, -1);
2249
        offset = shifted_branch_offset(L, false);
2250
        bgezal(scratch, offset);
2251
        break;
2252
      case greater_equal:
2253
        slt(scratch, rs, r2);
2254
        addiu(scratch, scratch, -1);
2255
        offset = shifted_branch_offset(L, false);
2256
        bltzal(scratch, offset);
2257
        break;
2258
      case less:
2259
        slt(scratch, rs, r2);
2260
        addiu(scratch, scratch, -1);
2261
        offset = shifted_branch_offset(L, false);
2262
        bgezal(scratch, offset);
2263
        break;
2264
      case less_equal:
2265
        slt(scratch, r2, rs);
2266
        addiu(scratch, scratch, -1);
2267
        offset = shifted_branch_offset(L, false);
2268
        bltzal(scratch, offset);
2269
        break;
2270

    
2271
      // Unsigned comparison.
2272
      case Ugreater:
2273
        sltu(scratch, r2, rs);
2274
        addiu(scratch, scratch, -1);
2275
        offset = shifted_branch_offset(L, false);
2276
        bgezal(scratch, offset);
2277
        break;
2278
      case Ugreater_equal:
2279
        sltu(scratch, rs, r2);
2280
        addiu(scratch, scratch, -1);
2281
        offset = shifted_branch_offset(L, false);
2282
        bltzal(scratch, offset);
2283
        break;
2284
      case Uless:
2285
        sltu(scratch, rs, r2);
2286
        addiu(scratch, scratch, -1);
2287
        offset = shifted_branch_offset(L, false);
2288
        bgezal(scratch, offset);
2289
        break;
2290
      case Uless_equal:
2291
        sltu(scratch, r2, rs);
2292
        addiu(scratch, scratch, -1);
2293
        offset = shifted_branch_offset(L, false);
2294
        bltzal(scratch, offset);
2295
        break;
2296

    
2297
      default:
2298
        UNREACHABLE();
2299
    }
2300
  }
2301
  // Check that the offset actually fits in an int16_t.
2302
  ASSERT(is_int16(offset));
2303

    
2304
  // Emit a nop in the branch delay slot if required.
2305
  if (bdslot == PROTECT)
2306
    nop();
2307
}
2308

    
2309

    
2310
void MacroAssembler::Jump(Register target,
2311
                          Condition cond,
2312
                          Register rs,
2313
                          const Operand& rt,
2314
                          BranchDelaySlot bd) {
2315
  BlockTrampolinePoolScope block_trampoline_pool(this);
2316
  if (cond == cc_always) {
2317
    jr(target);
2318
  } else {
2319
    BRANCH_ARGS_CHECK(cond, rs, rt);
2320
    Branch(2, NegateCondition(cond), rs, rt);
2321
    jr(target);
2322
  }
2323
  // Emit a nop in the branch delay slot if required.
2324
  if (bd == PROTECT)
2325
    nop();
2326
}
2327

    
2328

    
2329
void MacroAssembler::Jump(intptr_t target,
2330
                          RelocInfo::Mode rmode,
2331
                          Condition cond,
2332
                          Register rs,
2333
                          const Operand& rt,
2334
                          BranchDelaySlot bd) {
2335
  Label skip;
2336
  if (cond != cc_always) {
2337
    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
2338
  }
2339
  // The first instruction of 'li' may be placed in the delay slot.
2340
  // This is not an issue, t9 is expected to be clobbered anyway.
2341
  li(t9, Operand(target, rmode));
2342
  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
2343
  bind(&skip);
2344
}
2345
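// Sketch of what a conditional Jump(target, rmode, cond, ...) emits
// (annotation only; assumes the usual two-instruction li of a relocatable
// 32-bit target):
//   b<!cond> skip              // negated condition
//   lui   t9, hi(target)       // sits in the branch delay slot; harmless,
//   ori   t9, t9, lo(target)   // since t9 is clobbered by the jump anyway
//   jr    t9
//   nop                        // only when bd == PROTECT
// skip: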

    
2346

    
2347
void MacroAssembler::Jump(Address target,
2348
                          RelocInfo::Mode rmode,
2349
                          Condition cond,
2350
                          Register rs,
2351
                          const Operand& rt,
2352
                          BranchDelaySlot bd) {
2353
  ASSERT(!RelocInfo::IsCodeTarget(rmode));
2354
  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
2355
}
2356

    
2357

    
2358
void MacroAssembler::Jump(Handle<Code> code,
2359
                          RelocInfo::Mode rmode,
2360
                          Condition cond,
2361
                          Register rs,
2362
                          const Operand& rt,
2363
                          BranchDelaySlot bd) {
2364
  ASSERT(RelocInfo::IsCodeTarget(rmode));
2365
  AllowDeferredHandleDereference embedding_raw_address;
2366
  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
2367
}
2368

    
2369

    
2370
int MacroAssembler::CallSize(Register target,
2371
                             Condition cond,
2372
                             Register rs,
2373
                             const Operand& rt,
2374
                             BranchDelaySlot bd) {
2375
  int size = 0;
2376

    
2377
  if (cond == cc_always) {
2378
    size += 1;
2379
  } else {
2380
    size += 3;
2381
  }
2382

    
2383
  if (bd == PROTECT)
2384
    size += 1;
2385

    
2386
  return size * kInstrSize;
2387
}
2388
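// Annotation (not part of the original source): the counts above mirror the
// sequence emitted by Call(Register) below, e.g. for an eq/ne-guarded call:
//   b<!cond> +2   \
//   nop            > 3 instructions when cond != cc_always
//   jalr  target  /
//   nop               +1 more when bd == PROTECT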

    
2389

    
2390
// Note: To call gcc-compiled C code on mips, you must call through t9.
2391
void MacroAssembler::Call(Register target,
2392
                          Condition cond,
2393
                          Register rs,
2394
                          const Operand& rt,
2395
                          BranchDelaySlot bd) {
2396
  BlockTrampolinePoolScope block_trampoline_pool(this);
2397
  Label start;
2398
  bind(&start);
2399
  if (cond == cc_always) {
2400
    jalr(target);
2401
  } else {
2402
    BRANCH_ARGS_CHECK(cond, rs, rt);
2403
    Branch(2, NegateCondition(cond), rs, rt);
2404
    jalr(target);
2405
  }
2406
  // Emit a nop in the branch delay slot if required.
2407
  if (bd == PROTECT)
2408
    nop();
2409

    
2410
  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
2411
            SizeOfCodeGeneratedSince(&start));
2412
}
2413

    
2414

    
2415
int MacroAssembler::CallSize(Address target,
2416
                             RelocInfo::Mode rmode,
2417
                             Condition cond,
2418
                             Register rs,
2419
                             const Operand& rt,
2420
                             BranchDelaySlot bd) {
2421
  int size = CallSize(t9, cond, rs, rt, bd);
2422
  return size + 2 * kInstrSize;
2423
}
2424

    
2425

    
2426
void MacroAssembler::Call(Address target,
2427
                          RelocInfo::Mode rmode,
2428
                          Condition cond,
2429
                          Register rs,
2430
                          const Operand& rt,
2431
                          BranchDelaySlot bd) {
2432
  BlockTrampolinePoolScope block_trampoline_pool(this);
2433
  Label start;
2434
  bind(&start);
2435
  int32_t target_int = reinterpret_cast<int32_t>(target);
2436
  // Must record previous source positions before the
2437
  // li() generates a new code target.
2438
  positions_recorder()->WriteRecordedPositions();
2439
  li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
2440
  Call(t9, cond, rs, rt, bd);
2441
  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2442
            SizeOfCodeGeneratedSince(&start));
2443
}
2444

    
2445

    
2446
int MacroAssembler::CallSize(Handle<Code> code,
2447
                             RelocInfo::Mode rmode,
2448
                             TypeFeedbackId ast_id,
2449
                             Condition cond,
2450
                             Register rs,
2451
                             const Operand& rt,
2452
                             BranchDelaySlot bd) {
2453
  AllowDeferredHandleDereference using_raw_address;
2454
  return CallSize(reinterpret_cast<Address>(code.location()),
2455
      rmode, cond, rs, rt, bd);
2456
}
2457

    
2458

    
2459
void MacroAssembler::Call(Handle<Code> code,
2460
                          RelocInfo::Mode rmode,
2461
                          TypeFeedbackId ast_id,
2462
                          Condition cond,
2463
                          Register rs,
2464
                          const Operand& rt,
2465
                          BranchDelaySlot bd) {
2466
  BlockTrampolinePoolScope block_trampoline_pool(this);
2467
  Label start;
2468
  bind(&start);
2469
  ASSERT(RelocInfo::IsCodeTarget(rmode));
2470
  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
2471
    SetRecordedAstId(ast_id);
2472
    rmode = RelocInfo::CODE_TARGET_WITH_ID;
2473
  }
2474
  AllowDeferredHandleDereference embedding_raw_address;
2475
  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
2476
  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
2477
            SizeOfCodeGeneratedSince(&start));
2478
}
2479

    
2480

    
2481
void MacroAssembler::Ret(Condition cond,
2482
                         Register rs,
2483
                         const Operand& rt,
2484
                         BranchDelaySlot bd) {
2485
  Jump(ra, cond, rs, rt, bd);
2486
}
2487

    
2488

    
2489
void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2490
  BlockTrampolinePoolScope block_trampoline_pool(this);
2491

    
2492
  uint32_t imm28;
2493
  imm28 = jump_address(L);
2494
  imm28 &= kImm28Mask;
2495
  { BlockGrowBufferScope block_buf_growth(this);
2496
    // Buffer growth (and relocation) must be blocked for internal references
2497
    // until associated instructions are emitted and available to be patched.
2498
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2499
    j(imm28);
2500
  }
2501
  // Emit a nop in the branch delay slot if required.
2502
  if (bdslot == PROTECT)
2503
    nop();
2504
}
2505

    
2506

    
2507
void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2508
  BlockTrampolinePoolScope block_trampoline_pool(this);
2509

    
2510
  uint32_t imm32;
2511
  imm32 = jump_address(L);
2512
  { BlockGrowBufferScope block_buf_growth(this);
2513
    // Buffer growth (and relocation) must be blocked for internal references
2514
    // until associated instructions are emitted and available to be patched.
2515
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2516
    lui(at, (imm32 & kHiMask) >> kLuiShift);
2517
    ori(at, at, (imm32 & kImm16Mask));
2518
  }
2519
  jr(at);
2520

    
2521
  // Emit a nop in the branch delay slot if required.
2522
  if (bdslot == PROTECT)
2523
    nop();
2524
}
2525

    
2526

    
2527
void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2528
  BlockTrampolinePoolScope block_trampoline_pool(this);
2529

    
2530
  uint32_t imm32;
2531
  imm32 = jump_address(L);
2532
  { BlockGrowBufferScope block_buf_growth(this);
2533
    // Buffer growth (and relocation) must be blocked for internal references
2534
    // until associated instructions are emitted and available to be patched.
2535
    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2536
    lui(at, (imm32 & kHiMask) >> kLuiShift);
2537
    ori(at, at, (imm32 & kImm16Mask));
2538
  }
2539
  jalr(at);
2540

    
2541
  // Emit a nop in the branch delay slot if required.
2542
  if (bdslot == PROTECT)
2543
    nop();
2544
}
2545
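// Annotation (added note, not part of the original source): J() uses the
// 26-bit 'j' encoding, so after the kImm28Mask it can only reach targets in
// the same 256MB (2^28-byte) region as the delay slot. Jr()/Jalr() instead
// materialize the full 32-bit address:
//   lui  at, hi(imm32)
//   ori  at, at, lo(imm32)
//   jr   at                  // jalr at for Jalr(), which also links ra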

    
2546

    
2547
void MacroAssembler::DropAndRet(int drop) {
2548
  Ret(USE_DELAY_SLOT);
2549
  addiu(sp, sp, drop * kPointerSize);
2550
}
2551
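// Annotation (not part of the original source): thanks to USE_DELAY_SLOT the
// stack adjustment above lands in the delay slot of the return jump, so the
// pair assembles to
//   jr    ra
//   addiu sp, sp, drop * kPointerSize   // executes before control leaves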

    
2552
void MacroAssembler::DropAndRet(int drop,
2553
                                Condition cond,
2554
                                Register r1,
2555
                                const Operand& r2) {
2556
  // Both Drop and Ret need to be conditional.
2557
  Label skip;
2558
  if (cond != cc_always) {
2559
    Branch(&skip, NegateCondition(cond), r1, r2);
2560
  }
2561

    
2562
  Drop(drop);
2563
  Ret();
2564

    
2565
  if (cond != cc_always) {
2566
    bind(&skip);
2567
  }
2568
}
2569

    
2570

    
2571
void MacroAssembler::Drop(int count,
2572
                          Condition cond,
2573
                          Register reg,
2574
                          const Operand& op) {
2575
  if (count <= 0) {
2576
    return;
2577
  }
2578

    
2579
  Label skip;
2580

    
2581
  if (cond != al) {
2582
    Branch(&skip, NegateCondition(cond), reg, op);
2583
  }
2584

    
2585
  addiu(sp, sp, count * kPointerSize);
2586

    
2587
  if (cond != al) {
2588
    bind(&skip);
2589
  }
2590
}
2591

    
2592

    
2593

    
2594
void MacroAssembler::Swap(Register reg1,
2595
                          Register reg2,
2596
                          Register scratch) {
2597
  if (scratch.is(no_reg)) {
2598
    Xor(reg1, reg1, Operand(reg2));
2599
    Xor(reg2, reg2, Operand(reg1));
2600
    Xor(reg1, reg1, Operand(reg2));
2601
  } else {
2602
    mov(scratch, reg1);
2603
    mov(reg1, reg2);
2604
    mov(reg2, scratch);
2605
  }
2606
}
2607
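// Worked example of the scratch-free path above (annotation only): writing
// a = reg1 and b = reg2 with initial values a0 and b0,
//   a ^= b;   // a == a0 ^ b0
//   b ^= a;   // b == b0 ^ (a0 ^ b0) == a0
//   a ^= b;   // a == (a0 ^ b0) ^ a0 == b0
// so the two registers are exchanged without a temporary, at the cost of a
// three-instruction dependency chain instead of three independent moves.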

    
2608

    
2609
void MacroAssembler::Call(Label* target) {
2610
  BranchAndLink(target);
2611
}
2612

    
2613

    
2614
void MacroAssembler::Push(Handle<Object> handle) {
2615
  li(at, Operand(handle));
2616
  push(at);
2617
}
2618

    
2619

    
2620
#ifdef ENABLE_DEBUGGER_SUPPORT
2621

    
2622
void MacroAssembler::DebugBreak() {
2623
  PrepareCEntryArgs(0);
2624
  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2625
  CEntryStub ces(1);
2626
  ASSERT(AllowThisStubCall(&ces));
2627
  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
2628
}
2629

    
2630
#endif  // ENABLE_DEBUGGER_SUPPORT
2631

    
2632

    
2633
// ---------------------------------------------------------------------------
2634
// Exception handling.
2635

    
2636
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2637
                                    int handler_index) {
2638
  // Adjust this code if not the case.
2639
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2640
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2641
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2642
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2643
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2644
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2645

    
2646
  // For the JSEntry handler, we must preserve a0-a3 and s0.
2647
  // t1-t3 are available. We will build up the handler from the bottom by
2648
  // pushing on the stack.
2649
  // Set up the code object (t1) and the state (t2) for pushing.
2650
  unsigned state =
2651
      StackHandler::IndexField::encode(handler_index) |
2652
      StackHandler::KindField::encode(kind);
2653
  li(t1, Operand(CodeObject()), CONSTANT_SIZE);
2654
  li(t2, Operand(state));
2655

    
2656
  // Push the frame pointer, context, state, and code object.
2657
  if (kind == StackHandler::JS_ENTRY) {
2658
    ASSERT_EQ(Smi::FromInt(0), 0);
2659
    // The second zero_reg indicates no context.
2660
    // The first zero_reg is the NULL frame pointer.
2661
    // The operands are reversed to match the order of MultiPush/Pop.
2662
    Push(zero_reg, zero_reg, t2, t1);
2663
  } else {
2664
    MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
2665
  }
2666

    
2667
  // Link the current handler as the next handler.
2668
  li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2669
  lw(t1, MemOperand(t2));
2670
  push(t1);
2671
  // Set this new handler as the current one.
2672
  sw(sp, MemOperand(t2));
2673
}
2674
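// Annotation (layout sketch, not part of the original source): after
// PushTryHandler the new handler occupies five words, matching the
// STATIC_ASSERTs above, and sp is left pointing at the 'next' link:
//   [sp + 4 * kPointerSize]  saved fp      (zero_reg for JS_ENTRY)
//   [sp + 3 * kPointerSize]  context       (zero_reg for JS_ENTRY)
//   [sp + 2 * kPointerSize]  state         (kind | handler index)
//   [sp + 1 * kPointerSize]  code object
//   [sp + 0]                 next handler  (previous kHandlerAddress value)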

    
2675

    
2676
void MacroAssembler::PopTryHandler() {
2677
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2678
  pop(a1);
2679
  Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2680
  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2681
  sw(a1, MemOperand(at));
2682
}
2683

    
2684

    
2685
void MacroAssembler::JumpToHandlerEntry() {
2686
  // Compute the handler entry address and jump to it.  The handler table is
2687
  // a fixed array of (smi-tagged) code offsets.
2688
  // v0 = exception, a1 = code object, a2 = state.
2689
  lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));  // Handler table.
2690
  Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2691
  srl(a2, a2, StackHandler::kKindWidth);  // Handler index.
2692
  sll(a2, a2, kPointerSizeLog2);
2693
  Addu(a2, a3, a2);
2694
  lw(a2, MemOperand(a2));  // Smi-tagged offset.
2695
  Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
2696
  sra(t9, a2, kSmiTagSize);
2697
  Addu(t9, t9, a1);
2698
  Jump(t9);  // Jump.
2699
}
2700
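// Annotation (not part of the original source): in C-like terms the dispatch
// above computes
//   index  = a2 >> StackHandler::kKindWidth;
//   offset = handler_table[index] >> kSmiTagSize;            // untag the smi
//   entry  = code_object + Code::kHeaderSize - kHeapObjectTag + offset;
// i.e. the handler table stores smi-tagged offsets from the start of the code
// object's instructions.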

    
2701

    
2702
void MacroAssembler::Throw(Register value) {
2703
  // Adjust this code if not the case.
2704
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2705
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2706
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2707
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2708
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2709
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2710

    
2711
  // The exception is expected in v0.
2712
  Move(v0, value);
2713

    
2714
  // Drop the stack pointer to the top of the top handler.
2715
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2716
                                   isolate())));
2717
  lw(sp, MemOperand(a3));
2718

    
2719
  // Restore the next handler.
2720
  pop(a2);
2721
  sw(a2, MemOperand(a3));
2722

    
2723
  // Get the code object (a1) and state (a2).  Restore the context and frame
2724
  // pointer.
2725
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2726

    
2727
  // If the handler is a JS frame, restore the context to the frame.
2728
  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2729
  // or cp.
2730
  Label done;
2731
  Branch(&done, eq, cp, Operand(zero_reg));
2732
  sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2733
  bind(&done);
2734

    
2735
  JumpToHandlerEntry();
2736
}
2737

    
2738

    
2739
void MacroAssembler::ThrowUncatchable(Register value) {
2740
  // Adjust this code if not the case.
2741
  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2742
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2743
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2744
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2745
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2746
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2747

    
2748
  // The exception is expected in v0.
2749
  if (!value.is(v0)) {
2750
    mov(v0, value);
2751
  }
2752
  // Drop the stack pointer to the top of the top stack handler.
2753
  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2754
  lw(sp, MemOperand(a3));
2755

    
2756
  // Unwind the handlers until the ENTRY handler is found.
2757
  Label fetch_next, check_kind;
2758
  jmp(&check_kind);
2759
  bind(&fetch_next);
2760
  lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2761

    
2762
  bind(&check_kind);
2763
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2764
  lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2765
  And(a2, a2, Operand(StackHandler::KindField::kMask));
2766
  Branch(&fetch_next, ne, a2, Operand(zero_reg));
2767

    
2768
  // Set the top handler address to next handler past the top ENTRY handler.
2769
  pop(a2);
2770
  sw(a2, MemOperand(a3));
2771

    
2772
  // Get the code object (a1) and state (a2).  Clear the context and frame
2773
  // pointer (0 was saved in the handler).
2774
  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2775

    
2776
  JumpToHandlerEntry();
2777
}
2778

    
2779

    
2780
void MacroAssembler::Allocate(int object_size,
2781
                              Register result,
2782
                              Register scratch1,
2783
                              Register scratch2,
2784
                              Label* gc_required,
2785
                              AllocationFlags flags) {
2786
  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
2787
  if (!FLAG_inline_new) {
2788
    if (emit_debug_code()) {
2789
      // Trash the registers to simulate an allocation failure.
2790
      li(result, 0x7091);
2791
      li(scratch1, 0x7191);
2792
      li(scratch2, 0x7291);
2793
    }
2794
    jmp(gc_required);
2795
    return;
2796
  }
2797

    
2798
  ASSERT(!result.is(scratch1));
2799
  ASSERT(!result.is(scratch2));
2800
  ASSERT(!scratch1.is(scratch2));
2801
  ASSERT(!scratch1.is(t9));
2802
  ASSERT(!scratch2.is(t9));
2803
  ASSERT(!result.is(t9));
2804

    
2805
  // Make object size into bytes.
2806
  if ((flags & SIZE_IN_WORDS) != 0) {
2807
    object_size *= kPointerSize;
2808
  }
2809
  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
2810

    
2811
  // Check relative positions of allocation top and limit addresses.
2812
  // ARM adds additional checks to make sure the ldm instruction can be
2813
  // used. On MIPS we don't have ldm so we don't need additional checks either.
2814
  ExternalReference allocation_top =
2815
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
2816
  ExternalReference allocation_limit =
2817
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2818

    
2819
  intptr_t top   =
2820
      reinterpret_cast<intptr_t>(allocation_top.address());
2821
  intptr_t limit =
2822
      reinterpret_cast<intptr_t>(allocation_limit.address());
2823
  ASSERT((limit - top) == kPointerSize);
2824

    
2825
  // Set up allocation top address and object size registers.
2826
  Register topaddr = scratch1;
2827
  li(topaddr, Operand(allocation_top));
2828

    
2829
  // This code stores a temporary value in t9.
2830
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2831
    // Load allocation top into result and allocation limit into t9.
2832
    lw(result, MemOperand(topaddr));
2833
    lw(t9, MemOperand(topaddr, kPointerSize));
2834
  } else {
2835
    if (emit_debug_code()) {
2836
      // Assert that result actually contains top on entry. t9 is used
2837
      // immediately below so this use of t9 does not cause difference with
2838
      // respect to register content between debug and release mode.
2839
      lw(t9, MemOperand(topaddr));
2840
      Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2841
    }
2842
    // Load allocation limit into t9. Result already contains allocation top.
2843
    lw(t9, MemOperand(topaddr, limit - top));
2844
  }
2845

    
2846
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
2847
    // Align the next allocation. Storing the filler map without checking top is
2848
    // safe in new-space because the limit of the heap is aligned there.
2849
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2850
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2851
    And(scratch2, result, Operand(kDoubleAlignmentMask));
2852
    Label aligned;
2853
    Branch(&aligned, eq, scratch2, Operand(zero_reg));
2854
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2855
      Branch(gc_required, Ugreater_equal, result, Operand(t9));
2856
    }
2857
    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2858
    sw(scratch2, MemOperand(result));
2859
    Addu(result, result, Operand(kDoubleSize / 2));
2860
    bind(&aligned);
2861
  }
2862

    
2863
  // Calculate new top and bail out if new space is exhausted. Use result
2864
  // to calculate the new top.
2865
  Addu(scratch2, result, Operand(object_size));
2866
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
2867
  sw(scratch2, MemOperand(topaddr));
2868

    
2869
  // Tag object if requested.
2870
  if ((flags & TAG_OBJECT) != 0) {
2871
    Addu(result, result, Operand(kHeapObjectTag));
2872
  }
2873
}
2874
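// Annotation (sketch, not part of the original source): with kPointerSize == 4
// and kDoubleAlignment == 8, the DOUBLE_ALIGNMENT block above behaves like
//   if (result & kDoubleAlignmentMask) {    // result is only 4-byte aligned
//     *result = one_pointer_filler_map;     // keep the heap iterable
//     result += kDoubleSize / 2;            // skip the 4 filler bytes
//   }
// before the new top is computed from the now-aligned result.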

    
2875

    
2876
void MacroAssembler::Allocate(Register object_size,
2877
                              Register result,
2878
                              Register scratch1,
2879
                              Register scratch2,
2880
                              Label* gc_required,
2881
                              AllocationFlags flags) {
2882
  if (!FLAG_inline_new) {
2883
    if (emit_debug_code()) {
2884
      // Trash the registers to simulate an allocation failure.
2885
      li(result, 0x7091);
2886
      li(scratch1, 0x7191);
2887
      li(scratch2, 0x7291);
2888
    }
2889
    jmp(gc_required);
2890
    return;
2891
  }
2892

    
2893
  ASSERT(!result.is(scratch1));
2894
  ASSERT(!result.is(scratch2));
2895
  ASSERT(!scratch1.is(scratch2));
2896
  ASSERT(!object_size.is(t9));
2897
  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
2898

    
2899
  // Check relative positions of allocation top and limit addresses.
2900
  // ARM adds additional checks to make sure the ldm instruction can be
2901
  // used. On MIPS we don't have ldm so we don't need additional checks either.
2902
  ExternalReference allocation_top =
2903
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
2904
  ExternalReference allocation_limit =
2905
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2906
  intptr_t top   =
2907
      reinterpret_cast<intptr_t>(allocation_top.address());
2908
  intptr_t limit =
2909
      reinterpret_cast<intptr_t>(allocation_limit.address());
2910
  ASSERT((limit - top) == kPointerSize);
2911

    
2912
  // Set up allocation top address and object size registers.
2913
  Register topaddr = scratch1;
2914
  li(topaddr, Operand(allocation_top));
2915

    
2916
  // This code stores a temporary value in t9.
2917
  if ((flags & RESULT_CONTAINS_TOP) == 0) {
2918
    // Load allocation top into result and allocation limit into t9.
2919
    lw(result, MemOperand(topaddr));
2920
    lw(t9, MemOperand(topaddr, kPointerSize));
2921
  } else {
2922
    if (emit_debug_code()) {
2923
      // Assert that result actually contains top on entry. t9 is used
2924
      // immediately below so this use of t9 does not cause difference with
2925
      // respect to register content between debug and release mode.
2926
      lw(t9, MemOperand(topaddr));
2927
      Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2928
    }
2929
    // Load allocation limit into t9. Result already contains allocation top.
2930
    lw(t9, MemOperand(topaddr, limit - top));
2931
  }
2932

    
2933
  if ((flags & DOUBLE_ALIGNMENT) != 0) {
2934
    // Align the next allocation. Storing the filler map without checking top is
2935
    // safe in new-space because the limit of the heap is aligned there.
2936
    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
2937
    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2938
    And(scratch2, result, Operand(kDoubleAlignmentMask));
2939
    Label aligned;
2940
    Branch(&aligned, eq, scratch2, Operand(zero_reg));
2941
    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2942
      Branch(gc_required, Ugreater_equal, result, Operand(t9));
2943
    }
2944
    li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2945
    sw(scratch2, MemOperand(result));
2946
    Addu(result, result, Operand(kDoubleSize / 2));
2947
    bind(&aligned);
2948
  }
2949

    
2950
  // Calculate new top and bail out if new space is exhausted. Use result
2951
  // to calculate the new top. Object size may be in words so a shift is
2952
  // required to get the number of bytes.
2953
  if ((flags & SIZE_IN_WORDS) != 0) {
2954
    sll(scratch2, object_size, kPointerSizeLog2);
2955
    Addu(scratch2, result, scratch2);
2956
  } else {
2957
    Addu(scratch2, result, Operand(object_size));
2958
  }
2959
  Branch(gc_required, Ugreater, scratch2, Operand(t9));
2960

    
2961
  // Update allocation top. result temporarily holds the new top.
2962
  if (emit_debug_code()) {
2963
    And(t9, scratch2, Operand(kObjectAlignmentMask));
2964
    Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
2965
  }
2966
  sw(scratch2, MemOperand(topaddr));
2967

    
2968
  // Tag object if requested.
2969
  if ((flags & TAG_OBJECT) != 0) {
2970
    Addu(result, result, Operand(kHeapObjectTag));
2971
  }
2972
}
2973

    
2974

    
2975
void MacroAssembler::UndoAllocationInNewSpace(Register object,
2976
                                              Register scratch) {
2977
  ExternalReference new_space_allocation_top =
2978
      ExternalReference::new_space_allocation_top_address(isolate());
2979

    
2980
  // Make sure the object has no tag before resetting top.
2981
  And(object, object, Operand(~kHeapObjectTagMask));
2982
#ifdef DEBUG
2983
  // Check that the object un-allocated is below the current top.
2984
  li(scratch, Operand(new_space_allocation_top));
2985
  lw(scratch, MemOperand(scratch));
2986
  Check(less, kUndoAllocationOfNonAllocatedMemory,
2987
      object, Operand(scratch));
2988
#endif
2989
  // Write the address of the object to un-allocate as the current top.
2990
  li(scratch, Operand(new_space_allocation_top));
2991
  sw(object, MemOperand(scratch));
2992
}
2993

    
2994

    
2995
void MacroAssembler::AllocateTwoByteString(Register result,
2996
                                           Register length,
2997
                                           Register scratch1,
2998
                                           Register scratch2,
2999
                                           Register scratch3,
3000
                                           Label* gc_required) {
3001
  // Calculate the number of bytes needed for the characters in the string
3002
  // while observing object alignment.
3003
  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3004
  sll(scratch1, length, 1);  // Length in bytes, not chars.
3005
  addiu(scratch1, scratch1,
3006
       kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3007
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3008

    
3009
  // Allocate two-byte string in new space.
3010
  Allocate(scratch1,
3011
           result,
3012
           scratch2,
3013
           scratch3,
3014
           gc_required,
3015
           TAG_OBJECT);
3016

    
3017
  // Set the map, length and hash field.
3018
  InitializeNewString(result,
3019
                      length,
3020
                      Heap::kStringMapRootIndex,
3021
                      scratch1,
3022
                      scratch2);
3023
}
3024
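// Worked example of the size computation above (annotation only; assumes the
// 32-bit values kObjectAlignmentMask == 7 and a 12-byte SeqTwoByteString
// header): for length == 5,
//   scratch1 = 5 << 1            = 10      // bytes of character data
//   scratch1 = 10 + 7 + 12       = 29
//   scratch1 = 29 & ~7           = 24      // header + data, 8-byte aligned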

    
3025

    
3026
void MacroAssembler::AllocateAsciiString(Register result,
3027
                                         Register length,
3028
                                         Register scratch1,
3029
                                         Register scratch2,
3030
                                         Register scratch3,
3031
                                         Label* gc_required) {
3032
  // Calculate the number of bytes needed for the characters in the string
3033
  // while observing object alignment.
3034
  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3035
  ASSERT(kCharSize == 1);
3036
  addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3037
  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3038

    
3039
  // Allocate ASCII string in new space.
3040
  Allocate(scratch1,
3041
           result,
3042
           scratch2,
3043
           scratch3,
3044
           gc_required,
3045
           TAG_OBJECT);
3046

    
3047
  // Set the map, length and hash field.
3048
  InitializeNewString(result,
3049
                      length,
3050
                      Heap::kAsciiStringMapRootIndex,
3051
                      scratch1,
3052
                      scratch2);
3053
}
3054

    
3055

    
3056
void MacroAssembler::AllocateTwoByteConsString(Register result,
3057
                                               Register length,
3058
                                               Register scratch1,
3059
                                               Register scratch2,
3060
                                               Label* gc_required) {
3061
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3062
           TAG_OBJECT);
3063
  InitializeNewString(result,
3064
                      length,
3065
                      Heap::kConsStringMapRootIndex,
3066
                      scratch1,
3067
                      scratch2);
3068
}
3069

    
3070

    
3071
void MacroAssembler::AllocateAsciiConsString(Register result,
3072
                                             Register length,
3073
                                             Register scratch1,
3074
                                             Register scratch2,
3075
                                             Label* gc_required) {
3076
  Label allocate_new_space, install_map;
3077
  AllocationFlags flags = TAG_OBJECT;
3078

    
3079
  ExternalReference high_promotion_mode = ExternalReference::
3080
      new_space_high_promotion_mode_active_address(isolate());
3081
  li(scratch1, Operand(high_promotion_mode));
3082
  lw(scratch1, MemOperand(scratch1, 0));
3083
  Branch(&allocate_new_space, eq, scratch1, Operand(zero_reg));
3084

    
3085
  Allocate(ConsString::kSize,
3086
           result,
3087
           scratch1,
3088
           scratch2,
3089
           gc_required,
3090
           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
3091

    
3092
  jmp(&install_map);
3093

    
3094
  bind(&allocate_new_space);
3095
  Allocate(ConsString::kSize,
3096
           result,
3097
           scratch1,
3098
           scratch2,
3099
           gc_required,
3100
           flags);
3101

    
3102
  bind(&install_map);
3103

    
3104
  InitializeNewString(result,
3105
                      length,
3106
                      Heap::kConsAsciiStringMapRootIndex,
3107
                      scratch1,
3108
                      scratch2);
3109
}
3110

    
3111

    
3112
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3113
                                                 Register length,
3114
                                                 Register scratch1,
3115
                                                 Register scratch2,
3116
                                                 Label* gc_required) {
3117
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3118
           TAG_OBJECT);
3119

    
3120
  InitializeNewString(result,
3121
                      length,
3122
                      Heap::kSlicedStringMapRootIndex,
3123
                      scratch1,
3124
                      scratch2);
3125
}
3126

    
3127

    
3128
void MacroAssembler::AllocateAsciiSlicedString(Register result,
3129
                                               Register length,
3130
                                               Register scratch1,
3131
                                               Register scratch2,
3132
                                               Label* gc_required) {
3133
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3134
           TAG_OBJECT);
3135

    
3136
  InitializeNewString(result,
3137
                      length,
3138
                      Heap::kSlicedAsciiStringMapRootIndex,
3139
                      scratch1,
3140
                      scratch2);
3141
}
3142

    
3143

    
3144
void MacroAssembler::JumpIfNotUniqueName(Register reg,
3145
                                         Label* not_unique_name) {
3146
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3147
  Label succeed;
3148
  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3149
  Branch(&succeed, eq, at, Operand(zero_reg));
3150
  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
3151

    
3152
  bind(&succeed);
3153
}
3154
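// Annotation (not part of the original source): 'unique name' means an
// internalized string or a Symbol, so with the instance type in 'reg' the
// test above is equivalent to
//   if ((type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0) {
//     // internalized string: fall through
//   } else if (type != SYMBOL_TYPE) {
//     goto not_unique_name;
//   }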

    
3155

    
3156
// Allocates a heap number or jumps to the label if the young space is full and
3157
// a scavenge is needed.
3158
void MacroAssembler::AllocateHeapNumber(Register result,
3159
                                        Register scratch1,
3160
                                        Register scratch2,
3161
                                        Register heap_number_map,
3162
                                        Label* need_gc,
3163
                                        TaggingMode tagging_mode) {
3164
  // Allocate an object in the heap for the heap number and tag it as a heap
3165
  // object.
3166
  Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3167
           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3168

    
3169
  // Store heap number map in the allocated object.
3170
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3171
  if (tagging_mode == TAG_RESULT) {
3172
    sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3173
  } else {
3174
    sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3175
  }
3176
}
3177

    
3178

    
3179
void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3180
                                                 FPURegister value,
3181
                                                 Register scratch1,
3182
                                                 Register scratch2,
3183
                                                 Label* gc_required) {
3184
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
3185
  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
3186
  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3187
}
3188

    
3189

    
3190


// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
                                Register src,
                                RegList temps,
                                int field_count) {
  ASSERT((temps & dst.bit()) == 0);
  ASSERT((temps & src.bit()) == 0);
  // Primitive implementation using only one temporary register.

  Register tmp = no_reg;
  // Find a temp register in temps list.
  for (int i = 0; i < kNumRegisters; i++) {
    if ((temps & (1 << i)) != 0) {
      tmp.code_ = i;
      break;
    }
  }
  ASSERT(!tmp.is(no_reg));

  for (int i = 0; i < field_count; i++) {
    lw(tmp, FieldMemOperand(src, i * kPointerSize));
    sw(tmp, FieldMemOperand(dst, i * kPointerSize));
  }
}


void MacroAssembler::CopyBytes(Register src,
                               Register dst,
                               Register length,
                               Register scratch) {
  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;

  // Align src before copying in word size chunks.
  Branch(&byte_loop, le, length, Operand(kPointerSize));
  bind(&align_loop_1);
  And(scratch, src, kPointerSize - 1);
  Branch(&word_loop, eq, scratch, Operand(zero_reg));
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&align_loop_1, ne, length, Operand(zero_reg));

  // Copy bytes in word size chunks.
  bind(&word_loop);
  if (emit_debug_code()) {
    And(scratch, src, kPointerSize - 1);
    Assert(eq, kExpectingAlignmentForCopyBytes,
        scratch, Operand(zero_reg));
  }
  Branch(&byte_loop, lt, length, Operand(kPointerSize));
  lw(scratch, MemOperand(src));
  Addu(src, src, kPointerSize);

  // TODO(kalmard) check if this can be optimized to use sw in most cases.
  // Can't use unaligned access - copy byte by byte.
  sb(scratch, MemOperand(dst, 0));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 1));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 2));
  srl(scratch, scratch, 8);
  sb(scratch, MemOperand(dst, 3));
  Addu(dst, dst, 4);

  Subu(length, length, Operand(kPointerSize));
  Branch(&word_loop);

  // Copy the last bytes if any left.
  bind(&byte_loop);
  Branch(&done, eq, length, Operand(zero_reg));
  bind(&byte_loop_1);
  lbu(scratch, MemOperand(src));
  Addu(src, src, 1);
  sb(scratch, MemOperand(dst));
  Addu(dst, dst, 1);
  Subu(length, length, Operand(1));
  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
  bind(&done);
}
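
// Note on the word_loop store sequence above: the word loaded with lw is
// written back as four sb stores, least significant byte first, because dst
// may not be word-aligned and unaligned sw is not used here. Storing the low
// byte at dst + 0 reproduces the source byte order only on a little-endian
// target, which is evidently what this code assumes.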


void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
                                                Register end_offset,
                                                Register filler) {
  Label loop, entry;
  Branch(&entry);
  bind(&loop);
  sw(filler, MemOperand(start_offset));
  Addu(start_offset, start_offset, kPointerSize);
  bind(&entry);
  Branch(&loop, lt, start_offset, Operand(end_offset));
}


void MacroAssembler::CheckFastElements(Register map,
                                       Register scratch,
                                       Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));
}


void MacroAssembler::CheckFastObjectElements(Register map,
                                             Register scratch,
                                             Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  STATIC_ASSERT(FAST_ELEMENTS == 2);
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, ls, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleyElementValue));
}


void MacroAssembler::CheckFastSmiElements(Register map,
                                          Register scratch,
                                          Label* fail) {
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
  Branch(fail, hi, scratch,
         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
}
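
// Note on the three element-kind checks above: they compare the whole
// Map::kBitField2Offset byte against precomputed maxima and rely on the
// FAST_* kind ordering asserted above; presumably the elements kind lives in
// the upper bits of bit_field2 and the kMaximumBitField2Fast* constants have
// all lower bits set, so an unsigned compare of the byte orders maps by
// elements kind.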


void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
                                                 Register key_reg,
                                                 Register elements_reg,
                                                 Register scratch1,
                                                 Register scratch2,
                                                 Register scratch3,
                                                 Label* fail,
                                                 int elements_offset) {
  Label smi_value, maybe_nan, have_double_value, is_nan, done;
  Register mantissa_reg = scratch2;
  Register exponent_reg = scratch3;

  // Handle smi values specially.
  JumpIfSmi(value_reg, &smi_value);

  // Ensure that the object is a heap number.
  CheckMap(value_reg,
           scratch1,
           Heap::kHeapNumberMapRootIndex,
           fail,
           DONT_DO_SMI_CHECK);

  // Check for NaN: all NaN values have a value greater (signed) than
  // 0x7ff00000 in the exponent.
  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));

  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));

  bind(&have_double_value);
  sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  Addu(scratch1, scratch1, elements_reg);
  sw(mantissa_reg, FieldMemOperand(
     scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
  uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
      sizeof(kHoleNanLower32);
  sw(exponent_reg, FieldMemOperand(scratch1, offset));
  jmp(&done);

  bind(&maybe_nan);
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
  // it's an Infinity, and the non-NaN code path applies.
  Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
  bind(&is_nan);
  // Load canonical NaN for storing into the double array.
  uint64_t nan_int64 = BitCast<uint64_t>(
      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
  li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
  li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
  jmp(&have_double_value);

  bind(&smi_value);
  Addu(scratch1, elements_reg,
      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
              elements_offset));
  sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
  Addu(scratch1, scratch1, scratch2);
  // scratch1 is now the effective address of the double element.

  Register untagged_value = elements_reg;
  SmiUntag(untagged_value, value_reg);
  mtc1(untagged_value, f2);
  cvt_d_w(f0, f2);
  sdc1(f0, MemOperand(scratch1, 0));
  bind(&done);
}
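
// Worked example for the element address computation above: key_reg holds a
// smi, i.e. the index shifted left by kSmiTagSize (1). Shifting it left by a
// further kDoubleSizeLog2 - kSmiTagSize (3 - 1 = 2) bits therefore yields
// index * 8, the byte offset of the double element, which is then combined
// with elements_reg and the header/tag adjustment.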


void MacroAssembler::CompareMapAndBranch(Register obj,
                                         Register scratch,
                                         Handle<Map> map,
                                         Label* early_success,
                                         Condition cond,
                                         Label* branch_to) {
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
}


void MacroAssembler::CompareMapAndBranch(Register obj_map,
                                         Handle<Map> map,
                                         Label* early_success,
                                         Condition cond,
                                         Label* branch_to) {
  Branch(branch_to, cond, obj_map, Operand(map));
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Handle<Map> map,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  Label success;
  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
  bind(&success);
}


void MacroAssembler::DispatchMap(Register obj,
                                 Register scratch,
                                 Handle<Map> map,
                                 Handle<Code> success,
                                 SmiCheckType smi_check_type) {
  Label fail;
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, &fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
  bind(&fail);
}


void MacroAssembler::CheckMap(Register obj,
                              Register scratch,
                              Heap::RootListIndex index,
                              Label* fail,
                              SmiCheckType smi_check_type) {
  if (smi_check_type == DO_SMI_CHECK) {
    JumpIfSmi(obj, fail);
  }
  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
  LoadRoot(at, index);
  Branch(fail, ne, scratch, Operand(at));
}


void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
  if (IsMipsSoftFloatABI) {
    Move(dst, v0, v1);
  } else {
    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
  }
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
  if (!IsMipsSoftFloatABI) {
    Move(f12, dreg);
  } else {
    Move(a0, a1, dreg);
  }
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
                                             DoubleRegister dreg2) {
  if (!IsMipsSoftFloatABI) {
    if (dreg2.is(f12)) {
      ASSERT(!dreg1.is(f14));
      Move(f14, dreg2);
      Move(f12, dreg1);
    } else {
      Move(f12, dreg1);
      Move(f14, dreg2);
    }
  } else {
    Move(a0, a1, dreg1);
    Move(a2, a3, dreg2);
  }
}


void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
                                             Register reg) {
  if (!IsMipsSoftFloatABI) {
    Move(f12, dreg);
    Move(a2, reg);
  } else {
    Move(a2, reg);
    Move(a0, a1, dreg);
  }
}


void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  // This macro takes the dst register to make the code more readable
  // at the call sites. However, the dst register has to be t1 to
  // follow the calling convention which requires the call type to be
  // in t1.
  ASSERT(dst.is(t1));
  if (call_kind == CALL_AS_FUNCTION) {
    li(dst, Operand(Smi::FromInt(1)));
  } else {
    li(dst, Operand(Smi::FromInt(0)));
  }
}


// -----------------------------------------------------------------------------
// JavaScript invokes.

void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_reg,
                                    Label* done,
                                    bool* definitely_mismatches,
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper,
                                    CallKind call_kind) {
  bool definitely_matches = false;
  *definitely_mismatches = false;
  Label regular_invoke;

  // Check whether the expected and actual arguments count match. If not,
  // set up registers according to contract with ArgumentsAdaptorTrampoline:
  //  a0: actual arguments count
  //  a1: function (passed through to callee)
  //  a2: expected arguments count
  //  a3: callee code entry

  // The code below is made a lot easier because the calling code already sets
  // up actual and expected registers according to the contract if values are
  // passed in registers.
  ASSERT(actual.is_immediate() || actual.reg().is(a0));
  ASSERT(expected.is_immediate() || expected.reg().is(a2));
  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));

  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      li(a0, Operand(actual.immediate()));
      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
      if (expected.immediate() == sentinel) {
        // Don't worry about adapting arguments for builtins that
        // don't want that done. Skip the adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        *definitely_mismatches = true;
        li(a2, Operand(expected.immediate()));
      }
    }
  } else if (actual.is_immediate()) {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
    li(a0, Operand(actual.immediate()));
  } else {
    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
  }

  if (!definitely_matches) {
    if (!code_constant.is_null()) {
      li(a3, Operand(code_constant));
      addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
    }

    Handle<Code> adaptor =
        isolate()->builtins()->ArgumentsAdaptorTrampoline();
    if (flag == CALL_FUNCTION) {
      call_wrapper.BeforeCall(CallSize(adaptor));
      SetCallKind(t1, call_kind);
      Call(adaptor);
      call_wrapper.AfterCall();
      if (!*definitely_mismatches) {
        Branch(done);
      }
    } else {
      SetCallKind(t1, call_kind);
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&regular_invoke);
  }
}
3599

    
3600

    
3601
void MacroAssembler::InvokeCode(Register code,
3602
                                const ParameterCount& expected,
3603
                                const ParameterCount& actual,
3604
                                InvokeFlag flag,
3605
                                const CallWrapper& call_wrapper,
3606
                                CallKind call_kind) {
3607
  // You can't call a function without a valid frame.
3608
  ASSERT(flag == JUMP_FUNCTION || has_frame());
3609

    
3610
  Label done;
3611

    
3612
  bool definitely_mismatches = false;
3613
  InvokePrologue(expected, actual, Handle<Code>::null(), code,
3614
                 &done, &definitely_mismatches, flag,
3615
                 call_wrapper, call_kind);
3616
  if (!definitely_mismatches) {
3617
    if (flag == CALL_FUNCTION) {
3618
      call_wrapper.BeforeCall(CallSize(code));
3619
      SetCallKind(t1, call_kind);
3620
      Call(code);
3621
      call_wrapper.AfterCall();
3622
    } else {
3623
      ASSERT(flag == JUMP_FUNCTION);
3624
      SetCallKind(t1, call_kind);
3625
      Jump(code);
3626
    }
3627
    // Continue here if InvokePrologue does handle the invocation due to
3628
    // mismatched parameter counts.
3629
    bind(&done);
3630
  }
3631
}
3632

    
3633

    
3634
void MacroAssembler::InvokeCode(Handle<Code> code,
3635
                                const ParameterCount& expected,
3636
                                const ParameterCount& actual,
3637
                                RelocInfo::Mode rmode,
3638
                                InvokeFlag flag,
3639
                                CallKind call_kind) {
3640
  // You can't call a function without a valid frame.
3641
  ASSERT(flag == JUMP_FUNCTION || has_frame());
3642

    
3643
  Label done;
3644

    
3645
  bool definitely_mismatches = false;
3646
  InvokePrologue(expected, actual, code, no_reg,
3647
                 &done, &definitely_mismatches, flag,
3648
                 NullCallWrapper(), call_kind);
3649
  if (!definitely_mismatches) {
3650
    if (flag == CALL_FUNCTION) {
3651
      SetCallKind(t1, call_kind);
3652
      Call(code, rmode);
3653
    } else {
3654
      SetCallKind(t1, call_kind);
3655
      Jump(code, rmode);
3656
    }
3657
    // Continue here if InvokePrologue does handle the invocation due to
3658
    // mismatched parameter counts.
3659
    bind(&done);
3660
  }
3661
}
3662

    
3663

    
3664
void MacroAssembler::InvokeFunction(Register function,
3665
                                    const ParameterCount& actual,
3666
                                    InvokeFlag flag,
3667
                                    const CallWrapper& call_wrapper,
3668
                                    CallKind call_kind) {
3669
  // You can't call a function without a valid frame.
3670
  ASSERT(flag == JUMP_FUNCTION || has_frame());
3671

    
3672
  // Contract with called JS functions requires that function is passed in a1.
3673
  ASSERT(function.is(a1));
3674
  Register expected_reg = a2;
3675
  Register code_reg = a3;
3676

    
3677
  lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3678
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3679
  lw(expected_reg,
3680
      FieldMemOperand(code_reg,
3681
                      SharedFunctionInfo::kFormalParameterCountOffset));
3682
  sra(expected_reg, expected_reg, kSmiTagSize);
3683
  lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3684

    
3685
  ParameterCount expected(expected_reg);
3686
  InvokeCode(code_reg, expected, actual, flag, call_wrapper, call_kind);
3687
}
3688

    
3689

    
3690
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3691
                                    const ParameterCount& expected,
3692
                                    const ParameterCount& actual,
3693
                                    InvokeFlag flag,
3694
                                    const CallWrapper& call_wrapper,
3695
                                    CallKind call_kind) {
3696
  // You can't call a function without a valid frame.
3697
  ASSERT(flag == JUMP_FUNCTION || has_frame());
3698

    
3699
  // Get the function and setup the context.
3700
  LoadHeapObject(a1, function);
3701
  lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3702

    
3703
  // We call indirectly through the code field in the function to
3704
  // allow recompilation to take effect without changing any of the
3705
  // call sites.
3706
  lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3707
  InvokeCode(a3, expected, actual, flag, call_wrapper, call_kind);
3708
}
3709

    
3710

    
3711
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3712
                                          Register map,
3713
                                          Register scratch,
3714
                                          Label* fail) {
3715
  lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3716
  IsInstanceJSObjectType(map, scratch, fail);
3717
}
3718

    
3719

    
3720
void MacroAssembler::IsInstanceJSObjectType(Register map,
3721
                                            Register scratch,
3722
                                            Label* fail) {
3723
  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3724
  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3725
  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3726
}
3727

    
3728

    
3729
void MacroAssembler::IsObjectJSStringType(Register object,
3730
                                          Register scratch,
3731
                                          Label* fail) {
3732
  ASSERT(kNotStringTag != 0);
3733

    
3734
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3735
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3736
  And(scratch, scratch, Operand(kIsNotStringMask));
3737
  Branch(fail, ne, scratch, Operand(zero_reg));
3738
}
3739

    
3740

    
3741
void MacroAssembler::IsObjectNameType(Register object,
3742
                                      Register scratch,
3743
                                      Label* fail) {
3744
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3745
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3746
  Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
3747
}
3748

    
3749

    
3750
// ---------------------------------------------------------------------------
3751
// Support functions.
3752

    
3753

    
3754
void MacroAssembler::TryGetFunctionPrototype(Register function,
3755
                                             Register result,
3756
                                             Register scratch,
3757
                                             Label* miss,
3758
                                             bool miss_on_bound_function) {
3759
  // Check that the receiver isn't a smi.
3760
  JumpIfSmi(function, miss);
3761

    
3762
  // Check that the function really is a function.  Load map into result reg.
3763
  GetObjectType(function, result, scratch);
3764
  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3765

    
3766
  if (miss_on_bound_function) {
3767
    lw(scratch,
3768
       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3769
    lw(scratch,
3770
       FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3771
    And(scratch, scratch,
3772
        Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3773
    Branch(miss, ne, scratch, Operand(zero_reg));
3774
  }
3775

    
3776
  // Make sure that the function has an instance prototype.
3777
  Label non_instance;
3778
  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3779
  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3780
  Branch(&non_instance, ne, scratch, Operand(zero_reg));
3781

    
3782
  // Get the prototype or initial map from the function.
3783
  lw(result,
3784
     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3785

    
3786
  // If the prototype or initial map is the hole, don't return it and
3787
  // simply miss the cache instead. This will allow us to allocate a
3788
  // prototype object on-demand in the runtime system.
3789
  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3790
  Branch(miss, eq, result, Operand(t8));
3791

    
3792
  // If the function does not have an initial map, we're done.
3793
  Label done;
3794
  GetObjectType(result, scratch, scratch);
3795
  Branch(&done, ne, scratch, Operand(MAP_TYPE));
3796

    
3797
  // Get the prototype from the initial map.
3798
  lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3799
  jmp(&done);
3800

    
3801
  // Non-instance prototype: Fetch prototype from constructor field
3802
  // in initial map.
3803
  bind(&non_instance);
3804
  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
3805

    
3806
  // All done.
3807
  bind(&done);
3808
}
3809

    
3810

    
3811
void MacroAssembler::GetObjectType(Register object,
3812
                                   Register map,
3813
                                   Register type_reg) {
3814
  lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
3815
  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3816
}
3817

    
3818

    
3819
// -----------------------------------------------------------------------------
3820
// Runtime calls.
3821

    
3822
void MacroAssembler::CallStub(CodeStub* stub,
3823
                              TypeFeedbackId ast_id,
3824
                              Condition cond,
3825
                              Register r1,
3826
                              const Operand& r2,
3827
                              BranchDelaySlot bd) {
3828
  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
3829
  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id,
3830
       cond, r1, r2, bd);
3831
}
3832

    
3833

    
3834
void MacroAssembler::TailCallStub(CodeStub* stub) {
3835
  ASSERT(allow_stub_calls_ ||
3836
         stub->CompilingCallsToThisStubIsGCSafe(isolate()));
3837
  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
3838
}
3839

    
3840

    
3841
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3842
  return ref0.address() - ref1.address();
3843
}
3844

    
3845

    
3846
void MacroAssembler::CallApiFunctionAndReturn(
3847
    ExternalReference function,
3848
    Address function_address,
3849
    ExternalReference thunk_ref,
3850
    Register thunk_last_arg,
3851
    int stack_space,
3852
    MemOperand return_value_operand,
3853
    MemOperand* context_restore_operand) {
3854
  ExternalReference next_address =
3855
      ExternalReference::handle_scope_next_address(isolate());
3856
  const int kNextOffset = 0;
3857
  const int kLimitOffset = AddressOffset(
3858
      ExternalReference::handle_scope_limit_address(isolate()),
3859
      next_address);
3860
  const int kLevelOffset = AddressOffset(
3861
      ExternalReference::handle_scope_level_address(isolate()),
3862
      next_address);
3863

    
3864
  // Allocate HandleScope in callee-save registers.
3865
  li(s3, Operand(next_address));
3866
  lw(s0, MemOperand(s3, kNextOffset));
3867
  lw(s1, MemOperand(s3, kLimitOffset));
3868
  lw(s2, MemOperand(s3, kLevelOffset));
3869
  Addu(s2, s2, Operand(1));
3870
  sw(s2, MemOperand(s3, kLevelOffset));
3871

    
3872
  if (FLAG_log_timer_events) {
3873
    FrameScope frame(this, StackFrame::MANUAL);
3874
    PushSafepointRegisters();
3875
    PrepareCallCFunction(1, a0);
3876
    li(a0, Operand(ExternalReference::isolate_address(isolate())));
3877
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
3878
    PopSafepointRegisters();
3879
  }
3880

    
3881
  Label profiler_disabled;
3882
  Label end_profiler_check;
3883
  bool* is_profiling_flag =
3884
      isolate()->cpu_profiler()->is_profiling_address();
3885
  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
3886
  li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
3887
  lb(t9, MemOperand(t9, 0));
3888
  beq(t9, zero_reg, &profiler_disabled);
3889

    
3890
  // Third parameter is the address of the actual getter function.
3891
  li(thunk_last_arg, reinterpret_cast<int32_t>(function_address));
3892
  li(t9, Operand(thunk_ref));
3893
  jmp(&end_profiler_check);
3894

    
3895
  bind(&profiler_disabled);
3896
  li(t9, Operand(function));
3897

    
3898
  bind(&end_profiler_check);
3899

    
3900
  // Native call returns to the DirectCEntry stub which redirects to the
3901
  // return address pushed on stack (could have moved after GC).
3902
  // DirectCEntry stub itself is generated early and never moves.
3903
  DirectCEntryStub stub;
3904
  stub.GenerateCall(this, t9);
3905

    
3906
  if (FLAG_log_timer_events) {
3907
    FrameScope frame(this, StackFrame::MANUAL);
3908
    PushSafepointRegisters();
3909
    PrepareCallCFunction(1, a0);
3910
    li(a0, Operand(ExternalReference::isolate_address(isolate())));
3911
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
3912
    PopSafepointRegisters();
3913
  }
3914

    
3915
  Label promote_scheduled_exception;
3916
  Label exception_handled;
3917
  Label delete_allocated_handles;
3918
  Label leave_exit_frame;
3919
  Label return_value_loaded;
3920

    
3921
  // Load value from ReturnValue.
3922
  lw(v0, return_value_operand);
3923
  bind(&return_value_loaded);
3924

    
3925
  // No more valid handles (the result handle was the last one). Restore
3926
  // previous handle scope.
3927
  sw(s0, MemOperand(s3, kNextOffset));
3928
  if (emit_debug_code()) {
3929
    lw(a1, MemOperand(s3, kLevelOffset));
3930
    Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
3931
  }
3932
  Subu(s2, s2, Operand(1));
3933
  sw(s2, MemOperand(s3, kLevelOffset));
3934
  lw(at, MemOperand(s3, kLimitOffset));
3935
  Branch(&delete_allocated_handles, ne, s1, Operand(at));
3936

    
3937
  // Check if the function scheduled an exception.
3938
  bind(&leave_exit_frame);
3939
  LoadRoot(t0, Heap::kTheHoleValueRootIndex);
3940
  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
3941
  lw(t1, MemOperand(at));
3942
  Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
3943
  bind(&exception_handled);
3944

    
3945
  bool restore_context = context_restore_operand != NULL;
3946
  if (restore_context) {
3947
    lw(cp, *context_restore_operand);
3948
  }
3949
  li(s0, Operand(stack_space));
3950
  LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
3951

    
3952
  bind(&promote_scheduled_exception);
3953
  {
3954
    FrameScope frame(this, StackFrame::INTERNAL);
3955
    CallExternalReference(
3956
        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
3957
        0);
3958
  }
3959
  jmp(&exception_handled);
3960

    
3961
  // HandleScope limit has changed. Delete allocated extensions.
3962
  bind(&delete_allocated_handles);
3963
  sw(s1, MemOperand(s3, kLimitOffset));
3964
  mov(s0, v0);
3965
  mov(a0, v0);
3966
  PrepareCallCFunction(1, s1);
3967
  li(a0, Operand(ExternalReference::isolate_address(isolate())));
3968
  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
3969
      1);
3970
  mov(v0, s0);
3971
  jmp(&leave_exit_frame);
3972
}
3973

    
3974

    
3975
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3976
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
3977
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
3978
}
3979

    
3980

    
3981
void MacroAssembler::IllegalOperation(int num_arguments) {
3982
  if (num_arguments > 0) {
3983
    addiu(sp, sp, num_arguments * kPointerSize);
3984
  }
3985
  LoadRoot(v0, Heap::kUndefinedValueRootIndex);
3986
}
3987

    
3988

    
3989
void MacroAssembler::IndexFromHash(Register hash,
3990
                                   Register index) {
3991
  // If the hash field contains an array index, pick it out. The assert checks
  // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it do not
  // conflict.
3995
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
3996
         (1 << String::kArrayIndexValueBits));
3997
  // We want the smi-tagged index in key.  kArrayIndexValueMask has zeros in
3998
  // the low kHashShift bits.
3999
  STATIC_ASSERT(kSmiTag == 0);
4000
  Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
4001
  sll(index, hash, kSmiTagSize);
4002
}
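
// Note on IndexFromHash above: Ext extracts String::kArrayIndexValueBits bits
// of the cached array index starting at bit String::kHashShift of the hash
// field, and the final sll by kSmiTagSize (1) retags the untagged index as a
// smi, which is the form the callers expect in |index|.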
4003

    
4004

    
4005
void MacroAssembler::ObjectToDoubleFPURegister(Register object,
4006
                                               FPURegister result,
4007
                                               Register scratch1,
4008
                                               Register scratch2,
4009
                                               Register heap_number_map,
4010
                                               Label* not_number,
4011
                                               ObjectToDoubleFlags flags) {
4012
  Label done;
4013
  if ((flags & OBJECT_NOT_SMI) == 0) {
4014
    Label not_smi;
4015
    JumpIfNotSmi(object, &not_smi);
4016
    // Remove smi tag and convert to double.
4017
    sra(scratch1, object, kSmiTagSize);
4018
    mtc1(scratch1, result);
4019
    cvt_d_w(result, result);
4020
    Branch(&done);
4021
    bind(&not_smi);
4022
  }
4023
  // Check for heap number and load double value from it.
4024
  lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4025
  Branch(not_number, ne, scratch1, Operand(heap_number_map));
4026

    
4027
  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4028
    // If exponent is all ones the number is either a NaN or +/-Infinity.
4029
    Register exponent = scratch1;
4030
    Register mask_reg = scratch2;
4031
    lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4032
    li(mask_reg, HeapNumber::kExponentMask);
4033

    
4034
    And(exponent, exponent, mask_reg);
4035
    Branch(not_number, eq, exponent, Operand(mask_reg));
4036
  }
4037
  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4038
  bind(&done);
4039
}
4040

    
4041

    
4042
void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4043
                                            FPURegister value,
4044
                                            Register scratch1) {
4045
  sra(scratch1, smi, kSmiTagSize);
4046
  mtc1(scratch1, value);
4047
  cvt_d_w(value, value);
4048
}
4049

    
4050

    
4051
void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4052
                                             Register left,
4053
                                             Register right,
4054
                                             Register overflow_dst,
4055
                                             Register scratch) {
4056
  ASSERT(!dst.is(overflow_dst));
4057
  ASSERT(!dst.is(scratch));
4058
  ASSERT(!overflow_dst.is(scratch));
4059
  ASSERT(!overflow_dst.is(left));
4060
  ASSERT(!overflow_dst.is(right));
4061

    
4062
  if (left.is(right) && dst.is(left)) {
4063
    ASSERT(!dst.is(t9));
4064
    ASSERT(!scratch.is(t9));
4065
    ASSERT(!left.is(t9));
4066
    ASSERT(!right.is(t9));
4067
    ASSERT(!overflow_dst.is(t9));
4068
    mov(t9, right);
4069
    right = t9;
4070
  }
4071

    
4072
  if (dst.is(left)) {
4073
    mov(scratch, left);  // Preserve left.
4074
    addu(dst, left, right);  // Left is overwritten.
4075
    xor_(scratch, dst, scratch);  // Original left.
4076
    xor_(overflow_dst, dst, right);
4077
    and_(overflow_dst, overflow_dst, scratch);
4078
  } else if (dst.is(right)) {
4079
    mov(scratch, right);  // Preserve right.
4080
    addu(dst, left, right);  // Right is overwritten.
4081
    xor_(scratch, dst, scratch);  // Original right.
4082
    xor_(overflow_dst, dst, left);
4083
    and_(overflow_dst, overflow_dst, scratch);
4084
  } else {
4085
    addu(dst, left, right);
4086
    xor_(overflow_dst, dst, left);
4087
    xor_(scratch, dst, right);
4088
    and_(overflow_dst, scratch, overflow_dst);
4089
  }
4090
}
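
// Note on the overflow detection above: signed addition overflows exactly
// when both operands have the same sign and the result has the opposite
// sign. (dst ^ left) and (dst ^ right) each have their sign bit set only if
// the result's sign differs from that operand's, so their AND is negative
// precisely in the overflow case. Illustrative 32-bit example:
// 0x7fffffff + 1 = 0x80000000; dst ^ left = 0xffffffff, dst ^ right =
// 0x80000001, and their AND is 0x80000001 (sign bit set), flagging overflow.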
4091

    
4092

    
4093
void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4094
                                             Register left,
4095
                                             Register right,
4096
                                             Register overflow_dst,
4097
                                             Register scratch) {
4098
  ASSERT(!dst.is(overflow_dst));
4099
  ASSERT(!dst.is(scratch));
4100
  ASSERT(!overflow_dst.is(scratch));
4101
  ASSERT(!overflow_dst.is(left));
4102
  ASSERT(!overflow_dst.is(right));
4103
  ASSERT(!scratch.is(left));
4104
  ASSERT(!scratch.is(right));
4105

    
4106
  // This happens with some crankshaft code. Since Subu works fine if
4107
  // left == right, let's not make that restriction here.
4108
  if (left.is(right)) {
4109
    mov(dst, zero_reg);
4110
    mov(overflow_dst, zero_reg);
4111
    return;
4112
  }
4113

    
4114
  if (dst.is(left)) {
4115
    mov(scratch, left);  // Preserve left.
4116
    subu(dst, left, right);  // Left is overwritten.
4117
    xor_(overflow_dst, dst, scratch);  // scratch is original left.
4118
    xor_(scratch, scratch, right);  // scratch is original left.
4119
    and_(overflow_dst, scratch, overflow_dst);
4120
  } else if (dst.is(right)) {
4121
    mov(scratch, right);  // Preserve right.
4122
    subu(dst, left, right);  // Right is overwritten.
4123
    xor_(overflow_dst, dst, left);
4124
    xor_(scratch, left, scratch);  // Original right.
4125
    and_(overflow_dst, scratch, overflow_dst);
4126
  } else {
4127
    subu(dst, left, right);
4128
    xor_(overflow_dst, dst, left);
4129
    xor_(scratch, left, right);
4130
    and_(overflow_dst, scratch, overflow_dst);
4131
  }
4132
}
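
// Note on the subtraction variant above: left - right can only overflow when
// the operands have different signs and the result's sign differs from
// left's, hence the (dst ^ left) & (left ^ right) pattern; as in the
// addition case, a negative overflow_dst signals overflow.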
4133

    
4134

    
4135
void MacroAssembler::CallRuntime(const Runtime::Function* f,
4136
                                 int num_arguments,
4137
                                 SaveFPRegsMode save_doubles) {
4138
  // All parameters are on the stack. v0 has the return value after call.
4139

    
4140
  // If the expected number of arguments of the runtime function is
4141
  // constant, we check that the actual number of arguments matches the
4142
  // expectation.
4143
  if (f->nargs >= 0 && f->nargs != num_arguments) {
4144
    IllegalOperation(num_arguments);
4145
    return;
4146
  }
4147

    
4148
  // TODO(1236192): Most runtime routines don't need the number of
4149
  // arguments passed in because it is constant. At some point we
4150
  // should remove this need and make the runtime routine entry code
4151
  // smarter.
4152
  PrepareCEntryArgs(num_arguments);
4153
  PrepareCEntryFunction(ExternalReference(f, isolate()));
4154
  CEntryStub stub(1, save_doubles);
4155
  CallStub(&stub);
4156
}
4157

    
4158

    
4159
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
4160
                                           int num_arguments,
4161
                                           BranchDelaySlot bd) {
4162
  PrepareCEntryArgs(num_arguments);
4163
  PrepareCEntryFunction(ext);
4164

    
4165
  CEntryStub stub(1);
4166
  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
4167
}
4168

    
4169

    
4170
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
4171
                                               int num_arguments,
4172
                                               int result_size) {
4173
  // TODO(1236192): Most runtime routines don't need the number of
4174
  // arguments passed in because it is constant. At some point we
4175
  // should remove this need and make the runtime routine entry code
4176
  // smarter.
4177
  PrepareCEntryArgs(num_arguments);
4178
  JumpToExternalReference(ext);
4179
}
4180

    
4181

    
4182
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
4183
                                     int num_arguments,
4184
                                     int result_size) {
4185
  TailCallExternalReference(ExternalReference(fid, isolate()),
4186
                            num_arguments,
4187
                            result_size);
4188
}
4189

    
4190

    
4191
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
4192
                                             BranchDelaySlot bd) {
4193
  PrepareCEntryFunction(builtin);
4194
  CEntryStub stub(1);
4195
  Jump(stub.GetCode(isolate()),
4196
       RelocInfo::CODE_TARGET,
4197
       al,
4198
       zero_reg,
4199
       Operand(zero_reg),
4200
       bd);
4201
}
4202

    
4203

    
4204
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
4205
                                   InvokeFlag flag,
4206
                                   const CallWrapper& call_wrapper) {
4207
  // You can't call a builtin without a valid frame.
4208
  ASSERT(flag == JUMP_FUNCTION || has_frame());
4209

    
4210
  GetBuiltinEntry(t9, id);
4211
  if (flag == CALL_FUNCTION) {
4212
    call_wrapper.BeforeCall(CallSize(t9));
4213
    SetCallKind(t1, CALL_AS_METHOD);
4214
    Call(t9);
4215
    call_wrapper.AfterCall();
4216
  } else {
4217
    ASSERT(flag == JUMP_FUNCTION);
4218
    SetCallKind(t1, CALL_AS_METHOD);
4219
    Jump(t9);
4220
  }
4221
}
4222

    
4223

    
4224
void MacroAssembler::GetBuiltinFunction(Register target,
4225
                                        Builtins::JavaScript id) {
4226
  // Load the builtins object into target register.
4227
  lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4228
  lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4229
  // Load the JavaScript builtin function from the builtins object.
4230
  lw(target, FieldMemOperand(target,
4231
                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4232
}
4233

    
4234

    
4235
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4236
  ASSERT(!target.is(a1));
4237
  GetBuiltinFunction(a1, id);
4238
  // Load the code entry point from the builtins object.
4239
  lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4240
}
4241

    
4242

    
4243
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4244
                                Register scratch1, Register scratch2) {
4245
  if (FLAG_native_code_counters && counter->Enabled()) {
4246
    li(scratch1, Operand(value));
4247
    li(scratch2, Operand(ExternalReference(counter)));
4248
    sw(scratch1, MemOperand(scratch2));
4249
  }
4250
}
4251

    
4252

    
4253
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4254
                                      Register scratch1, Register scratch2) {
4255
  ASSERT(value > 0);
4256
  if (FLAG_native_code_counters && counter->Enabled()) {
4257
    li(scratch2, Operand(ExternalReference(counter)));
4258
    lw(scratch1, MemOperand(scratch2));
4259
    Addu(scratch1, scratch1, Operand(value));
4260
    sw(scratch1, MemOperand(scratch2));
4261
  }
4262
}
4263

    
4264

    
4265
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4266
                                      Register scratch1, Register scratch2) {
4267
  ASSERT(value > 0);
4268
  if (FLAG_native_code_counters && counter->Enabled()) {
4269
    li(scratch2, Operand(ExternalReference(counter)));
4270
    lw(scratch1, MemOperand(scratch2));
4271
    Subu(scratch1, scratch1, Operand(value));
4272
    sw(scratch1, MemOperand(scratch2));
4273
  }
4274
}
4275

    
4276

    
4277
// -----------------------------------------------------------------------------
4278
// Debugging.
4279

    
4280
void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4281
                            Register rs, Operand rt) {
4282
  if (emit_debug_code())
4283
    Check(cc, reason, rs, rt);
4284
}
4285

    
4286

    
4287
void MacroAssembler::AssertFastElements(Register elements) {
4288
  if (emit_debug_code()) {
4289
    ASSERT(!elements.is(at));
4290
    Label ok;
4291
    push(elements);
4292
    lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4293
    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4294
    Branch(&ok, eq, elements, Operand(at));
4295
    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4296
    Branch(&ok, eq, elements, Operand(at));
4297
    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4298
    Branch(&ok, eq, elements, Operand(at));
4299
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
4300
    bind(&ok);
4301
    pop(elements);
4302
  }
4303
}
4304

    
4305

    
4306
void MacroAssembler::Check(Condition cc, BailoutReason reason,
4307
                           Register rs, Operand rt) {
4308
  Label L;
4309
  Branch(&L, cc, rs, rt);
4310
  Abort(reason);
4311
  // Will not return here.
4312
  bind(&L);
4313
}
4314

    
4315

    
4316
void MacroAssembler::Abort(BailoutReason reason) {
4317
  Label abort_start;
4318
  bind(&abort_start);
4319
  // We want to pass the msg string like a smi to avoid GC problems;
  // however, msg is not guaranteed to be aligned properly. Instead, we
  // pass an aligned pointer that is a proper v8 smi, but also pass the
  // alignment difference from the real pointer as a smi.
4324
  const char* msg = GetBailoutReason(reason);
4325
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
4326
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
4327
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
4328
#ifdef DEBUG
4329
  if (msg != NULL) {
4330
    RecordComment("Abort message: ");
4331
    RecordComment(msg);
4332
  }
4333

    
4334
  if (FLAG_trap_on_abort) {
4335
    stop(msg);
4336
    return;
4337
  }
4338
#endif
4339

    
4340
  li(a0, Operand(p0));
4341
  push(a0);
4342
  li(a0, Operand(Smi::FromInt(p1 - p0)));
4343
  push(a0);
4344
  // Disable stub call restrictions to always allow calls to abort.
4345
  if (!has_frame_) {
4346
    // We don't actually want to generate a pile of code for this, so just
4347
    // claim there is a stack frame, without generating one.
4348
    FrameScope scope(this, StackFrame::NONE);
4349
    CallRuntime(Runtime::kAbort, 2);
4350
  } else {
4351
    CallRuntime(Runtime::kAbort, 2);
4352
  }
4353
  // Will not return here.
4354
  if (is_trampoline_pool_blocked()) {
4355
    // If the calling code cares about the exact number of
4356
    // instructions generated, we insert padding here to keep the size
4357
    // of the Abort macro constant.
4358
    // Currently in debug mode with debug_code enabled the number of
4359
    // generated instructions is 14, so we use this as a maximum value.
4360
    static const int kExpectedAbortInstructions = 14;
4361
    int abort_instructions = InstructionsGeneratedSince(&abort_start);
4362
    ASSERT(abort_instructions <= kExpectedAbortInstructions);
4363
    while (abort_instructions++ < kExpectedAbortInstructions) {
4364
      nop();
4365
    }
4366
  }
4367
}
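
// Note on the smi trick above: p0 is the message pointer rounded down to a
// smi-aligned value, so it can be pushed without confusing the GC, and
// Smi::FromInt(p1 - p0) carries the 0 or 1 byte(s) that were masked off; the
// runtime side presumably rebuilds the original char* as p0 + (p1 - p0)
// before reporting the bailout reason.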
4368

    
4369

    
4370
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4371
  if (context_chain_length > 0) {
4372
    // Move up the chain of contexts to the context containing the slot.
4373
    lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4374
    for (int i = 1; i < context_chain_length; i++) {
4375
      lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4376
    }
4377
  } else {
4378
    // Slot is in the current function context.  Move it into the
4379
    // destination register in case we store into it (the write barrier
4380
    // cannot be allowed to destroy the context register cp).
4381
    Move(dst, cp);
4382
  }
4383
}
4384

    
4385

    
4386
void MacroAssembler::LoadTransitionedArrayMapConditional(
4387
    ElementsKind expected_kind,
4388
    ElementsKind transitioned_kind,
4389
    Register map_in_out,
4390
    Register scratch,
4391
    Label* no_map_match) {
4392
  // Load the global or builtins object from the current context.
4393
  lw(scratch,
4394
     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4395
  lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4396

    
4397
  // Check that the function's map is the same as the expected cached map.
4398
  lw(scratch,
4399
     MemOperand(scratch,
4400
                Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4401
  size_t offset = expected_kind * kPointerSize +
4402
      FixedArrayBase::kHeaderSize;
4403
  lw(at, FieldMemOperand(scratch, offset));
4404
  Branch(no_map_match, ne, map_in_out, Operand(at));
4405

    
4406
  // Use the transitioned cached map.
4407
  offset = transitioned_kind * kPointerSize +
4408
      FixedArrayBase::kHeaderSize;
4409
  lw(map_in_out, FieldMemOperand(scratch, offset));
4410
}
4411

    
4412

    
4413
void MacroAssembler::LoadInitialArrayMap(
4414
    Register function_in, Register scratch,
4415
    Register map_out, bool can_have_holes) {
4416
  ASSERT(!function_in.is(map_out));
4417
  Label done;
4418
  lw(map_out, FieldMemOperand(function_in,
4419
                              JSFunction::kPrototypeOrInitialMapOffset));
4420
  if (!FLAG_smi_only_arrays) {
4421
    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
4422
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4423
                                        kind,
4424
                                        map_out,
4425
                                        scratch,
4426
                                        &done);
4427
  } else if (can_have_holes) {
4428
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4429
                                        FAST_HOLEY_SMI_ELEMENTS,
4430
                                        map_out,
4431
                                        scratch,
4432
                                        &done);
4433
  }
4434
  bind(&done);
4435
}
4436

    
4437

    
4438
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4439
  // Load the global or builtins object from the current context.
4440
  lw(function,
4441
     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4442
  // Load the native context from the global or builtins object.
4443
  lw(function, FieldMemOperand(function,
4444
                               GlobalObject::kNativeContextOffset));
4445
  // Load the function from the native context.
4446
  lw(function, MemOperand(function, Context::SlotOffset(index)));
4447
}
4448

    
4449

    
4450
void MacroAssembler::LoadArrayFunction(Register function) {
4451
  // Load the global or builtins object from the current context.
4452
  lw(function,
4453
     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4454
  // Load the global context from the global or builtins object.
4455
  lw(function,
4456
     FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
4457
  // Load the array function from the native context.
4458
  lw(function,
4459
     MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
4460
}
4461

    
4462

    
4463
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4464
                                                  Register map,
4465
                                                  Register scratch) {
4466
  // Load the initial map. The global functions all have initial maps.
4467
  lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4468
  if (emit_debug_code()) {
4469
    Label ok, fail;
4470
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4471
    Branch(&ok);
4472
    bind(&fail);
4473
    Abort(kGlobalFunctionsMustHaveInitialMap);
4474
    bind(&ok);
4475
  }
4476
}
4477

    
4478

    
4479
void MacroAssembler::LoadNumber(Register object,
4480
                                FPURegister dst,
4481
                                Register heap_number_map,
4482
                                Register scratch,
4483
                                Label* not_number) {
4484
  Label is_smi, done;
4485

    
4486
  UntagAndJumpIfSmi(scratch, object, &is_smi);
4487
  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
4488

    
4489
  ldc1(dst, FieldMemOperand(object, HeapNumber::kValueOffset));
4490
  Branch(&done);
4491

    
4492
  bind(&is_smi);
4493
  mtc1(scratch, dst);
4494
  cvt_d_w(dst, dst);
4495

    
4496
  bind(&done);
4497
}
4498

    
4499

    
4500
void MacroAssembler::LoadNumberAsInt32Double(Register object,
4501
                                             DoubleRegister double_dst,
4502
                                             Register heap_number_map,
4503
                                             Register scratch1,
4504
                                             Register scratch2,
4505
                                             FPURegister double_scratch,
4506
                                             Label* not_int32) {
4507
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
4508
  ASSERT(!scratch1.is(scratch2));
4509
  ASSERT(!heap_number_map.is(object) &&
4510
         !heap_number_map.is(scratch1) &&
4511
         !heap_number_map.is(scratch2));
4512

    
4513
  Label done, obj_is_not_smi;
4514

    
4515
  UntagAndJumpIfNotSmi(scratch1, object, &obj_is_not_smi);
4516
  mtc1(scratch1, double_scratch);
4517
  cvt_d_w(double_dst, double_scratch);
4518
  Branch(&done);
4519

    
4520
  bind(&obj_is_not_smi);
4521
  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
4522

    
4523
  // Load the number.
4524
  // Load the double value.
4525
  ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
4526

    
4527
  Register except_flag = scratch2;
4528
  EmitFPUTruncate(kRoundToZero,
4529
                  scratch1,
4530
                  double_dst,
4531
                  at,
4532
                  double_scratch,
4533
                  except_flag,
4534
                  kCheckForInexactConversion);
4535

    
4536
  // Jump to not_int32 if the operation did not succeed.
4537
  Branch(not_int32, ne, except_flag, Operand(zero_reg));
4538
  bind(&done);
4539
}
4540

    
4541

    
4542
void MacroAssembler::LoadNumberAsInt32(Register object,
4543
                                       Register dst,
4544
                                       Register heap_number_map,
4545
                                       Register scratch1,
4546
                                       Register scratch2,
4547
                                       FPURegister double_scratch0,
4548
                                       FPURegister double_scratch1,
4549
                                       Label* not_int32) {
4550
  ASSERT(!dst.is(object));
4551
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
4552
  ASSERT(!scratch1.is(scratch2));
4553

    
4554
  Label done, maybe_undefined;
4555

    
4556
  UntagAndJumpIfSmi(dst, object, &done);
4557

    
4558
  JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
4559

    
4560
  // Object is a heap number.
4561
  // Convert the floating point value to a 32-bit integer.
4562
  // Load the double value.
4563
  ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
4564

    
4565
  Register except_flag = scratch2;
4566
  EmitFPUTruncate(kRoundToZero,
4567
                  dst,
4568
                  double_scratch0,
4569
                  scratch1,
4570
                  double_scratch1,
4571
                  except_flag,
4572
                  kCheckForInexactConversion);
4573

    
4574
  // Jump to not_int32 if the operation did not succeed.
4575
  Branch(not_int32, ne, except_flag, Operand(zero_reg));
4576
  Branch(&done);
4577

    
4578
  bind(&maybe_undefined);
4579
  LoadRoot(at, Heap::kUndefinedValueRootIndex);
4580
  Branch(not_int32, ne, object, Operand(at));
4581
  // |undefined| is truncated to 0.
4582
  li(dst, Operand(Smi::FromInt(0)));
4583
  // Fall through.
4584

    
4585
  bind(&done);
4586
}
4587

    
4588

    
4589
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
4590
  if (frame_mode == BUILD_STUB_FRAME) {
4591
    Push(ra, fp, cp);
4592
    Push(Smi::FromInt(StackFrame::STUB));
4593
    // Adjust FP to point to saved FP.
4594
    Addu(fp, sp, Operand(2 * kPointerSize));
4595
  } else {
4596
    PredictableCodeSizeScope predictible_code_size_scope(
4597
      this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
4598
    // The following three instructions must remain together and unmodified
4599
    // for code aging to work properly.
4600
    if (isolate()->IsCodePreAgingActive()) {
4601
      // Pre-age the code.
4602
      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4603
      nop(Assembler::CODE_AGE_MARKER_NOP);
4604
      // Save the function's original return address
4605
      // (it will be clobbered by Call(t9))
4606
      mov(at, ra);
4607
      // Load the stub address to t9 and call it
4608
      li(t9,
4609
         Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
4610
      Call(t9);
4611
      // Record the stub address in the empty space for GetCodeAgeAndParity()
4612
      dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
4613
    } else {
4614
      Push(ra, fp, cp, a1);
4615
      nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4616
      // Adjust fp to point to caller's fp.
4617
      Addu(fp, sp, Operand(2 * kPointerSize));
4618
    }
4619
  }
4620
}
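// Both prologue variants emitted above are intended to occupy the same number
// of instructions (kNoCodeAgeSequenceLength), which is what allows the code
// aging mechanism to patch one sequence over the other in place; the
// PredictableCodeSizeScope asserts that expectation at assembly time.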
4621

    
4622

    
4623
void MacroAssembler::EnterFrame(StackFrame::Type type) {
4624
  addiu(sp, sp, -5 * kPointerSize);
4625
  li(t8, Operand(Smi::FromInt(type)));
4626
  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4627
  sw(ra, MemOperand(sp, 4 * kPointerSize));
4628
  sw(fp, MemOperand(sp, 3 * kPointerSize));
4629
  sw(cp, MemOperand(sp, 2 * kPointerSize));
4630
  sw(t8, MemOperand(sp, 1 * kPointerSize));
4631
  sw(t9, MemOperand(sp, 0 * kPointerSize));
4632
  addiu(fp, sp, 3 * kPointerSize);
4633
}
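// Frame layout produced above, relative to the new fp (derived from the
// stores above):
//   [fp + 1 * kPointerSize] : saved ra
//   [fp + 0]                : saved caller fp
//   [fp - 1 * kPointerSize] : cp
//   [fp - 2 * kPointerSize] : Smi-encoded frame type
//   [fp - 3 * kPointerSize] : code object (this is also the new sp)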
4634

    
4635

    
4636
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4637
  mov(sp, fp);
4638
  lw(fp, MemOperand(sp, 0 * kPointerSize));
4639
  lw(ra, MemOperand(sp, 1 * kPointerSize));
4640
  addiu(sp, sp, 2 * kPointerSize);
4641
}
4642

    
4643

    
4644
void MacroAssembler::EnterExitFrame(bool save_doubles,
4645
                                    int stack_space) {
4646
  // Set up the frame structure on the stack.
4647
  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4648
  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4649
  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4650

    
4651
  // This is how the stack will look:
4652
  // fp + 2 (==kCallerSPDisplacement) - old stack's end
4653
  // [fp + 1 (==kCallerPCOffset)] - saved old ra
4654
  // [fp + 0 (==kCallerFPOffset)] - saved old fp
4655
  // [fp - 1 (==kSPOffset)] - sp of the called function
4656
  // [fp - 2 (==kCodeOffset)] - CodeObject
4657
  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4658
  //   new stack (will contain saved ra)
4659

    
4660
  // Save registers.
4661
  addiu(sp, sp, -4 * kPointerSize);
4662
  sw(ra, MemOperand(sp, 3 * kPointerSize));
4663
  sw(fp, MemOperand(sp, 2 * kPointerSize));
4664
  addiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
4665

    
4666
  if (emit_debug_code()) {
4667
    sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4668
  }
4669

    
4670
  // Accessed from ExitFrame::code_slot.
4671
  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4672
  sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4673

    
4674
  // Save the frame pointer and the context in top.
4675
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4676
  sw(fp, MemOperand(t8));
4677
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4678
  sw(cp, MemOperand(t8));
4679

    
4680
  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4681
  if (save_doubles) {
4682
    // The stack must be aligned to 0 modulo 8 for stores with sdc1.
4683
    ASSERT(kDoubleSize == frame_alignment);
4684
    if (frame_alignment > 0) {
4685
      ASSERT(IsPowerOf2(frame_alignment));
4686
      And(sp, sp, Operand(-frame_alignment));  // Align stack.
4687
    }
4688
    int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4689
    Subu(sp, sp, Operand(space));
4690
    // Remember: we only need to save every 2nd double FPU value.
4691
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4692
      FPURegister reg = FPURegister::from_code(i);
4693
      sdc1(reg, MemOperand(sp, i * kDoubleSize));
4694
    }
4695
  }
4696

    
4697
  // Reserve place for the return address, stack space and an optional slot
4698
  // (used by the DirectCEntryStub to hold the return value if a struct is
4699
  // returned) and align the frame preparing for calling the runtime function.
4700
  ASSERT(stack_space >= 0);
4701
  Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4702
  if (frame_alignment > 0) {
4703
    ASSERT(IsPowerOf2(frame_alignment));
4704
    And(sp, sp, Operand(-frame_alignment));  // Align stack.
4705
  }
4706

    
4707
  // Set the exit frame sp value to point just before the return address
4708
  // location.
4709
  addiu(at, sp, kPointerSize);
4710
  sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4711
}
4712

    
4713

    
4714
void MacroAssembler::LeaveExitFrame(bool save_doubles,
4715
                                    Register argument_count,
4716
                                    bool restore_context,
4717
                                    bool do_return) {
4718
  // Optionally restore all double registers.
4719
  if (save_doubles) {
4720
    // Remember: we only need to restore every 2nd double FPU value.
4721
    lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
4722
    for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
4723
      FPURegister reg = FPURegister::from_code(i);
4724
      ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
4725
    }
4726
  }
4727

    
4728
  // Clear top frame.
4729
  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4730
  sw(zero_reg, MemOperand(t8));
4731

    
4732
  // Restore current context from top and clear it in debug mode.
4733
  if (restore_context) {
4734
    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4735
    lw(cp, MemOperand(t8));
4736
  }
4737
#ifdef DEBUG
4738
  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4739
  sw(a3, MemOperand(t8));
4740
#endif
4741

    
4742
  // Pop the arguments, restore registers, and return.
4743
  mov(sp, fp);  // Respect ABI stack constraint.
4744
  lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4745
  lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4746

    
4747
  if (argument_count.is_valid()) {
4748
    sll(t8, argument_count, kPointerSizeLog2);
4749
    addu(sp, sp, t8);
4750
  }
4751

    
4752
  if (do_return) {
4753
    Ret(USE_DELAY_SLOT);
4754
    // If returning, the instruction in the delay slot will be the addiu below.
4755
  }
4756
  addiu(sp, sp, 8);
4757
}
4758

    
4759

    
4760
void MacroAssembler::InitializeNewString(Register string,
4761
                                         Register length,
4762
                                         Heap::RootListIndex map_index,
4763
                                         Register scratch1,
4764
                                         Register scratch2) {
4765
  sll(scratch1, length, kSmiTagSize);
4766
  LoadRoot(scratch2, map_index);
4767
  sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
4768
  li(scratch1, Operand(String::kEmptyHashField));
4769
  sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4770
  sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4771
}
4772

    
4773

    
4774
int MacroAssembler::ActivationFrameAlignment() {
4775
#if V8_HOST_ARCH_MIPS
4776
  // Running on the real platform. Use the alignment as mandated by the local
4777
  // environment.
4778
  // Note: This will break if we ever start generating snapshots on one Mips
4779
  // platform for another Mips platform with a different alignment.
4780
  return OS::ActivationFrameAlignment();
4781
#else  // V8_HOST_ARCH_MIPS
4782
  // If we are using the simulator then we should always align to the expected
4783
  // alignment. As the simulator is used to generate snapshots we do not know
4784
  // if the target platform will need alignment, so this is controlled from a
4785
  // flag.
4786
  return FLAG_sim_stack_alignment;
4787
#endif  // V8_HOST_ARCH_MIPS
4788
}
4789

    
4790

    
4791
void MacroAssembler::AssertStackIsAligned() {
4792
  if (emit_debug_code()) {
4793
    const int frame_alignment = ActivationFrameAlignment();
4794
    const int frame_alignment_mask = frame_alignment - 1;
4795

    
4796
    if (frame_alignment > kPointerSize) {
4797
      Label alignment_as_expected;
4798
      ASSERT(IsPowerOf2(frame_alignment));
4799
      andi(at, sp, frame_alignment_mask);
4800
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
4801
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
4802
      stop("Unexpected stack alignment");
4803
      bind(&alignment_as_expected);
4804
    }
4805
  }
4806
}
4807

    
4808

    
4809
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4810
    Register reg,
4811
    Register scratch,
4812
    Label* not_power_of_two_or_zero) {
4813
  Subu(scratch, reg, Operand(1));
4814
  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4815
         scratch, Operand(zero_reg));
4816
  and_(at, scratch, reg);  // In the delay slot.
4817
  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4818
}
4819

    
4820

    
4821
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4822
  ASSERT(!reg.is(overflow));
4823
  mov(overflow, reg);  // Save original value.
4824
  SmiTag(reg);
4825
  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
4826
}
4827

    
4828

    
4829
void MacroAssembler::SmiTagCheckOverflow(Register dst,
4830
                                         Register src,
4831
                                         Register overflow) {
4832
  if (dst.is(src)) {
4833
    // Fall back to slower case.
4834
    SmiTagCheckOverflow(dst, overflow);
4835
  } else {
4836
    ASSERT(!dst.is(src));
4837
    ASSERT(!dst.is(overflow));
4838
    ASSERT(!src.is(overflow));
4839
    SmiTag(dst, src);
4840
    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
4841
  }
4842
}
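// Why (value ^ 2 * value) < 0 detects overflow: SmiTag shifts the value left
// by one bit, so tagging is lossless only when bits 31 and 30 of the original
// value agree. If they differ, the shift changes the sign bit, and the xor of
// the original and tagged values is negative.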
4843

    
4844

    
4845
void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4846
                                       Register src,
4847
                                       Label* smi_case) {
4848
  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4849
  SmiUntag(dst, src);
4850
}
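// Note on the delay slot above: the SmiUntag executes whether or not the
// branch is taken, so dst already holds the untagged value when control
// reaches smi_case; on the fall-through path the result is simply ignored or
// overwritten by the caller.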
4851

    
4852

    
4853
void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4854
                                          Register src,
4855
                                          Label* non_smi_case) {
4856
  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4857
  SmiUntag(dst, src);
4858
}
4859

    
4860
void MacroAssembler::JumpIfSmi(Register value,
4861
                               Label* smi_label,
4862
                               Register scratch,
4863
                               BranchDelaySlot bd) {
4864
  ASSERT_EQ(0, kSmiTag);
4865
  andi(scratch, value, kSmiTagMask);
4866
  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4867
}
4868

    
4869
void MacroAssembler::JumpIfNotSmi(Register value,
4870
                                  Label* not_smi_label,
4871
                                  Register scratch,
4872
                                  BranchDelaySlot bd) {
4873
  ASSERT_EQ(0, kSmiTag);
4874
  andi(scratch, value, kSmiTagMask);
4875
  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4876
}
4877

    
4878

    
4879
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4880
                                      Register reg2,
4881
                                      Label* on_not_both_smi) {
4882
  STATIC_ASSERT(kSmiTag == 0);
4883
  ASSERT_EQ(1, kSmiTagMask);
4884
  or_(at, reg1, reg2);
4885
  JumpIfNotSmi(at, on_not_both_smi);
4886
}
4887

    
4888

    
4889
void MacroAssembler::JumpIfEitherSmi(Register reg1,
4890
                                     Register reg2,
4891
                                     Label* on_either_smi) {
4892
  STATIC_ASSERT(kSmiTag == 0);
4893
  ASSERT_EQ(1, kSmiTagMask);
4894
  // If either operand is a Smi (tag 0), the and'ed tag bit is 0.
4895
  and_(at, reg1, reg2);
4896
  JumpIfSmi(at, on_either_smi);
4897
}
4898

    
4899

    
4900
void MacroAssembler::AssertNotSmi(Register object) {
4901
  if (emit_debug_code()) {
4902
    STATIC_ASSERT(kSmiTag == 0);
4903
    andi(at, object, kSmiTagMask);
4904
    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
4905
  }
4906
}
4907

    
4908

    
4909
void MacroAssembler::AssertSmi(Register object) {
4910
  if (emit_debug_code()) {
4911
    STATIC_ASSERT(kSmiTag == 0);
4912
    andi(at, object, kSmiTagMask);
4913
    Check(eq, kOperandIsASmi, at, Operand(zero_reg));
4914
  }
4915
}
4916

    
4917

    
4918
void MacroAssembler::AssertString(Register object) {
4919
  if (emit_debug_code()) {
4920
    STATIC_ASSERT(kSmiTag == 0);
4921
    And(t0, object, Operand(kSmiTagMask));
4922
    Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg));
4923
    push(object);
4924
    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4925
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4926
    Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
4927
    pop(object);
4928
  }
4929
}
4930

    
4931

    
4932
void MacroAssembler::AssertName(Register object) {
4933
  if (emit_debug_code()) {
4934
    STATIC_ASSERT(kSmiTag == 0);
4935
    And(t0, object, Operand(kSmiTagMask));
4936
    Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg));
4937
    push(object);
4938
    lw(object, FieldMemOperand(object, HeapObject::kMapOffset));
4939
    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4940
    Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
4941
    pop(object);
4942
  }
4943
}
4944

    
4945

    
4946
void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4947
  if (emit_debug_code()) {
4948
    ASSERT(!reg.is(at));
4949
    LoadRoot(at, index);
4950
    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4951
  }
4952
}
4953

    
4954

    
4955
void MacroAssembler::JumpIfNotHeapNumber(Register object,
4956
                                         Register heap_number_map,
4957
                                         Register scratch,
4958
                                         Label* on_not_heap_number) {
4959
  lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4960
  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4961
  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4962
}
4963

    
4964

    
4965
void MacroAssembler::LookupNumberStringCache(Register object,
4966
                                             Register result,
4967
                                             Register scratch1,
4968
                                             Register scratch2,
4969
                                             Register scratch3,
4970
                                             Label* not_found) {
4971
  // Use of registers. Register result is used as a temporary.
4972
  Register number_string_cache = result;
4973
  Register mask = scratch3;
4974

    
4975
  // Load the number string cache.
4976
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
4977

    
4978
  // Make the hash mask from the length of the number string cache. It
4979
  // contains two elements (number and string) for each cache entry.
4980
  lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
4981
  // Divide length by two (length is a smi).
4982
  sra(mask, mask, kSmiTagSize + 1);
4983
  Addu(mask, mask, -1);  // Make mask.
4984

    
4985
  // Calculate the entry in the number string cache. The hash value in the
4986
  // number string cache for smis is just the smi value, and the hash for
4987
  // doubles is the xor of the upper and lower words. See
4988
  // Heap::GetNumberStringCache.
4989
  Label is_smi;
4990
  Label load_result_from_cache;
4991
  JumpIfSmi(object, &is_smi);
4992
  CheckMap(object,
4993
           scratch1,
4994
           Heap::kHeapNumberMapRootIndex,
4995
           not_found,
4996
           DONT_DO_SMI_CHECK);
4997

    
4998
  STATIC_ASSERT(8 == kDoubleSize);
4999
  Addu(scratch1,
5000
       object,
5001
       Operand(HeapNumber::kValueOffset - kHeapObjectTag));
5002
  lw(scratch2, MemOperand(scratch1, kPointerSize));
5003
  lw(scratch1, MemOperand(scratch1, 0));
5004
  Xor(scratch1, scratch1, Operand(scratch2));
5005
  And(scratch1, scratch1, Operand(mask));
5006

    
5007
  // Calculate address of entry in string cache: each entry consists
5008
  // of two pointer sized fields.
5009
  sll(scratch1, scratch1, kPointerSizeLog2 + 1);
5010
  Addu(scratch1, number_string_cache, scratch1);
5011

    
5012
  Register probe = mask;
5013
  lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
5014
  JumpIfSmi(probe, not_found);
5015
  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
5016
  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
5017
  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
5018
  Branch(not_found);
5019

    
5020
  bind(&is_smi);
5021
  Register scratch = scratch1;
5022
  sra(scratch, object, 1);   // Shift away the tag.
5023
  And(scratch, mask, Operand(scratch));
5024

    
5025
  // Calculate address of entry in string cache: each entry consists
5026
  // of two pointer sized fields.
5027
  sll(scratch, scratch, kPointerSizeLog2 + 1);
5028
  Addu(scratch, number_string_cache, scratch);
5029

    
5030
  // Check if the entry is the smi we are looking for.
5031
  lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
5032
  Branch(not_found, ne, object, Operand(probe));
5033

    
5034
  // Get the result from the cache.
5035
  bind(&load_result_from_cache);
5036
  lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
5037

    
5038
  IncrementCounter(isolate()->counters()->number_to_string_native(),
5039
                   1,
5040
                   scratch1,
5041
                   scratch2);
5042
}
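// Indexing sketch (derived from the code above): for a smi key the cache
// index is the untagged value and'ed with mask; for a heap number it is the
// xor of the value's two 32-bit words and'ed with mask. Each entry is a
// (number, string) pair, hence the kPointerSizeLog2 + 1 shift when turning
// the index into a byte offset.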
5043

    
5044

    
5045
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
5046
    Register first,
5047
    Register second,
5048
    Register scratch1,
5049
    Register scratch2,
5050
    Label* failure) {
5051
  // Test that both first and second are sequential ASCII strings.
5052
  // Assume that they are non-smis.
5053
  lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5054
  lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5055
  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5056
  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5057

    
5058
  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
5059
                                               scratch2,
5060
                                               scratch1,
5061
                                               scratch2,
5062
                                               failure);
5063
}
5064

    
5065

    
5066
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
5067
                                                         Register second,
5068
                                                         Register scratch1,
5069
                                                         Register scratch2,
5070
                                                         Label* failure) {
5071
  // Check that neither is a smi.
5072
  STATIC_ASSERT(kSmiTag == 0);
5073
  And(scratch1, first, Operand(second));
5074
  JumpIfSmi(scratch1, failure);
5075
  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
5076
                                             second,
5077
                                             scratch1,
5078
                                             scratch2,
5079
                                             failure);
5080
}
5081

    
5082

    
5083
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
5084
    Register first,
5085
    Register second,
5086
    Register scratch1,
5087
    Register scratch2,
5088
    Label* failure) {
5089
  const int kFlatAsciiStringMask =
5090
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5091
  const int kFlatAsciiStringTag =
5092
      kStringTag | kOneByteStringTag | kSeqStringTag;
5093
  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
5094
  andi(scratch1, first, kFlatAsciiStringMask);
5095
  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
5096
  andi(scratch2, second, kFlatAsciiStringMask);
5097
  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
5098
}
5099

    
5100

    
5101
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
5102
                                                            Register scratch,
5103
                                                            Label* failure) {
5104
  const int kFlatAsciiStringMask =
5105
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5106
  const int kFlatAsciiStringTag =
5107
      kStringTag | kOneByteStringTag | kSeqStringTag;
5108
  And(scratch, type, Operand(kFlatAsciiStringMask));
5109
  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
5110
}
5111

    
5112

    
5113
static const int kRegisterPassedArguments = 4;
5114

    
5115
int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5116
                                              int num_double_arguments) {
5117
  int stack_passed_words = 0;
5118
  num_reg_arguments += 2 * num_double_arguments;
5119

    
5120
  // Up to four simple arguments are passed in registers a0..a3.
5121
  if (num_reg_arguments > kRegisterPassedArguments) {
5122
    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5123
  }
5124
  stack_passed_words += kCArgSlotCount;
5125
  return stack_passed_words;
5126
}
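// Worked example (assuming the O32 convention reflected in kCArgSlotCount):
// a call with two integer and two double arguments counts as 2 + 2 * 2 = 6
// register arguments, so 6 - 4 = 2 extra words are passed on the stack, on
// top of the kCArgSlotCount reserved argument slots.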
5127

    
5128

    
5129
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5130
                                          int num_double_arguments,
5131
                                          Register scratch) {
5132
  int frame_alignment = ActivationFrameAlignment();
5133

    
5134
  // Up to four simple arguments are passed in registers a0..a3.
5135
  // Those four arguments must have reserved argument slots on the stack for
5136
  // mips, even though those argument slots are not normally used.
5137
  // Remaining arguments are pushed on the stack, above (higher address than)
5138
  // the argument slots.
5139
  int stack_passed_arguments = CalculateStackPassedWords(
5140
      num_reg_arguments, num_double_arguments);
5141
  if (frame_alignment > kPointerSize) {
5142
    // Make stack end at alignment and make room for num_arguments - 4 words
5143
    // and the original value of sp.
5144
    mov(scratch, sp);
5145
    Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5146
    ASSERT(IsPowerOf2(frame_alignment));
5147
    And(sp, sp, Operand(-frame_alignment));
5148
    sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5149
  } else {
5150
    Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5151
  }
5152
}
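// The extra word reserved above (the "+ 1") holds the original sp whenever
// the stack had to be re-aligned; CallCFunctionHelper later restores sp by
// reloading it from that slot rather than undoing the alignment arithmetic.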
5153

    
5154

    
5155
void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5156
                                          Register scratch) {
5157
  PrepareCallCFunction(num_reg_arguments, 0, scratch);
5158
}
5159

    
5160

    
5161
void MacroAssembler::CallCFunction(ExternalReference function,
5162
                                   int num_reg_arguments,
5163
                                   int num_double_arguments) {
5164
  li(t8, Operand(function));
5165
  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
5166
}
5167

    
5168

    
5169
void MacroAssembler::CallCFunction(Register function,
5170
                                   int num_reg_arguments,
5171
                                   int num_double_arguments) {
5172
  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
5173
}
5174

    
5175

    
5176
void MacroAssembler::CallCFunction(ExternalReference function,
5177
                                   int num_arguments) {
5178
  CallCFunction(function, num_arguments, 0);
5179
}
5180

    
5181

    
5182
void MacroAssembler::CallCFunction(Register function,
5183
                                   int num_arguments) {
5184
  CallCFunction(function, num_arguments, 0);
5185
}
5186

    
5187

    
5188
void MacroAssembler::CallCFunctionHelper(Register function,
5189
                                         int num_reg_arguments,
5190
                                         int num_double_arguments) {
5191
  ASSERT(has_frame());
5192
  // Make sure that the stack is aligned before calling a C function unless
5193
  // running in the simulator. The simulator has its own alignment check which
5194
  // provides more information.
5195
  // The argument slots are presumed to have been set up by
5196
  // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5197

    
5198
#if V8_HOST_ARCH_MIPS
5199
  if (emit_debug_code()) {
5200
    int frame_alignment = OS::ActivationFrameAlignment();
5201
    int frame_alignment_mask = frame_alignment - 1;
5202
    if (frame_alignment > kPointerSize) {
5203
      ASSERT(IsPowerOf2(frame_alignment));
5204
      Label alignment_as_expected;
5205
      And(at, sp, Operand(frame_alignment_mask));
5206
      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5207
      // Don't use Check here, as it will call Runtime_Abort possibly
5208
      // re-entering here.
5209
      stop("Unexpected alignment in CallCFunction");
5210
      bind(&alignment_as_expected);
5211
    }
5212
  }
5213
#endif  // V8_HOST_ARCH_MIPS
5214

    
5215
  // Just call directly. The function called cannot cause a GC, or
5216
  // allow preemption, so the return address in the link register
5217
  // stays correct.
5218

    
5219
  if (!function.is(t9)) {
5220
    mov(t9, function);
5221
    function = t9;
5222
  }
5223

    
5224
  Call(function);
5225

    
5226
  int stack_passed_arguments = CalculateStackPassedWords(
5227
      num_reg_arguments, num_double_arguments);
5228

    
5229
  if (OS::ActivationFrameAlignment() > kPointerSize) {
5230
    lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5231
  } else {
5232
    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5233
  }
5234
}
5235

    
5236

    
5237
#undef BRANCH_ARGS_CHECK
5238

    
5239

    
5240
void MacroAssembler::PatchRelocatedValue(Register li_location,
5241
                                         Register scratch,
5242
                                         Register new_value) {
5243
  lw(scratch, MemOperand(li_location));
5244
  // At this point scratch is a lui(at, ...) instruction.
5245
  if (emit_debug_code()) {
5246
    And(scratch, scratch, kOpcodeMask);
5247
    Check(eq, kTheInstructionToPatchShouldBeALui,
5248
        scratch, Operand(LUI));
5249
    lw(scratch, MemOperand(li_location));
5250
  }
5251
  srl(t9, new_value, kImm16Bits);
5252
  Ins(scratch, t9, 0, kImm16Bits);
5253
  sw(scratch, MemOperand(li_location));
5254

    
5255
  lw(scratch, MemOperand(li_location, kInstrSize));
5256
  // scratch is now ori(at, ...).
5257
  if (emit_debug_code()) {
5258
    And(scratch, scratch, kOpcodeMask);
5259
    Check(eq, kTheInstructionToPatchShouldBeAnOri,
5260
        scratch, Operand(ORI));
5261
    lw(scratch, MemOperand(li_location, kInstrSize));
5262
  }
5263
  Ins(scratch, new_value, 0, kImm16Bits);
5264
  sw(scratch, MemOperand(li_location, kInstrSize));
5265

    
5266
  // Update the I-cache so the new lui and ori can be executed.
5267
  FlushICache(li_location, 2);
5268
}
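// Background for the two patches above: a 32-bit constant materialized by
// li() is split across a lui (upper 16 bits) and an ori (lower 16 bits).
// Patching therefore rewrites only the 16-bit immediate field of each
// instruction and then flushes the I-cache so the new pair is fetched.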
5269

    
5270
void MacroAssembler::GetRelocatedValue(Register li_location,
5271
                                       Register value,
5272
                                       Register scratch) {
5273
  lw(value, MemOperand(li_location));
5274
  if (emit_debug_code()) {
5275
    And(value, value, kOpcodeMask);
5276
    Check(eq, kTheInstructionShouldBeALui,
5277
        value, Operand(LUI));
5278
    lw(value, MemOperand(li_location));
5279
  }
5280

    
5281
  // value now holds a lui instruction. Extract the immediate.
5282
  sll(value, value, kImm16Bits);
5283

    
5284
  lw(scratch, MemOperand(li_location, kInstrSize));
5285
  if (emit_debug_code()) {
5286
    And(scratch, scratch, kOpcodeMask);
5287
    Check(eq, kTheInstructionShouldBeAnOri,
5288
        scratch, Operand(ORI));
5289
    lw(scratch, MemOperand(li_location, kInstrSize));
5290
  }
5291
  // "scratch" now holds an ori instruction. Extract the immediate.
5292
  andi(scratch, scratch, kImm16Mask);
5293

    
5294
  // Merge the results.
5295
  or_(value, value, scratch);
5296
}
5297

    
5298

    
5299
void MacroAssembler::CheckPageFlag(
5300
    Register object,
5301
    Register scratch,
5302
    int mask,
5303
    Condition cc,
5304
    Label* condition_met) {
5305
  And(scratch, object, Operand(~Page::kPageAlignmentMask));
5306
  lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5307
  And(scratch, scratch, Operand(mask));
5308
  Branch(condition_met, cc, scratch, Operand(zero_reg));
5309
}
5310

    
5311

    
5312
void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5313
                                        Register scratch,
5314
                                        Label* if_deprecated) {
5315
  if (map->CanBeDeprecated()) {
5316
    li(scratch, Operand(map));
5317
    lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5318
    And(scratch, scratch, Operand(Smi::FromInt(Map::Deprecated::kMask)));
5319
    Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5320
  }
5321
}
5322

    
5323

    
5324
void MacroAssembler::JumpIfBlack(Register object,
5325
                                 Register scratch0,
5326
                                 Register scratch1,
5327
                                 Label* on_black) {
5328
  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
5329
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5330
}
5331

    
5332

    
5333
void MacroAssembler::HasColor(Register object,
5334
                              Register bitmap_scratch,
5335
                              Register mask_scratch,
5336
                              Label* has_color,
5337
                              int first_bit,
5338
                              int second_bit) {
5339
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5340
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5341

    
5342
  GetMarkBits(object, bitmap_scratch, mask_scratch);
5343

    
5344
  Label other_color, word_boundary;
5345
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5346
  And(t8, t9, Operand(mask_scratch));
5347
  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5348
  // Shift left 1 by adding.
5349
  Addu(mask_scratch, mask_scratch, Operand(mask_scratch));
5350
  Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5351
  And(t8, t9, Operand(mask_scratch));
5352
  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5353
  jmp(&other_color);
5354

    
5355
  bind(&word_boundary);
5356
  lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5357
  And(t9, t9, Operand(1));
5358
  Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5359
  bind(&other_color);
5360
}
5361

    
5362

    
5363
// Detect some, but not all, common pointer-free objects.  This is used by the
5364
// incremental write barrier which doesn't care about oddballs (they are always
5365
// marked black immediately so this code is not hit).
5366
void MacroAssembler::JumpIfDataObject(Register value,
5367
                                      Register scratch,
5368
                                      Label* not_data_object) {
5369
  ASSERT(!AreAliased(value, scratch, t8, no_reg));
5370
  Label is_data_object;
5371
  lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5372
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5373
  Branch(&is_data_object, eq, t8, Operand(scratch));
5374
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5375
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5376
  // If it's a string and it's not a cons string then it's an object containing
5377
  // no GC pointers.
5378
  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5379
  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5380
  Branch(not_data_object, ne, t8, Operand(zero_reg));
5381
  bind(&is_data_object);
5382
}
5383

    
5384

    
5385
void MacroAssembler::GetMarkBits(Register addr_reg,
5386
                                 Register bitmap_reg,
5387
                                 Register mask_reg) {
5388
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5389
  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5390
  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5391
  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5392
  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5393
  sll(t8, t8, kPointerSizeLog2);
5394
  Addu(bitmap_reg, bitmap_reg, t8);
5395
  li(t8, Operand(1));
5396
  sllv(mask_reg, t8, mask_reg);
5397
}
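// In effect (derived from the code above): bitmap_reg ends up addressing the
// marking-bitmap cell for addr_reg within its page (callers add
// MemoryChunk::kHeaderSize when loading), and mask_reg holds
// 1 << ((addr >> kPointerSizeLog2) mod Bitmap::kBitsPerCell), so callers can
// test or set the object's mark bits with a load, an and/or, and a store.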
5398

    
5399

    
5400
void MacroAssembler::EnsureNotWhite(
5401
    Register value,
5402
    Register bitmap_scratch,
5403
    Register mask_scratch,
5404
    Register load_scratch,
5405
    Label* value_is_white_and_not_data) {
5406
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5407
  GetMarkBits(value, bitmap_scratch, mask_scratch);
5408

    
5409
  // If the value is black or grey we don't need to do anything.
5410
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5411
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5412
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5413
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5414

    
5415
  Label done;
5416

    
5417
  // Since both black and grey have a 1 in the first position and white does
5418
  // not have a 1 there we only need to check one bit.
5419
  lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5420
  And(t8, mask_scratch, load_scratch);
5421
  Branch(&done, ne, t8, Operand(zero_reg));
5422

    
5423
  if (emit_debug_code()) {
5424
    // Check for impossible bit pattern.
5425
    Label ok;
5426
    // sll may overflow, making the check conservative.
5427
    sll(t8, mask_scratch, 1);
5428
    And(t8, load_scratch, t8);
5429
    Branch(&ok, eq, t8, Operand(zero_reg));
5430
    stop("Impossible marking bit pattern");
5431
    bind(&ok);
5432
  }
5433

    
5434
  // Value is white.  We check whether it is data that doesn't need scanning.
5435
  // Currently only checks for HeapNumber and non-cons strings.
5436
  Register map = load_scratch;  // Holds map while checking type.
5437
  Register length = load_scratch;  // Holds length of object after testing type.
5438
  Label is_data_object;
5439

    
5440
  // Check for heap-number
5441
  lw(map, FieldMemOperand(value, HeapObject::kMapOffset));
5442
  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5443
  {
5444
    Label skip;
5445
    Branch(&skip, ne, t8, Operand(map));
5446
    li(length, HeapNumber::kSize);
5447
    Branch(&is_data_object);
5448
    bind(&skip);
5449
  }
5450

    
5451
  // Check for strings.
5452
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5453
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5454
  // If it's a string and it's not a cons string then it's an object containing
5455
  // no GC pointers.
5456
  Register instance_type = load_scratch;
5457
  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
5458
  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
5459
  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
5460
  // It's a non-indirect (non-cons and non-slice) string.
5461
  // If it's external, the length is just ExternalString::kSize.
5462
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
5463
  // External strings are the only ones with the kExternalStringTag bit
5464
  // set.
5465
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
5466
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
5467
  And(t8, instance_type, Operand(kExternalStringTag));
5468
  {
5469
    Label skip;
5470
    Branch(&skip, eq, t8, Operand(zero_reg));
5471
    li(length, ExternalString::kSize);
5472
    Branch(&is_data_object);
5473
    bind(&skip);
5474
  }
5475

    
5476
  // Sequential string, either ASCII or UC16.
5477
  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5478
  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5479
  // getting the length multiplied by 2.
5480
  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5481
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5482
  lw(t9, FieldMemOperand(value, String::kLengthOffset));
5483
  And(t8, instance_type, Operand(kStringEncodingMask));
5484
  {
5485
    Label skip;
5486
    Branch(&skip, eq, t8, Operand(zero_reg));
5487
    srl(t9, t9, 1);
5488
    bind(&skip);
5489
  }
5490
  Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5491
  And(length, length, Operand(~kObjectAlignmentMask));
5492

    
5493
  bind(&is_data_object);
5494
  // Value is a data object, and it is white.  Mark it black.  Since we know
5495
  // that the object is white we can make it black by flipping one bit.
5496
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5497
  Or(t8, t8, Operand(mask_scratch));
5498
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5499

    
5500
  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5501
  lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5502
  Addu(t8, t8, Operand(length));
5503
  sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5504

    
5505
  bind(&done);
5506
}
5507

    
5508

    
5509
void MacroAssembler::LoadInstanceDescriptors(Register map,
5510
                                             Register descriptors) {
5511
  lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5512
}
5513

    
5514

    
5515
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5516
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5517
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5518
}
5519

    
5520

    
5521
void MacroAssembler::EnumLength(Register dst, Register map) {
5522
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5523
  lw(dst, FieldMemOperand(map, Map::kBitField3Offset));
5524
  And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
5525
}
5526

    
5527

    
5528
void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5529
  Register empty_fixed_array_value = t2;
5530
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5531
  Label next, start;
5532
  mov(a2, a0);
5533

    
5534
  // Check if the enum length field is properly initialized, indicating that
5535
  // there is an enum cache.
5536
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5537

    
5538
  EnumLength(a3, a1);
5539
  Branch(call_runtime, eq, a3, Operand(Smi::FromInt(Map::kInvalidEnumCache)));
5540

    
5541
  jmp(&start);
5542

    
5543
  bind(&next);
5544
  lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5545

    
5546
  // For all objects but the receiver, check that the cache is empty.
5547
  EnumLength(a3, a1);
5548
  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5549

    
5550
  bind(&start);
5551

    
5552
  // Check that there are no elements. Register a2 contains the current JS
5553
  // object we've reached through the prototype chain.
5554
  lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5555
  Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
5556

    
5557
  lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5558
  Branch(&next, ne, a2, Operand(null_value));
5559
}
5560

    
5561

    
5562
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5563
  ASSERT(!output_reg.is(input_reg));
5564
  Label done;
5565
  li(output_reg, Operand(255));
5566
  // Normal branch: nop in delay slot.
5567
  Branch(&done, gt, input_reg, Operand(output_reg));
5568
  // Use delay slot in this branch.
5569
  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
5570
  mov(output_reg, zero_reg);  // In delay slot.
5571
  mov(output_reg, input_reg);  // Value is in range 0..255.
5572
  bind(&done);
5573
}
5574

    
5575

    
5576
void MacroAssembler::ClampDoubleToUint8(Register result_reg,
5577
                                        DoubleRegister input_reg,
5578
                                        DoubleRegister temp_double_reg) {
5579
  Label above_zero;
5580
  Label done;
5581
  Label in_bounds;
5582

    
5583
  Move(temp_double_reg, 0.0);
5584
  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
5585

    
5586
  // Double value is <= 0 (including -Infinity) or NaN: return 0.
5587
  mov(result_reg, zero_reg);
5588
  Branch(&done);
5589

    
5590
  // Double value is >= 255, return 255.
5591
  bind(&above_zero);
5592
  Move(temp_double_reg, 255.0);
5593
  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
5594
  li(result_reg, Operand(255));
5595
  Branch(&done);
5596

    
5597
  // In 0-255 range, round and truncate.
5598
  bind(&in_bounds);
5599
  cvt_w_d(temp_double_reg, input_reg);
5600
  mfc1(result_reg, temp_double_reg);
5601
  bind(&done);
5602
}
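// Roughly equivalent scalar logic, as a sketch (the cvt_w_d above rounds
// according to the current FCSR mode, round-to-nearest by default):
//   if (!(d > 0.0)) return 0;      // negatives, zero and NaN
//   if (d > 255.0) return 255;
//   return static_cast<int>(nearbyint(d));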
5603

    
5604

    
5605
void MacroAssembler::TestJSArrayForAllocationMemento(
5606
    Register receiver_reg,
5607
    Register scratch_reg,
5608
    Label* no_memento_found,
5609
    Condition cond,
5610
    Label* allocation_memento_present) {
5611
  ExternalReference new_space_start =
5612
      ExternalReference::new_space_start(isolate());
5613
  ExternalReference new_space_allocation_top =
5614
      ExternalReference::new_space_allocation_top_address(isolate());
5615
  Addu(scratch_reg, receiver_reg,
5616
       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5617
  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5618
  li(at, Operand(new_space_allocation_top));
5619
  lw(at, MemOperand(at));
5620
  Branch(no_memento_found, gt, scratch_reg, Operand(at));
5621
  lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5622
  if (allocation_memento_present) {
5623
    Branch(allocation_memento_present, cond, scratch_reg,
5624
           Operand(isolate()->factory()->allocation_memento_map()));
5625
  }
5626
}
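// Sketch of the check above: an AllocationMemento, when present, sits
// immediately after the JSArray in new space. The code computes the address
// just past where such a memento would end, verifies it lies inside the
// current new-space allocation window, and compares the map word at the
// memento's start against the allocation memento map.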
5627

    
5628

    
5629
Register GetRegisterThatIsNotOneOf(Register reg1,
5630
                                   Register reg2,
5631
                                   Register reg3,
5632
                                   Register reg4,
5633
                                   Register reg5,
5634
                                   Register reg6) {
5635
  RegList regs = 0;
5636
  if (reg1.is_valid()) regs |= reg1.bit();
5637
  if (reg2.is_valid()) regs |= reg2.bit();
5638
  if (reg3.is_valid()) regs |= reg3.bit();
5639
  if (reg4.is_valid()) regs |= reg4.bit();
5640
  if (reg5.is_valid()) regs |= reg5.bit();
5641
  if (reg6.is_valid()) regs |= reg6.bit();
5642

    
5643
  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
5644
    Register candidate = Register::FromAllocationIndex(i);
5645
    if (regs & candidate.bit()) continue;
5646
    return candidate;
5647
  }
5648
  UNREACHABLE();
5649
  return no_reg;
5650
}
5651

    
5652

    
5653
bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5654
  if (r1.is(r2)) return true;
5655
  if (r1.is(r3)) return true;
5656
  if (r1.is(r4)) return true;
5657
  if (r2.is(r3)) return true;
5658
  if (r2.is(r4)) return true;
5659
  if (r3.is(r4)) return true;
5660
  return false;
5661
}
5662

    
5663

    
5664
CodePatcher::CodePatcher(byte* address, int instructions)
5665
    : address_(address),
5666
      size_(instructions * Assembler::kInstrSize),
5667
      masm_(NULL, address, size_ + Assembler::kGap) {
5668
  // Create a new macro assembler pointing to the address of the code to patch.
5669
  // The size is adjusted with kGap in order for the assembler to generate size
5670
  // bytes of instructions without failing with buffer size constraints.
5671
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5672
}
5673

    
5674

    
5675
CodePatcher::~CodePatcher() {
5676
  // Indicate that code has changed.
5677
  CPU::FlushICache(address_, size_);
5678

    
5679
  // Check that the code was patched as expected.
5680
  ASSERT(masm_.pc_ == address_ + size_);
5681
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5682
}
5683

    
5684

    
5685
void CodePatcher::Emit(Instr instr) {
5686
  masm()->emit(instr);
5687
}
5688

    
5689

    
5690
void CodePatcher::Emit(Address addr) {
5691
  masm()->emit(reinterpret_cast<Instr>(addr));
5692
}
5693

    
5694

    
5695
void CodePatcher::ChangeBranchCondition(Condition cond) {
5696
  Instr instr = Assembler::instr_at(masm_.pc_);
5697
  ASSERT(Assembler::IsBranch(instr));
5698
  uint32_t opcode = Assembler::GetOpcodeField(instr);
5699
  // Currently only the 'eq' and 'ne' cond values are supported and the simple
5700
  // branch instructions (with opcode being the branch type).
5701
  // There are some special cases (see Assembler::IsBranch()) so extending this
5702
  // would be tricky.
5703
  ASSERT(opcode == BEQ ||
5704
         opcode == BNE ||
5705
         opcode == BLEZ ||
5706
         opcode == BGTZ ||
5707
         opcode == BEQL ||
5708
         opcode == BNEL ||
5709
         opcode == BLEZL ||
5710
         opcode == BGTZL);
5711
  opcode = (cond == eq) ? BEQ : BNE;
5712
  instr = (instr & ~kOpcodeMask) | opcode;
5713
  masm_.emit(instr);
5714
}
5715

    
5716

    
5717
} }  // namespace v8::internal
5718

    
5719
#endif  // V8_TARGET_ARCH_MIPS