The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.

Please select the desired protocol below to get the URL.

This URL has Read-Only access.

Statistics
| Branch: | Revision:

main_repo / deps / v8 / src / mips / macro-assembler-mips.h @ f230a1cf

History | View | Annotate | Download (64.7 KB)

1
// Copyright 2012 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
4
// met:
5
//
6
//     * Redistributions of source code must retain the above copyright
7
//       notice, this list of conditions and the following disclaimer.
8
//     * Redistributions in binary form must reproduce the above
9
//       copyright notice, this list of conditions and the following
10
//       disclaimer in the documentation and/or other materials provided
11
//       with the distribution.
12
//     * Neither the name of Google Inc. nor the names of its
13
//       contributors may be used to endorse or promote products derived
14
//       from this software without specific prior written permission.
15
//
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27

    
28
#ifndef V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
29
#define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
30

    
31
#include "assembler.h"
32
#include "mips/assembler-mips.h"
33
#include "v8globals.h"
34

    
35
namespace v8 {
36
namespace internal {
37

    
38
// Forward declaration.
39
class JumpTarget;
40

    
41
// Reserved Register Usage Summary.
42
//
43
// Registers t8, t9, and at are reserved for use by the MacroAssembler.
44
//
45
// The programmer should know that the MacroAssembler may clobber these three,
46
// but won't touch other registers except in special cases.
47
//
48
// Per the MIPS ABI, register t9 must be used for indirect function call
49
// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
50
// trying to update gp register for position-independent-code. Whenever
51
// MIPS generated code calls C code, it must be via t9 register.
52

    
53

    
54
// Flags used for the LeaveExitFrame function: selects whether frame
// teardown also emits the return sequence.
enum LeaveExitFrameMode {
  EMIT_RETURN = true,      // Emit the return as part of leaving the frame.
  NO_EMIT_RETURN = false   // Caller emits the return itself.
};
59

    
60
// Flags used for AllocateHeapNumber: whether the returned pointer carries
// the heap-object tag.
enum TaggingMode {
  // Tag the result.
  TAG_RESULT,
  // Don't tag the result; leave it as a raw address.
  DONT_TAG_RESULT
};
67

    
68
// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
  // No special flags.
  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
  // Object is known to be a non smi, so the smi check can be skipped.
  OBJECT_NOT_SMI = 1 << 0,
  // Don't load NaNs or infinities; branch to the non number case instead.
  AVOID_NANS_AND_INFINITIES = 1 << 1
};
77

    
78
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot {
  USE_DELAY_SLOT,  // Caller fills the delay slot; no nop is emitted.
  PROTECT          // The macro assembler fills the delay slot with a nop.
};
83

    
84
// Flags used for the li macro-assembler function (see li() below).
enum LiFlags {
  // If the constant value can be represented in just 16 bits, then
  // optimize the li to use a single instruction, rather than lui/ori pair.
  OPTIMIZE_SIZE = 0,
  // Always use 2 instructions (lui/ori pair), even if the constant could
  // be loaded with just one, so that this value is patchable later.
  CONSTANT_SIZE = 1
};
93

    
94

    
95
// Whether the write-barrier code should also update the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether record-write helpers emit an inline smi check on the value.
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Whether ra has already been saved by the caller (used by RecordWrite*).
enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
98

    
99
// Returns a general-purpose register that differs from every register
// passed in; unused arguments default to no_reg. Defined elsewhere.
Register GetRegisterThatIsNotOneOf(Register reg1,
                                   Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

// NOTE(review): defined elsewhere; presumably returns true when any two of
// the given registers are the same register -- confirm at the definition.
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
107

    
108

    
109
// -----------------------------------------------------------------------------
110
// Static helper functions.
111

    
112
// Builds a MemOperand addressing slot |index| of the context held in
// register |context|.
inline MemOperand ContextOperand(Register context, int index) {
  const int slot_offset = Context::SlotOffset(index);
  return MemOperand(context, slot_offset);
}
115

    
116

    
117
// Addresses the global object slot of the current context (register cp).
inline MemOperand GlobalObjectOperand()  {
  const int global_index = Context::GLOBAL_OBJECT_INDEX;
  return ContextOperand(cp, global_index);
}
120

    
121

    
122
// Generate a MemOperand for loading a field from an object.
123
inline MemOperand FieldMemOperand(Register object, int offset) {
124
  return MemOperand(object, offset - kHeapObjectTag);
125
}
126

    
127

    
128
// Generate a MemOperand for storing arguments 5..N on the stack
129
// when calling CallCFunction().
130
inline MemOperand CFunctionArgumentOperand(int index) {
131
  ASSERT(index > kCArgSlotCount);
132
  // Argument 5 takes the slot just past the four Arg-slots.
133
  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
134
  return MemOperand(sp, offset);
135
}
136

    
137

    
138
// MacroAssembler implements a collection of frequently used macros.
139
class MacroAssembler: public Assembler {
140
 public:
141
  // The isolate parameter can be NULL if the macro assembler should
142
  // not use isolate-dependent functionality. In this case, it's the
143
  // responsibility of the caller to never invoke such function on the
144
  // macro assembler.
145
  MacroAssembler(Isolate* isolate, void* buffer, int size);
146

    
147
  // Arguments macros.
148
#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
149
#define COND_ARGS cond, r1, r2
150

    
151
  // Cases when relocation is not needed.
152
#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
153
  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
154
  inline void Name(BranchDelaySlot bd, target_type target) { \
155
    Name(target, bd); \
156
  } \
157
  void Name(target_type target, \
158
            COND_TYPED_ARGS, \
159
            BranchDelaySlot bd = PROTECT); \
160
  inline void Name(BranchDelaySlot bd, \
161
                   target_type target, \
162
                   COND_TYPED_ARGS) { \
163
    Name(target, COND_ARGS, bd); \
164
  }
165

    
166
#define DECLARE_BRANCH_PROTOTYPES(Name) \
167
  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
168
  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
169

    
170
  DECLARE_BRANCH_PROTOTYPES(Branch)
171
  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
172

    
173
#undef DECLARE_BRANCH_PROTOTYPES
174
#undef COND_TYPED_ARGS
175
#undef COND_ARGS
176

    
177

    
178
  // Jump, Call, and Ret pseudo instructions implementing inter-working.
179
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
180
  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
181

    
182
  void Jump(Register target, COND_ARGS);
183
  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
184
  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
185
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
186
  static int CallSize(Register target, COND_ARGS);
187
  void Call(Register target, COND_ARGS);
188
  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
189
  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
190
  int CallSize(Handle<Code> code,
191
               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
192
               TypeFeedbackId ast_id = TypeFeedbackId::None(),
193
               COND_ARGS);
194
  void Call(Handle<Code> code,
195
            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
196
            TypeFeedbackId ast_id = TypeFeedbackId::None(),
197
            COND_ARGS);
198
  // Return pseudo-instruction; conditional-execution and branch-delay-slot
  // options are supplied via COND_ARGS (all defaulted).
  void Ret(COND_ARGS);
  // Convenience overload: lets the delay-slot policy be named first while
  // the condition arguments keep their defaults.
  inline void Ret(BranchDelaySlot bd, Condition cond = al,
    Register rs = zero_reg, const Operand& rt = Operand(zero_reg)) {
    Ret(cond, rs, rt, bd);
  }
203

    
204
  void Branch(Label* L,
205
              Condition cond,
206
              Register rs,
207
              Heap::RootListIndex index,
208
              BranchDelaySlot bdslot = PROTECT);
209

    
210
#undef COND_ARGS
211

    
212
  // Emit code to discard a non-negative number of pointer-sized elements
213
  // from the stack, clobbering only the sp register.
214
  void Drop(int count,
215
            Condition cond = cc_always,
216
            Register reg = no_reg,
217
            const Operand& op = Operand(no_reg));
218

    
219
  // Trivial case of DropAndRet that utilizes the delay slot and only emits
220
  // 2 instructions.
221
  void DropAndRet(int drop);
222

    
223
  void DropAndRet(int drop,
224
                  Condition cond,
225
                  Register reg,
226
                  const Operand& op);
227

    
228
  // Swap two registers.  If the scratch register is omitted then a slightly
229
  // less efficient form using xor instead of mov is emitted.
230
  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
231

    
232
  void Call(Label* target);
233

    
234
  // Register-to-register move; emits nothing when src and dst coincide.
  inline void Move(Register dst, Register src) {
    if (dst.is(src)) return;
    mov(dst, src);
  }
239

    
240
  // Double-register move; emits nothing when src and dst coincide.
  inline void Move(FPURegister dst, FPURegister src) {
    if (dst.is(src)) return;
    mov_d(dst, src);
  }
245

    
246
  // Moves the two 32-bit halves of the double in |src| to GP registers:
  // low word from |src|, high word from the next FPU register (src + 1).
  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
    mfc1(dst_low, src);
    mfc1(dst_high, FPURegister::from_code(src.code() + 1));
  }
250

    
251
  // Copies the high 32 bits of the double in |src| (held in FPU register
  // src + 1) into |dst_high|.
  inline void FmoveHigh(Register dst_high, FPURegister src) {
    mfc1(dst_high, FPURegister::from_code(src.code() + 1));
  }
254

    
255
  // Copies the low 32 bits of the double in |src| into |dst_low|.
  inline void FmoveLow(Register dst_low, FPURegister src) {
    mfc1(dst_low, src);
  }
258

    
259
  // Assembles a double in |dst| from two GP registers: low word from
  // |src_low|, high word (FPU register dst + 1) from |src_high|.
  inline void Move(FPURegister dst, Register src_low, Register src_high) {
    mtc1(src_low, dst);
    mtc1(src_high, FPURegister::from_code(dst.code() + 1));
  }
263

    
264
  // Conditional move.
265
  void Move(FPURegister dst, double imm);
266
  void Movz(Register rd, Register rs, Register rt);
267
  void Movn(Register rd, Register rs, Register rt);
268
  void Movt(Register rd, Register rs, uint16_t cc = 0);
269
  void Movf(Register rd, Register rs, uint16_t cc = 0);
270

    
271
  void Clz(Register rd, Register rs);
272

    
273
  // Jump unconditionally to the given label.
  // We NEED a nop in the branch delay slot, as it is used by v8, for
  // example in CodeGenerator::ProcessDeferred().
  // Currently the branch delay slot is filled by the MacroAssembler.
  // Rather use b(Label) for plain code generation.
  void jmp(Label* L) {
    Branch(L);
  }
281

    
282
  // Load an object from the root table.
283
  void LoadRoot(Register destination,
284
                Heap::RootListIndex index);
285
  void LoadRoot(Register destination,
286
                Heap::RootListIndex index,
287
                Condition cond, Register src1, const Operand& src2);
288

    
289
  // Store an object to the root table.
290
  void StoreRoot(Register source,
291
                 Heap::RootListIndex index);
292
  void StoreRoot(Register source,
293
                 Heap::RootListIndex index,
294
                 Condition cond, Register src1, const Operand& src2);
295

    
296
  void LoadHeapObject(Register dst, Handle<HeapObject> object);
297

    
298
  // Loads |object| into |result|: smis go through li, heap objects through
  // LoadHeapObject.
  void LoadObject(Register result, Handle<Object> object) {
    AllowDeferredHandleDereference heap_object_check;
    if (!object->IsHeapObject()) {
      li(result, object);
    } else {
      LoadHeapObject(result, Handle<HeapObject>::cast(object));
    }
  }
306

    
307
  // ---------------------------------------------------------------------------
308
  // GC Support
309

    
310
  void IncrementalMarkingRecordWriteHelper(Register object,
311
                                           Register value,
312
                                           Register address);
313

    
314
  enum RememberedSetFinalAction {
315
    kReturnAtEnd,
316
    kFallThroughAtEnd
317
  };
318

    
319

    
320
  // Record in the remembered set the fact that we have a pointer to new space
321
  // at the address pointed to by the addr register.  Only works if addr is not
322
  // in new space.
323
  void RememberedSetHelper(Register object,  // Used for debug code.
324
                           Register addr,
325
                           Register scratch,
326
                           SaveFPRegsMode save_fp,
327
                           RememberedSetFinalAction and_then);
328

    
329
  void CheckPageFlag(Register object,
330
                     Register scratch,
331
                     int mask,
332
                     Condition cc,
333
                     Label* condition_met);
334

    
335
  void CheckMapDeprecated(Handle<Map> map,
336
                          Register scratch,
337
                          Label* if_deprecated);
338

    
339
  // Check if object is in new space.  Jumps if the object is not in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfNotInNewSpace(Register object,
                           Register scratch,
                           Label* branch) {
    // ne: take the branch when the check shows the object outside new space.
    InNewSpace(object, scratch, ne, branch);
  }
346

    
347
  // Check if object is in new space.  Jumps if the object is in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfInNewSpace(Register object,
                        Register scratch,
                        Label* branch) {
    // eq: take the branch when the check shows the object inside new space.
    InNewSpace(object, scratch, eq, branch);
  }
354

    
355
  // Check if an object has a given incremental marking color.
356
  void HasColor(Register object,
357
                Register scratch0,
358
                Register scratch1,
359
                Label* has_color,
360
                int first_bit,
361
                int second_bit);
362

    
363
  void JumpIfBlack(Register object,
364
                   Register scratch0,
365
                   Register scratch1,
366
                   Label* on_black);
367

    
368
  // Checks the color of an object.  If the object is already grey or black
369
  // then we just fall through, since it is already live.  If it is white and
370
  // we can determine that it doesn't need to be scanned, then we just mark it
371
  // black and fall through.  For the rest we jump to the label so the
372
  // incremental marker can fix its assumptions.
373
  void EnsureNotWhite(Register object,
374
                      Register scratch1,
375
                      Register scratch2,
376
                      Register scratch3,
377
                      Label* object_is_white_and_not_data);
378

    
379
  // Detects conservatively whether an object is data-only, i.e. it does not
380
  // need to be scanned by the garbage collector.
381
  void JumpIfDataObject(Register value,
382
                        Register scratch,
383
                        Label* not_data_object);
384

    
385
  // Notify the garbage collector that we wrote a pointer into an object.
386
  // |object| is the object being stored into, |value| is the object being
387
  // stored.  value and scratch registers are clobbered by the operation.
388
  // The offset is the offset from the start of the object, not the offset from
389
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
390
  void RecordWriteField(
391
      Register object,
392
      int offset,
393
      Register value,
394
      Register scratch,
395
      RAStatus ra_status,
396
      SaveFPRegsMode save_fp,
397
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
398
      SmiCheck smi_check = INLINE_SMI_CHECK);
399

    
400
  // As above, but the offset has the tag presubtracted.  For use with
  // MemOperand(reg, off).
  inline void RecordWriteContextSlot(
      Register context,
      int offset,
      Register value,
      Register scratch,
      RAStatus ra_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK) {
    // Re-add the heap-object tag so RecordWriteField receives an offset
    // relative to the tagged pointer, as it expects.
    RecordWriteField(context,
                     offset + kHeapObjectTag,
                     value,
                     scratch,
                     ra_status,
                     save_fp,
                     remembered_set_action,
                     smi_check);
  }
420

    
421
  // For a given |object| notify the garbage collector that the slot |address|
422
  // has been written.  |value| is the object being stored. The value and
423
  // address registers are clobbered by the operation.
424
  void RecordWrite(
425
      Register object,
426
      Register address,
427
      Register value,
428
      RAStatus ra_status,
429
      SaveFPRegsMode save_fp,
430
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
431
      SmiCheck smi_check = INLINE_SMI_CHECK);
432

    
433

    
434
  // ---------------------------------------------------------------------------
435
  // Inline caching support.
436

    
437
  // Generate code for checking access rights - used for security checks
438
  // on access to global objects across environments. The holder register
439
  // is left untouched, whereas both scratch registers are clobbered.
440
  void CheckAccessGlobalProxy(Register holder_reg,
441
                              Register scratch,
442
                              Label* miss);
443

    
444
  void GetNumberHash(Register reg0, Register scratch);
445

    
446
  void LoadFromNumberDictionary(Label* miss,
447
                                Register elements,
448
                                Register key,
449
                                Register result,
450
                                Register reg0,
451
                                Register reg1,
452
                                Register reg2);
453

    
454

    
455
  // Emits a marker nop, i.e. sll zero_reg, zero_reg, type
  // (see IsMarkedCode / GetCodeMarker below for the decoding side).
  inline void MarkCode(NopMarkerTypes type) {
    nop(type);
  }
458

    
459
  // Check if the given instruction is a 'type' marker.
  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
  // nop(type)). These instructions are generated to mark special locations
  // in the code, like some special IC code.
  static inline bool IsMarkedCode(Instr instr, int type) {
    // Only marker types in the IC-marker range are valid queries.
    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
    return IsNop(instr, type);
  }
467

    
468

    
469
  // Decodes a code-marker nop: returns <n> if |instr| is
  // sll zero_reg, zero_reg, n with n in the marker range, else -1.
  static inline int GetCodeMarker(Instr instr) {
    // Extract the fields that identify a marker nop.
    uint32_t opcode = ((instr & kOpcodeMask));
    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);

    // Return <n> if we have a sll zero_reg, zero_reg, n
    // else return -1.
    bool sllzz = (opcode == SLL &&
                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
    int type =
        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
    ASSERT((type == -1) ||
           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
    return type;
  }
486

    
487

    
488

    
489
  // ---------------------------------------------------------------------------
490
  // Allocation support.
491

    
492
  // Allocate an object in new space or old pointer space. The object_size is
493
  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
494
  // is passed. If the space is exhausted control continues at the gc_required
495
  // label. The allocated object is returned in result. If the flag
496
  // tag_allocated_object is true the result is tagged as a heap object.
497
  // All registers are clobbered also when control continues at the gc_required
498
  // label.
499
  void Allocate(int object_size,
500
                Register result,
501
                Register scratch1,
502
                Register scratch2,
503
                Label* gc_required,
504
                AllocationFlags flags);
505

    
506
  void Allocate(Register object_size,
507
                Register result,
508
                Register scratch1,
509
                Register scratch2,
510
                Label* gc_required,
511
                AllocationFlags flags);
512

    
513
  // Undo allocation in new space. The object passed and objects allocated after
514
  // it will no longer be allocated. The caller must make sure that no pointers
515
  // are left to the object(s) no longer allocated as they would be invalid when
516
  // allocation is undone.
517
  void UndoAllocationInNewSpace(Register object, Register scratch);
518

    
519

    
520
  void AllocateTwoByteString(Register result,
521
                             Register length,
522
                             Register scratch1,
523
                             Register scratch2,
524
                             Register scratch3,
525
                             Label* gc_required);
526
  void AllocateAsciiString(Register result,
527
                           Register length,
528
                           Register scratch1,
529
                           Register scratch2,
530
                           Register scratch3,
531
                           Label* gc_required);
532
  void AllocateTwoByteConsString(Register result,
533
                                 Register length,
534
                                 Register scratch1,
535
                                 Register scratch2,
536
                                 Label* gc_required);
537
  void AllocateAsciiConsString(Register result,
538
                               Register length,
539
                               Register scratch1,
540
                               Register scratch2,
541
                               Label* gc_required);
542
  void AllocateTwoByteSlicedString(Register result,
543
                                   Register length,
544
                                   Register scratch1,
545
                                   Register scratch2,
546
                                   Label* gc_required);
547
  void AllocateAsciiSlicedString(Register result,
548
                                 Register length,
549
                                 Register scratch1,
550
                                 Register scratch2,
551
                                 Label* gc_required);
552

    
553
  // Allocates a heap number or jumps to the gc_required label if the young
554
  // space is full and a scavenge is needed. All registers are clobbered also
555
  // when control continues at the gc_required label.
556
  void AllocateHeapNumber(Register result,
557
                          Register scratch1,
558
                          Register scratch2,
559
                          Register heap_number_map,
560
                          Label* gc_required,
561
                          TaggingMode tagging_mode = TAG_RESULT);
562
  void AllocateHeapNumberWithValue(Register result,
563
                                   FPURegister value,
564
                                   Register scratch1,
565
                                   Register scratch2,
566
                                   Label* gc_required);
567

    
568
  // ---------------------------------------------------------------------------
569
  // Instruction macros.
570

    
571
#define DEFINE_INSTRUCTION(instr)                                              \
572
  void instr(Register rd, Register rs, const Operand& rt);                     \
573
  void instr(Register rd, Register rs, Register rt) {                          \
574
    instr(rd, rs, Operand(rt));                                                \
575
  }                                                                            \
576
  void instr(Register rs, Register rt, int32_t j) {                            \
577
    instr(rs, rt, Operand(j));                                                 \
578
  }
579

    
580
#define DEFINE_INSTRUCTION2(instr)                                             \
581
  void instr(Register rs, const Operand& rt);                                  \
582
  void instr(Register rs, Register rt) {                                       \
583
    instr(rs, Operand(rt));                                                    \
584
  }                                                                            \
585
  void instr(Register rs, int32_t j) {                                         \
586
    instr(rs, Operand(j));                                                     \
587
  }
588

    
589
  // Arithmetic (three-operand) and multiply/divide (two-operand) macros.
  DEFINE_INSTRUCTION(Addu);
  DEFINE_INSTRUCTION(Subu);
  DEFINE_INSTRUCTION(Mul);
  DEFINE_INSTRUCTION2(Mult);
  DEFINE_INSTRUCTION2(Multu);
  DEFINE_INSTRUCTION2(Div);
  DEFINE_INSTRUCTION2(Divu);

  // Bitwise logic.
  DEFINE_INSTRUCTION(And);
  DEFINE_INSTRUCTION(Or);
  DEFINE_INSTRUCTION(Xor);
  DEFINE_INSTRUCTION(Nor);
  DEFINE_INSTRUCTION2(Neg);

  // Set-on-less-than, signed and unsigned.
  DEFINE_INSTRUCTION(Slt);
  DEFINE_INSTRUCTION(Sltu);

  // MIPS32 R2 instruction macro.
  DEFINE_INSTRUCTION(Ror);

#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
611

    
612

    
613
  // ---------------------------------------------------------------------------
614
  // Pseudo-instructions.
615

    
616
  // Register-to-register move, encoded as 'or rd, rt, zero_reg'.
  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
617

    
618
  // Load int32 in the rd register.
  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
  // Convenience overload: wraps a raw int32 immediate in an Operand.
  inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
    li(rd, Operand(j), mode);
  }
  // Convenience overload: wraps a heap-object handle in an Operand.
  inline void li(Register dst, Handle<Object> value,
                 LiFlags mode = OPTIMIZE_SIZE) {
    li(dst, Operand(value), mode);
  }
627

    
628
  // Push multiple registers on the stack.
629
  // Registers are saved in numerical order, with higher numbered registers
630
  // saved in higher memory addresses.
631
  void MultiPush(RegList regs);
632
  void MultiPushReversed(RegList regs);
633

    
634
  void MultiPushFPU(RegList regs);
635
  void MultiPushReversedFPU(RegList regs);
636

    
637
  // Push a single register: grow the stack by one word, then store src
  // at the new top of stack.
  void push(Register src) {
    Addu(sp, sp, Operand(-kPointerSize));
    sw(src, MemOperand(sp, 0));
  }
641
  // Capitalized alias for the lowercase push pseudo-instruction.
  void Push(Register src) { push(src); }
642

    
643
  // Push a handle.
  void Push(Handle<Object> handle);
  // Push a smi by boxing it in a handle so it reuses the handle path.
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
646

    
647
  // Push two registers. Pushes leftmost register first (to highest address).
  // One stack adjustment, then stores into the reserved slots.
  void Push(Register src1, Register src2) {
    Subu(sp, sp, Operand(2 * kPointerSize));
    sw(src1, MemOperand(sp, 1 * kPointerSize));
    sw(src2, MemOperand(sp, 0 * kPointerSize));
  }
653

    
654
  // Push three registers. Pushes leftmost register first (to highest address).
  // One stack adjustment, then stores into the reserved slots.
  void Push(Register src1, Register src2, Register src3) {
    Subu(sp, sp, Operand(3 * kPointerSize));
    sw(src1, MemOperand(sp, 2 * kPointerSize));
    sw(src2, MemOperand(sp, 1 * kPointerSize));
    sw(src3, MemOperand(sp, 0 * kPointerSize));
  }
661

    
662
  // Push four registers. Pushes leftmost register first (to highest address).
  // One stack adjustment, then stores into the reserved slots.
  void Push(Register src1, Register src2, Register src3, Register src4) {
    Subu(sp, sp, Operand(4 * kPointerSize));
    sw(src1, MemOperand(sp, 3 * kPointerSize));
    sw(src2, MemOperand(sp, 2 * kPointerSize));
    sw(src3, MemOperand(sp, 1 * kPointerSize));
    sw(src4, MemOperand(sp, 0 * kPointerSize));
  }
670

    
671
  // Conditionally push src, based on comparing tst1 against tst2.
  // NOTE(review): the branch jumps over the push sequence when 'cond' holds
  // for (tst1, tst2) — confirm the intended polarity against call sites.
  void Push(Register src, Condition cond, Register tst1, Register tst2) {
    // Since we don't have conditional execution we use a Branch.
    Branch(3, cond, tst1, Operand(tst2));
    Subu(sp, sp, Operand(kPointerSize));
    sw(src, MemOperand(sp, 0));
  }
677

    
678
  // Pops multiple values from the stack and load them in the
679
  // registers specified in regs. Pop order is the opposite as in MultiPush.
680
  void MultiPop(RegList regs);
681
  void MultiPopReversed(RegList regs);
682

    
683
  void MultiPopFPU(RegList regs);
684
  void MultiPopReversedFPU(RegList regs);
685

    
686
  // Pop a single register: load from the top of stack, then shrink the
  // stack by one word.
  void pop(Register dst) {
    lw(dst, MemOperand(sp, 0));
    Addu(sp, sp, Operand(kPointerSize));
  }
690
  // Capitalized alias for the lowercase pop pseudo-instruction.
  void Pop(Register dst) { pop(dst); }
691

    
692
  // Pop two registers. Pops rightmost register first (from lower address).
  // Registers must be distinct, or the first load would be overwritten.
  void Pop(Register src1, Register src2) {
    ASSERT(!src1.is(src2));
    lw(src2, MemOperand(sp, 0 * kPointerSize));
    lw(src1, MemOperand(sp, 1 * kPointerSize));
    Addu(sp, sp, 2 * kPointerSize);
  }
699

    
700
  // Pop three registers. Pops rightmost register first (from lower address).
701
  void Pop(Register src1, Register src2, Register src3) {
702
    lw(src3, MemOperand(sp, 0 * kPointerSize));
703
    lw(src2, MemOperand(sp, 1 * kPointerSize));
704
    lw(src1, MemOperand(sp, 2 * kPointerSize));
705
    Addu(sp, sp, 3 * kPointerSize);
706
  }
707

    
708
  // Drop 'count' words from the stack without reading them.
  void Pop(uint32_t count = 1) {
    Addu(sp, sp, Operand(count * kPointerSize));
  }
711

    
712
  // Push and pop the registers that can hold pointers, as defined by the
713
  // RegList constant kSafepointSavedRegisters.
714
  void PushSafepointRegisters();
715
  void PopSafepointRegisters();
716
  void PushSafepointRegistersAndDoubles();
717
  void PopSafepointRegistersAndDoubles();
718
  // Store value in register src in the safepoint stack slot for
719
  // register dst.
720
  void StoreToSafepointRegisterSlot(Register src, Register dst);
721
  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
722
  // Load the value of the src register from its safepoint stack slot
723
  // into register dst.
724
  void LoadFromSafepointRegisterSlot(Register dst, Register src);
725

    
726
  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
727
  // Does not handle errors.
728
  void FlushICache(Register address, unsigned instructions);
729

    
730
  // MIPS32 R2 instruction macro.
731
  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
732
  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
733

    
734
  // ---------------------------------------------------------------------------
735
  // FPU macros. These do not handle special cases like NaN or +- inf.
736

    
737
  // Convert unsigned word to double.
738
  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
739
  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
740

    
741
  // Convert double to unsigned word.
742
  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
743
  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
744

    
745
  void Trunc_w_d(FPURegister fd, FPURegister fs);
746
  void Round_w_d(FPURegister fd, FPURegister fs);
747
  void Floor_w_d(FPURegister fd, FPURegister fs);
748
  void Ceil_w_d(FPURegister fd, FPURegister fs);
749
  // Wrapper function for the different cmp/branch types.
750
  void BranchF(Label* target,
751
               Label* nan,
752
               Condition cc,
753
               FPURegister cmp1,
754
               FPURegister cmp2,
755
               BranchDelaySlot bd = PROTECT);
756

    
757
  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
758
  inline void BranchF(BranchDelaySlot bd,
759
                      Label* target,
760
                      Label* nan,
761
                      Condition cc,
762
                      FPURegister cmp1,
763
                      FPURegister cmp2) {
764
    BranchF(target, nan, cc, cmp1, cmp2, bd);
765
  };
766

    
767
  // Truncates a double using a specific rounding mode, and writes the value
768
  // to the result register.
769
  // The except_flag will contain any exceptions caused by the instruction.
770
  // If check_inexact is kDontCheckForInexactConversion, then the inexact
771
  // exception is masked.
772
  void EmitFPUTruncate(FPURoundingMode rounding_mode,
773
                       Register result,
774
                       DoubleRegister double_input,
775
                       Register scratch,
776
                       DoubleRegister double_scratch,
777
                       Register except_flag,
778
                       CheckForInexactConversion check_inexact
779
                           = kDontCheckForInexactConversion);
780

    
781
  // Performs a truncating conversion of a floating point number as used by
782
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
783
  // succeeds, otherwise falls through if result is saturated. On return
784
  // 'result' either holds answer, or is clobbered on fall through.
785
  //
786
  // Only public for the test code in test-code-stubs-arm.cc.
787
  void TryInlineTruncateDoubleToI(Register result,
788
                                  DoubleRegister input,
789
                                  Label* done);
790

    
791
  // Performs a truncating conversion of a floating point number as used by
792
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
793
  // Exits with 'result' holding the answer.
794
  void TruncateDoubleToI(Register result, DoubleRegister double_input);
795

    
796
  // Performs a truncating conversion of a heap number as used by
797
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
798
  // must be different registers. Exits with 'result' holding the answer.
799
  void TruncateHeapNumberToI(Register result, Register object);
800

    
801
  // Converts the smi or heap number in object to an int32 using the rules
802
  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
803
  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
804
  // different registers.
805
  void TruncateNumberToI(Register object,
806
                         Register result,
807
                         Register heap_number_map,
808
                         Register scratch,
809
                         Label* not_int32);
810

    
811
  // Loads the number from object into dst register.
812
  // If |object| is neither smi nor heap number, |not_number| is jumped to
813
  // with |object| still intact.
814
  void LoadNumber(Register object,
815
                  FPURegister dst,
816
                  Register heap_number_map,
817
                  Register scratch,
818
                  Label* not_number);
819

    
820
  // Loads the number from object into double_dst in the double format.
821
  // Control will jump to not_int32 if the value cannot be exactly represented
822
  // by a 32-bit integer.
823
  // Floating point value in the 32-bit integer range that are not exact integer
824
  // won't be loaded.
825
  void LoadNumberAsInt32Double(Register object,
826
                               DoubleRegister double_dst,
827
                               Register heap_number_map,
828
                               Register scratch1,
829
                               Register scratch2,
830
                               FPURegister double_scratch,
831
                               Label* not_int32);
832

    
833
  // Loads the number from object into dst as a 32-bit integer.
834
  // Control will jump to not_int32 if the object cannot be exactly represented
835
  // by a 32-bit integer.
836
  // Floating point value in the 32-bit integer range that are not exact integer
837
  // won't be converted.
838
  void LoadNumberAsInt32(Register object,
839
                         Register dst,
840
                         Register heap_number_map,
841
                         Register scratch1,
842
                         Register scratch2,
843
                         FPURegister double_scratch0,
844
                         FPURegister double_scratch1,
845
                         Label* not_int32);
846

    
847
  // Enter exit frame.
848
  // argc - argument count to be dropped by LeaveExitFrame.
849
  // save_doubles - saves FPU registers on stack, currently disabled.
850
  // stack_space - extra stack space.
851
  void EnterExitFrame(bool save_doubles,
852
                      int stack_space = 0);
853

    
854
  // Leave the current exit frame.
855
  void LeaveExitFrame(bool save_doubles,
856
                      Register arg_count,
857
                      bool restore_context,
858
                      bool do_return = NO_EMIT_RETURN);
859

    
860
  // Get the actual activation frame alignment for target environment.
861
  static int ActivationFrameAlignment();
862

    
863
  // Make sure the stack is aligned. Only emits code in debug mode.
864
  void AssertStackIsAligned();
865

    
866
  void LoadContext(Register dst, int context_chain_length);
867

    
868
  // Conditionally load the cached Array transitioned map of type
869
  // transitioned_kind from the native context if the map in register
870
  // map_in_out is the cached Array map in the native context of
871
  // expected_kind.
872
  void LoadTransitionedArrayMapConditional(
873
      ElementsKind expected_kind,
874
      ElementsKind transitioned_kind,
875
      Register map_in_out,
876
      Register scratch,
877
      Label* no_map_match);
878

    
879
  // Load the initial map for new Arrays from a JSFunction.
880
  void LoadInitialArrayMap(Register function_in,
881
                           Register scratch,
882
                           Register map_out,
883
                           bool can_have_holes);
884

    
885
  void LoadGlobalFunction(int index, Register function);
886
  void LoadArrayFunction(Register function);
887

    
888
  // Load the initial map from the global function. The registers
889
  // function and map can be the same, function is then overwritten.
890
  void LoadGlobalFunctionInitialMap(Register function,
891
                                    Register map,
892
                                    Register scratch);
893

    
894
  // Load the address of the isolate's roots array into kRootRegister.
  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    li(kRootRegister, Operand(roots_array_start));
  }
899

    
900
  // -------------------------------------------------------------------------
901
  // JavaScript invokes.
902

    
903
  // Set up call kind marking in t1. The method takes t1 as an
904
  // explicit first parameter to make the code more readable at the
905
  // call sites.
906
  void SetCallKind(Register dst, CallKind kind);
907

    
908
  // Invoke the JavaScript function code by either calling or jumping.
909
  void InvokeCode(Register code,
910
                  const ParameterCount& expected,
911
                  const ParameterCount& actual,
912
                  InvokeFlag flag,
913
                  const CallWrapper& call_wrapper,
914
                  CallKind call_kind);
915

    
916
  void InvokeCode(Handle<Code> code,
917
                  const ParameterCount& expected,
918
                  const ParameterCount& actual,
919
                  RelocInfo::Mode rmode,
920
                  InvokeFlag flag,
921
                  CallKind call_kind);
922

    
923
  // Invoke the JavaScript function in the given register. Changes the
924
  // current context to the context in the function before invoking.
925
  void InvokeFunction(Register function,
926
                      const ParameterCount& actual,
927
                      InvokeFlag flag,
928
                      const CallWrapper& call_wrapper,
929
                      CallKind call_kind);
930

    
931
  void InvokeFunction(Handle<JSFunction> function,
932
                      const ParameterCount& expected,
933
                      const ParameterCount& actual,
934
                      InvokeFlag flag,
935
                      const CallWrapper& call_wrapper,
936
                      CallKind call_kind);
937

    
938

    
939
  void IsObjectJSObjectType(Register heap_object,
940
                            Register map,
941
                            Register scratch,
942
                            Label* fail);
943

    
944
  void IsInstanceJSObjectType(Register map,
945
                              Register scratch,
946
                              Label* fail);
947

    
948
  void IsObjectJSStringType(Register object,
949
                            Register scratch,
950
                            Label* fail);
951

    
952
  void IsObjectNameType(Register object,
953
                        Register scratch,
954
                        Label* fail);
955

    
956
#ifdef ENABLE_DEBUGGER_SUPPORT
957
  // -------------------------------------------------------------------------
958
  // Debugger Support.
959

    
960
  void DebugBreak();
961
#endif
962

    
963

    
964
  // -------------------------------------------------------------------------
965
  // Exception handling.
966

    
967
  // Push a new try handler and link into try handler chain.
968
  void PushTryHandler(StackHandler::Kind kind, int handler_index);
969

    
970
  // Unlink the stack handler on top of the stack from the try handler chain.
971
  // Must preserve the result register.
972
  void PopTryHandler();
973

    
974
  // Passes thrown value to the handler of top of the try handler chain.
975
  void Throw(Register value);
976

    
977
  // Propagates an uncatchable exception to the top of the current JS stack's
978
  // handler chain.
979
  void ThrowUncatchable(Register value);
980

    
981
  // Copies a fixed number of fields of heap objects from src to dst.
982
  void CopyFields(Register dst, Register src, RegList temps, int field_count);
983

    
984
  // Copies a number of bytes from src to dst. All registers are clobbered. On
985
  // exit src and dst will point to the place just after where the last byte was
986
  // read or written and length will be zero.
987
  void CopyBytes(Register src,
988
                 Register dst,
989
                 Register length,
990
                 Register scratch);
991

    
992
  // Initialize fields with filler values.  Fields starting at |start_offset|
993
  // not including end_offset are overwritten with the value in |filler|.  At
994
  // the end the loop, |start_offset| takes the value of |end_offset|.
995
  void InitializeFieldsWithFiller(Register start_offset,
996
                                  Register end_offset,
997
                                  Register filler);
998

    
999
  // -------------------------------------------------------------------------
1000
  // Support functions.
1001

    
1002
  // Try to get function prototype of a function and puts the value in
1003
  // the result register. Checks that the function really is a
1004
  // function and jumps to the miss label if the fast checks fail. The
1005
  // function register will be untouched; the other registers may be
1006
  // clobbered.
1007
  void TryGetFunctionPrototype(Register function,
1008
                               Register result,
1009
                               Register scratch,
1010
                               Label* miss,
1011
                               bool miss_on_bound_function = false);
1012

    
1013
  void GetObjectType(Register function,
1014
                     Register map,
1015
                     Register type_reg);
1016

    
1017
  // Check if a map for a JSObject indicates that the object has fast elements.
1018
  // Jump to the specified label if it does not.
1019
  void CheckFastElements(Register map,
1020
                         Register scratch,
1021
                         Label* fail);
1022

    
1023
  // Check if a map for a JSObject indicates that the object can have both smi
1024
  // and HeapObject elements.  Jump to the specified label if it does not.
1025
  void CheckFastObjectElements(Register map,
1026
                               Register scratch,
1027
                               Label* fail);
1028

    
1029
  // Check if a map for a JSObject indicates that the object has fast smi only
1030
  // elements.  Jump to the specified label if it does not.
1031
  void CheckFastSmiElements(Register map,
1032
                            Register scratch,
1033
                            Label* fail);
1034

    
1035
  // Check to see if maybe_number can be stored as a double in
1036
  // FastDoubleElements. If it can, store it at the index specified by key in
1037
  // the FastDoubleElements array elements. Otherwise jump to fail.
1038
  void StoreNumberToDoubleElements(Register value_reg,
1039
                                   Register key_reg,
1040
                                   Register elements_reg,
1041
                                   Register scratch1,
1042
                                   Register scratch2,
1043
                                   Register scratch3,
1044
                                   Label* fail,
1045
                                   int elements_offset = 0);
1046

    
1047
  // Compare an object's map with the specified map and its transitioned
1048
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
1049
  // "branch_to" if the result of the comparison is "cond". If multiple map
1050
  // compares are required, the compare sequences branches to early_success.
1051
  void CompareMapAndBranch(Register obj,
1052
                           Register scratch,
1053
                           Handle<Map> map,
1054
                           Label* early_success,
1055
                           Condition cond,
1056
                           Label* branch_to);
1057

    
1058
  // As above, but the map of the object is already loaded into the register
1059
  // which is preserved by the code generated.
1060
  void CompareMapAndBranch(Register obj_map,
1061
                           Handle<Map> map,
1062
                           Label* early_success,
1063
                           Condition cond,
1064
                           Label* branch_to);
1065

    
1066
  // Check if the map of an object is equal to a specified map and branch to
1067
  // label if not. Skip the smi check if not required (object is known to be a
1068
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1069
  // against maps that are ElementsKind transition maps of the specificed map.
1070
  void CheckMap(Register obj,
1071
                Register scratch,
1072
                Handle<Map> map,
1073
                Label* fail,
1074
                SmiCheckType smi_check_type);
1075

    
1076

    
1077
  void CheckMap(Register obj,
1078
                Register scratch,
1079
                Heap::RootListIndex index,
1080
                Label* fail,
1081
                SmiCheckType smi_check_type);
1082

    
1083
  // Check if the map of an object is equal to a specified map and branch to a
1084
  // specified target if equal. Skip the smi check if not required (object is
1085
  // known to be a heap object)
1086
  void DispatchMap(Register obj,
1087
                   Register scratch,
1088
                   Handle<Map> map,
1089
                   Handle<Code> success,
1090
                   SmiCheckType smi_check_type);
1091

    
1092
  // Generates code for reporting that an illegal operation has
1093
  // occurred.
1094
  void IllegalOperation(int num_arguments);
1095

    
1096

    
1097
  // Load and check the instance type of an object for being a string.
  // Loads the type into the second argument register.
  // Returns a condition that will be enabled if the object was a string.
  Condition IsObjectStringType(Register obj,
                               Register type,
                               Register result) {
    lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
    And(type, type, Operand(kIsNotStringMask));
    // With kStringTag == 0, the masked value is zero exactly for strings,
    // so the caller tests 'type' against zero with the returned condition.
    ASSERT_EQ(0, kStringTag);
    return eq;
  }
1109

    
1110

    
1111
  // Picks out an array index from the hash field.
1112
  // Register use:
1113
  //   hash - holds the index's hash. Clobbered.
1114
  //   index - holds the overwritten index on exit.
1115
  void IndexFromHash(Register hash, Register index);
1116

    
1117
  // Get the number of least significant bits from a register.
1118
  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
1119
  void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
1120

    
1121
  // Load the value of a number object into a FPU double register. If the
1122
  // object is not a number a jump to the label not_number is performed
1123
  // and the FPU double register is unchanged.
1124
  void ObjectToDoubleFPURegister(
1125
      Register object,
1126
      FPURegister value,
1127
      Register scratch1,
1128
      Register scratch2,
1129
      Register heap_number_map,
1130
      Label* not_number,
1131
      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
1132

    
1133
  // Load the value of a smi object into a FPU double register. The register
1134
  // scratch1 can be the same register as smi in which case smi will hold the
1135
  // untagged value afterwards.
1136
  void SmiToDoubleFPURegister(Register smi,
1137
                              FPURegister value,
1138
                              Register scratch1);
1139

    
1140
  // -------------------------------------------------------------------------
1141
  // Overflow handling functions.
1142
  // Usage: first call the appropriate arithmetic function, then call one of the
1143
  // jump functions with the overflow_dst register as the second parameter.
1144

    
1145
  void AdduAndCheckForOverflow(Register dst,
1146
                               Register left,
1147
                               Register right,
1148
                               Register overflow_dst,
1149
                               Register scratch = at);
1150

    
1151
  void SubuAndCheckForOverflow(Register dst,
1152
                               Register left,
1153
                               Register right,
1154
                               Register overflow_dst,
1155
                               Register scratch = at);
1156

    
1157
  // Branch to label if overflow_check is negative (the overflow indication
  // produced by AdduAndCheckForOverflow/SubuAndCheckForOverflow).
  void BranchOnOverflow(Label* label,
                        Register overflow_check,
                        BranchDelaySlot bd = PROTECT) {
    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
  }
1162

    
1163
  // Branch to label if overflow_check is non-negative, i.e. no overflow
  // was flagged by the preceding arithmetic helper.
  void BranchOnNoOverflow(Label* label,
                          Register overflow_check,
                          BranchDelaySlot bd = PROTECT) {
    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
  }
1168

    
1169
  // Return if overflow_check is negative (overflow flagged).
  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(lt, overflow_check, Operand(zero_reg), bd);
  }
1172

    
1173
  // Return if overflow_check is non-negative (no overflow flagged).
  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
    Ret(ge, overflow_check, Operand(zero_reg), bd);
  }
1176

    
1177
  // -------------------------------------------------------------------------
1178
  // Runtime calls.
1179

    
1180
  // See comments at the beginning of CEntryStub::Generate.
  inline void PrepareCEntryArgs(int num_args) {
    li(s0, num_args);  // s0: the argument count.
    li(s1, (num_args - 1) * kPointerSize);  // s1: offset derived from it.
  }
1185

    
1186
  // Load the target C function's external reference into s2 for CEntry.
  inline void PrepareCEntryFunction(const ExternalReference& ref) {
    li(s2, Operand(ref));
  }
1189

    
1190
  // Call a code stub.
1191
  void CallStub(CodeStub* stub,
1192
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
1193
                Condition cond = cc_always,
1194
                Register r1 = zero_reg,
1195
                const Operand& r2 = Operand(zero_reg),
1196
                BranchDelaySlot bd = PROTECT);
1197

    
1198
  // Tail call a code stub (jump).
1199
  void TailCallStub(CodeStub* stub);
1200

    
1201
  void CallJSExitStub(CodeStub* stub);
1202

    
1203
  // Call a runtime routine.
1204
  void CallRuntime(const Runtime::Function* f,
1205
                   int num_arguments,
1206
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
1207
  // Convenience: call a runtime function by id, using its declared argument
  // count and saving FPU registers across the call.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }
1211

    
1212
  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments);
  }
1216

    
1217
  // Convenience function: call an external reference.
1218
  void CallExternalReference(const ExternalReference& ext,
1219
                             int num_arguments,
1220
                             BranchDelaySlot bd = PROTECT);
1221

    
1222
  // Tail call of a runtime routine (jump).
1223
  // Like JumpToExternalReference, but also takes care of passing the number
1224
  // of parameters.
1225
  void TailCallExternalReference(const ExternalReference& ext,
1226
                                 int num_arguments,
1227
                                 int result_size);
1228

    
1229
  // Convenience function: tail call a runtime routine (jump).
1230
  void TailCallRuntime(Runtime::FunctionId fid,
1231
                       int num_arguments,
1232
                       int result_size);
1233

    
1234
  int CalculateStackPassedWords(int num_reg_arguments,
1235
                                int num_double_arguments);
1236

    
1237
  // Before calling a C-function from generated code, align arguments on stack
1238
  // and add space for the four mips argument slots.
1239
  // After aligning the frame, non-register arguments must be stored on the
1240
  // stack, after the argument-slots using helper: CFunctionArgumentOperand().
1241
  // The argument count assumes all arguments are word sized.
1242
  // Some compilers/platforms require the stack to be aligned when calling
1243
  // C++ code.
1244
  // Needs a scratch register to do some arithmetic. This register will be
1245
  // trashed.
1246
  void PrepareCallCFunction(int num_reg_arguments,
1247
                            int num_double_registers,
1248
                            Register scratch);
1249
  void PrepareCallCFunction(int num_reg_arguments,
1250
                            Register scratch);
1251

    
1252
  // Arguments 1-4 are placed in registers a0 thru a3 respectively.
1253
  // Arguments 5..n are stored to stack using following:
1254
  //  sw(t0, CFunctionArgumentOperand(5));
1255

    
1256
  // Calls a C function and cleans up the space for arguments allocated
1257
  // by PrepareCallCFunction. The called function is not allowed to trigger a
1258
  // garbage collection, since that might move the code and invalidate the
1259
  // return address (unless this is somehow accounted for by the called
1260
  // function).
1261
  void CallCFunction(ExternalReference function, int num_arguments);
1262
  void CallCFunction(Register function, int num_arguments);
1263
  void CallCFunction(ExternalReference function,
1264
                     int num_reg_arguments,
1265
                     int num_double_arguments);
1266
  void CallCFunction(Register function,
1267
                     int num_reg_arguments,
1268
                     int num_double_arguments);
1269
  void GetCFunctionDoubleResult(const DoubleRegister dst);
1270

    
1271
  // There are two ways of passing double arguments on MIPS, depending on
1272
  // whether soft or hard floating point ABI is used. These functions
1273
  // abstract parameter passing for the three different ways we call
1274
  // C functions from generated code.
1275
  void SetCallCDoubleArguments(DoubleRegister dreg);
1276
  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
1277
  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
1278

    
1279
  // Calls an API function.  Allocates HandleScope, extracts returned value
1280
  // from handle and propagates exceptions.  Restores context.  stack_space
1281
  // - space to be unwound on exit (includes the call JS arguments space and
1282
  // the additional space allocated for the fast call).
1283
  void CallApiFunctionAndReturn(ExternalReference function,
1284
                                Address function_address,
1285
                                ExternalReference thunk_ref,
1286
                                Register thunk_last_arg,
1287
                                int stack_space,
1288
                                MemOperand return_value_operand,
1289
                                MemOperand* context_restore_operand);
1290

    
1291
  // Jump to the builtin routine.
1292
  void JumpToExternalReference(const ExternalReference& builtin,
1293
                               BranchDelaySlot bd = PROTECT);
1294

    
1295
  // Invoke specified builtin JavaScript function. Adds an entry to
1296
  // the unresolved list if the name does not resolve.
1297
  void InvokeBuiltin(Builtins::JavaScript id,
1298
                     InvokeFlag flag,
1299
                     const CallWrapper& call_wrapper = NullCallWrapper());
1300

    
1301
  // Store the code object for the given builtin in the target register and
1302
  // setup the function in a1.
1303
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1304

    
1305
  // Store the function for the given builtin in the target register.
1306
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1307

    
1308
  struct Unresolved {
1309
    int pc;
1310
    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
1311
    const char* name;
1312
  };
1313

    
1314
  Handle<Object> CodeObject() {
1315
    ASSERT(!code_object_.is_null());
1316
    return code_object_;
1317
  }
1318

    
1319
  // -------------------------------------------------------------------------
1320
  // StatsCounter support.
1321

    
1322
  void SetCounter(StatsCounter* counter, int value,
1323
                  Register scratch1, Register scratch2);
1324
  void IncrementCounter(StatsCounter* counter, int value,
1325
                        Register scratch1, Register scratch2);
1326
  void DecrementCounter(StatsCounter* counter, int value,
1327
                        Register scratch1, Register scratch2);
1328

    
1329

    
1330
  // -------------------------------------------------------------------------
1331
  // Debugging.
1332

    
1333
  // Calls Abort(msg) if the condition cc is not satisfied.
1334
  // Use --debug_code to enable.
1335
  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
1336
  void AssertFastElements(Register elements);
1337

    
1338
  // Like Assert(), but always enabled.
1339
  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
1340

    
1341
  // Print a message to stdout and abort execution.
1342
  void Abort(BailoutReason msg);
1343

    
1344
  // Verify restrictions about code generated in stubs.
1345
  void set_generating_stub(bool value) { generating_stub_ = value; }
1346
  bool generating_stub() { return generating_stub_; }
1347
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
1348
  bool allow_stub_calls() { return allow_stub_calls_; }
1349
  void set_has_frame(bool value) { has_frame_ = value; }
1350
  bool has_frame() { return has_frame_; }
1351
  inline bool AllowThisStubCall(CodeStub* stub);
1352

    
1353
  // ---------------------------------------------------------------------------
1354
  // Number utilities.
1355

    
1356
  // Check whether the value of reg is a power of two and not zero. If not
1357
  // control continues at the label not_power_of_two. If reg is a power of two
1358
  // the register scratch contains the value of (reg - 1) when control falls
1359
  // through.
1360
  void JumpIfNotPowerOfTwoOrZero(Register reg,
1361
                                 Register scratch,
1362
                                 Label* not_power_of_two_or_zero);
1363

    
1364
  // -------------------------------------------------------------------------
1365
  // Smi utilities.
1366

    
1367
  void SmiTag(Register reg) {
1368
    Addu(reg, reg, reg);
1369
  }
1370

    
1371
  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1372
  void SmiTagCheckOverflow(Register reg, Register overflow);
1373
  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1374

    
1375
  void SmiTag(Register dst, Register src) {
1376
    Addu(dst, src, src);
1377
  }
1378

    
1379
  void SmiUntag(Register reg) {
1380
    sra(reg, reg, kSmiTagSize);
1381
  }
1382

    
1383
  void SmiUntag(Register dst, Register src) {
1384
    sra(dst, src, kSmiTagSize);
1385
  }
1386

    
1387
  // Untag the source value into destination and jump if source is a smi.
1388
  // Souce and destination can be the same register.
1389
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1390

    
1391
  // Untag the source value into destination and jump if source is not a smi.
1392
  // Souce and destination can be the same register.
1393
  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1394

    
1395
  // Jump the register contains a smi.
1396
  void JumpIfSmi(Register value,
1397
                 Label* smi_label,
1398
                 Register scratch = at,
1399
                 BranchDelaySlot bd = PROTECT);
1400

    
1401
  // Jump if the register contains a non-smi.
1402
  void JumpIfNotSmi(Register value,
1403
                    Label* not_smi_label,
1404
                    Register scratch = at,
1405
                    BranchDelaySlot bd = PROTECT);
1406

    
1407
  // Jump if either of the registers contain a non-smi.
1408
  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1409
  // Jump if either of the registers contain a smi.
1410
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1411

    
1412
  // Abort execution if argument is a smi, enabled via --debug-code.
1413
  void AssertNotSmi(Register object);
1414
  void AssertSmi(Register object);
1415

    
1416
  // Abort execution if argument is not a string, enabled via --debug-code.
1417
  void AssertString(Register object);
1418

    
1419
  // Abort execution if argument is not a name, enabled via --debug-code.
1420
  void AssertName(Register object);
1421

    
1422
  // Abort execution if reg is not the root value with the given index,
1423
  // enabled via --debug-code.
1424
  void AssertIsRoot(Register reg, Heap::RootListIndex index);
1425

    
1426
  // ---------------------------------------------------------------------------
1427
  // HeapNumber utilities.
1428

    
1429
  void JumpIfNotHeapNumber(Register object,
1430
                           Register heap_number_map,
1431
                           Register scratch,
1432
                           Label* on_not_heap_number);
1433

    
1434
  // -------------------------------------------------------------------------
1435
  // String utilities.
1436

    
1437
  // Generate code to do a lookup in the number string cache. If the number in
1438
  // the register object is found in the cache the generated code falls through
1439
  // with the result in the result register. The object and the result register
1440
  // can be the same. If the number is not found in the cache the code jumps to
1441
  // the label not_found with only the content of register object unchanged.
1442
  void LookupNumberStringCache(Register object,
1443
                               Register result,
1444
                               Register scratch1,
1445
                               Register scratch2,
1446
                               Register scratch3,
1447
                               Label* not_found);
1448

    
1449
  // Checks if both instance types are sequential ASCII strings and jumps to
1450
  // label if either is not.
1451
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
1452
      Register first_object_instance_type,
1453
      Register second_object_instance_type,
1454
      Register scratch1,
1455
      Register scratch2,
1456
      Label* failure);
1457

    
1458
  // Check if instance type is sequential ASCII string and jump to label if
1459
  // it is not.
1460
  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
1461
                                              Register scratch,
1462
                                              Label* failure);
1463

    
1464
  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
1465

    
1466
  // Test that both first and second are sequential ASCII strings.
1467
  // Assume that they are non-smis.
1468
  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
1469
                                                  Register second,
1470
                                                  Register scratch1,
1471
                                                  Register scratch2,
1472
                                                  Label* failure);
1473

    
1474
  // Test that both first and second are sequential ASCII strings.
1475
  // Check that they are non-smis.
1476
  void JumpIfNotBothSequentialAsciiStrings(Register first,
1477
                                           Register second,
1478
                                           Register scratch1,
1479
                                           Register scratch2,
1480
                                           Label* failure);
1481

    
1482
  void ClampUint8(Register output_reg, Register input_reg);
1483

    
1484
  void ClampDoubleToUint8(Register result_reg,
1485
                          DoubleRegister input_reg,
1486
                          DoubleRegister temp_double_reg);
1487

    
1488

    
1489
  void LoadInstanceDescriptors(Register map, Register descriptors);
1490
  void EnumLength(Register dst, Register map);
1491
  void NumberOfOwnDescriptors(Register dst, Register map);
1492

    
1493
  template<typename Field>
1494
  void DecodeField(Register reg) {
1495
    static const int shift = Field::kShift;
1496
    static const int mask = (Field::kMask >> shift) << kSmiTagSize;
1497
    srl(reg, reg, shift);
1498
    And(reg, reg, Operand(mask));
1499
  }
1500

    
1501
  // Generates function and stub prologue code.
1502
  void Prologue(PrologueFrameMode frame_mode);
1503

    
1504
  // Activation support.
1505
  void EnterFrame(StackFrame::Type type);
1506
  void LeaveFrame(StackFrame::Type type);
1507

    
1508
  // Patch the relocated value (lui/ori pair).
1509
  void PatchRelocatedValue(Register li_location,
1510
                           Register scratch,
1511
                           Register new_value);
1512
  // Get the relocatad value (loaded data) from the lui/ori pair.
1513
  void GetRelocatedValue(Register li_location,
1514
                         Register value,
1515
                         Register scratch);
1516

    
1517
  // Expects object in a0 and returns map with validated enum cache
1518
  // in a0.  Assumes that any other register can be used as a scratch.
1519
  void CheckEnumCache(Register null_value, Label* call_runtime);
1520

    
1521
  // AllocationMemento support. Arrays may have an associated
1522
  // AllocationMemento object that can be checked for in order to pretransition
1523
  // to another type.
1524
  // On entry, receiver_reg should point to the array object.
1525
  // scratch_reg gets clobbered.
1526
  // If allocation info is present, jump to allocation_memento_present.
1527
  void TestJSArrayForAllocationMemento(
1528
      Register receiver_reg,
1529
      Register scratch_reg,
1530
      Label* no_memento_found,
1531
      Condition cond = al,
1532
      Label* allocation_memento_present = NULL);
1533

    
1534
  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1535
                                         Register scratch_reg,
1536
                                         Label* memento_found) {
1537
    Label no_memento_found;
1538
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1539
                                    &no_memento_found, eq, memento_found);
1540
    bind(&no_memento_found);
1541
  }
1542

    
1543
 private:
1544
  void CallCFunctionHelper(Register function,
1545
                           int num_reg_arguments,
1546
                           int num_double_arguments);
1547

    
1548
  void BranchShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
1549
  void BranchShort(int16_t offset, Condition cond, Register rs,
1550
                   const Operand& rt,
1551
                   BranchDelaySlot bdslot = PROTECT);
1552
  void BranchShort(Label* L, BranchDelaySlot bdslot = PROTECT);
1553
  void BranchShort(Label* L, Condition cond, Register rs,
1554
                   const Operand& rt,
1555
                   BranchDelaySlot bdslot = PROTECT);
1556
  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
1557
  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
1558
                          const Operand& rt,
1559
                          BranchDelaySlot bdslot = PROTECT);
1560
  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
1561
  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
1562
                          const Operand& rt,
1563
                          BranchDelaySlot bdslot = PROTECT);
1564
  void J(Label* L, BranchDelaySlot bdslot);
1565
  void Jr(Label* L, BranchDelaySlot bdslot);
1566
  void Jalr(Label* L, BranchDelaySlot bdslot);
1567

    
1568
  // Helper functions for generating invokes.
1569
  void InvokePrologue(const ParameterCount& expected,
1570
                      const ParameterCount& actual,
1571
                      Handle<Code> code_constant,
1572
                      Register code_reg,
1573
                      Label* done,
1574
                      bool* definitely_mismatches,
1575
                      InvokeFlag flag,
1576
                      const CallWrapper& call_wrapper,
1577
                      CallKind call_kind);
1578

    
1579
  // Get the code for the given builtin. Returns if able to resolve
1580
  // the function in the 'resolved' flag.
1581
  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
1582

    
1583
  void InitializeNewString(Register string,
1584
                           Register length,
1585
                           Heap::RootListIndex map_index,
1586
                           Register scratch1,
1587
                           Register scratch2);
1588

    
1589
  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1590
  void InNewSpace(Register object,
1591
                  Register scratch,
1592
                  Condition cond,  // eq for new space, ne otherwise.
1593
                  Label* branch);
1594

    
1595
  // Helper for finding the mark bits for an address.  Afterwards, the
1596
  // bitmap register points at the word with the mark bits and the mask
1597
  // the position of the first bit.  Leaves addr_reg unchanged.
1598
  inline void GetMarkBits(Register addr_reg,
1599
                          Register bitmap_reg,
1600
                          Register mask_reg);
1601

    
1602
  // Helper for throwing exceptions.  Compute a handler address and jump to
1603
  // it.  See the implementation for register usage.
1604
  void JumpToHandlerEntry();
1605

    
1606
  // Compute memory operands for safepoint stack slots.
1607
  static int SafepointRegisterStackIndex(int reg_code);
1608
  MemOperand SafepointRegisterSlot(Register reg);
1609
  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1610

    
1611
  bool generating_stub_;
1612
  bool allow_stub_calls_;
1613
  bool has_frame_;
1614
  // This handle will be patched with the code object on installation.
1615
  Handle<Object> code_object_;
1616

    
1617
  // Needs access to SafepointRegisterStackIndex for compiled frame
1618
  // traversal.
1619
  friend class StandardFrame;
1620
};
1621

    
1622

    
1623
// The code patcher is used to patch (typically) small parts of code e.g. for
1624
// debugging and other types of instrumentation. When using the code patcher
1625
// the exact number of bytes specified must be emitted. It is not legal to emit
1626
// relocation information. If any of these constraints are violated it causes
1627
// an assertion to fail.
1628
class CodePatcher {
1629
 public:
1630
  CodePatcher(byte* address, int instructions);
1631
  virtual ~CodePatcher();
1632

    
1633
  // Macro assembler to emit code.
1634
  MacroAssembler* masm() { return &masm_; }
1635

    
1636
  // Emit an instruction directly.
1637
  void Emit(Instr instr);
1638

    
1639
  // Emit an address directly.
1640
  void Emit(Address addr);
1641

    
1642
  // Change the condition part of an instruction leaving the rest of the current
1643
  // instruction unchanged.
1644
  void ChangeBranchCondition(Condition cond);
1645

    
1646
 private:
1647
  byte* address_;  // The address of the code being patched.
1648
  int size_;  // Number of bytes of the expected patch size.
1649
  MacroAssembler masm_;  // Macro assembler used to generate the code.
1650
};
1651

    
1652

    
1653

    
1654
// When code coverage of generated code is enabled, every masm-> access first
// emits a stop marker carrying the file:line of the call site.
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif

} }  // namespace v8::internal

#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_