// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.


#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "mips/assembler-mips-inl.h"
#include "serialize.h"

namespace v8 {
namespace internal {

#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;


ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}


// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
// FPU instructions when building the snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_FPU_INSTRUCTIONS
  answer |= static_cast<uint64_t>(1) << FPU;
#endif  // def CAN_USE_FPU_INSTRUCTIONS

#ifdef __mips__
  // If the compiler is allowed to use FPU then we can use FPU too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__mips_hard_float) && __mips_hard_float != 0
  answer |= static_cast<uint64_t>(1) << FPU;
#endif  // defined(__mips_hard_float) && __mips_hard_float != 0
#endif  // def __mips__

  return answer;
}


const char* DoubleRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
    "f0",
    "f2",
    "f4",
    "f6",
    "f8",
    "f10",
    "f12",
    "f14",
    "f16",
    "f18",
    "f20",
    "f22",
    "f24",
    "f26"
  };
  return names[index];
}


void CpuFeatures::Probe() {
  unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
                                CpuFeaturesImpliedByCompiler());
  ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
    return;
  }

  // If the compiler is allowed to use fpu then we can use fpu too in our
  // code generation.
#if !defined(__mips__)
  // For the simulator build, use FPU.
  supported_ |= static_cast<uint64_t>(1) << FPU;
#else
  // Probe for additional features not already known to be available.
  CPU cpu;
  if (cpu.has_fpu()) {
    // This implementation also sets the FPU flags if
    // runtime detection of FPU returns true.
    supported_ |= static_cast<uint64_t>(1) << FPU;
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU;
  }
#endif
}


int ToNumber(Register reg) {
  ASSERT(reg.is_valid());
  const int kNumbers[] = {
    0,    // zero_reg
    1,    // at
    2,    // v0
    3,    // v1
    4,    // a0
    5,    // a1
    6,    // a2
    7,    // a3
    8,    // t0
    9,    // t1
    10,   // t2
    11,   // t3
    12,   // t4
    13,   // t5
    14,   // t6
    15,   // t7
    16,   // s0
    17,   // s1
    18,   // s2
    19,   // s3
    20,   // s4
    21,   // s5
    22,   // s6
    23,   // s7
    24,   // t8
    25,   // t9
    26,   // k0
    27,   // k1
    28,   // gp
    29,   // sp
    30,   // fp
    31,   // ra
  };
  return kNumbers[reg.code()];
}


Register ToRegister(int num) {
  ASSERT(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {
    zero_reg,
    at,
    v0, v1,
    a0, a1, a2, a3,
    t0, t1, t2, t3, t4, t5, t6, t7,
    s0, s1, s2, s3, s4, s5, s6, s7,
    t8, t9,
    k0, k1,
    gp,
    sp,
    fp,
    ra
  };
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo.

const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
                                  1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded.  Being
  // specially coded on MIPS means that it is a lui/ori instruction, and that is
  // always the case inside code objects.
  return true;
}


// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED_MIPS();
}


// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.

Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // No relocation needed.
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}


MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
  offset_ = offset;
}


// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

static const int kNegOffset = 0x00008000;
// addiu(sp, sp, 4) aka Pop() operation, or part of Pop(r)
// operations, as a post-increment of sp.
const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift) | (kPointerSize & kImm16Mask);
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
      | (kRegister_sp_Code << kRtShift) | (-kPointerSize & kImm16Mask);
// sw(r, MemOperand(sp, 0))
const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
      |  (0 & kImm16Mask);
//  lw(r, MemOperand(sp, 0))
const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
      |  (0 & kImm16Mask);

const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      |  (0 & kImm16Mask);

const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      |  (0 & kImm16Mask);

const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
      |  (kNegOffset & kImm16Mask);

const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
      |  (kNegOffset & kImm16Mask);
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
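
// Worked example (editor's note, assuming the standard MIPS I-type layout
// with opcode at bits 31-26, rs at 25-21, rt at 20-16, imm16 at 15-0):
// with kPointerSize == 4 and sp == register 29, kPushInstruction encodes
//   addiu sp, sp, -4  ==>  0x27BDFFFC
// and kPopInstruction encodes
//   addiu sp, sp, 4   ==>  0x27BD0004.
// kLwSwInstrTypeMask (0xffe00000) keeps only the opcode and rs fields, so
// matching against the Fp-offset patterns above ignores rt and the 16-bit
// offset.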


Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  last_trampoline_pool_end_ = 0;
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (16 * kTrampolineSlotsSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ = kMaxBranchOffset - kTrampolineSlotsSize * 16;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  trampoline_emitted_ = false;
  unbound_labels_count_ = 0;
  block_buffer_growth_ = false;

  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
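
// Editor's note: the code buffer is filled from both ends -- instructions
// grow upward from buffer_ (pc_), while relocation info is written downward
// from buffer_ + buffer_size_ (reloc_info_writer). That is why instr_size
// and reloc_size above are computed from opposite ends of the buffer, and
// why the ASSERT checks that the two regions have not overlapped.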


void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // No advantage to aligning branch/call targets to more than
  // a single instruction, that I am aware of.
  Align(4);
}


Register Assembler::GetRtReg(Instr instr) {
  Register rt;
  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
  return rt;
}


Register Assembler::GetRsReg(Instr instr) {
  Register rs;
  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
  return rs;
}


Register Assembler::GetRdReg(Instr instr) {
  Register rd;
  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
  return rd;
}


uint32_t Assembler::GetRt(Instr instr) {
  return (instr & kRtFieldMask) >> kRtShift;
}


uint32_t Assembler::GetRtField(Instr instr) {
  return instr & kRtFieldMask;
}


uint32_t Assembler::GetRs(Instr instr) {
  return (instr & kRsFieldMask) >> kRsShift;
}


uint32_t Assembler::GetRsField(Instr instr) {
  return instr & kRsFieldMask;
}


uint32_t Assembler::GetRd(Instr instr) {
  return (instr & kRdFieldMask) >> kRdShift;
}


uint32_t Assembler::GetRdField(Instr instr) {
  return instr & kRdFieldMask;
}


uint32_t Assembler::GetSa(Instr instr) {
  return (instr & kSaFieldMask) >> kSaShift;
}


uint32_t Assembler::GetSaField(Instr instr) {
  return instr & kSaFieldMask;
}


uint32_t Assembler::GetOpcodeField(Instr instr) {
  return instr & kOpcodeMask;
}


uint32_t Assembler::GetFunction(Instr instr) {
  return (instr & kFunctionFieldMask) >> kFunctionShift;
}


uint32_t Assembler::GetFunctionField(Instr instr) {
  return instr & kFunctionFieldMask;
}


uint32_t Assembler::GetImmediate16(Instr instr) {
  return instr & kImm16Mask;
}


uint32_t Assembler::GetLabelConst(Instr instr) {
  return instr & ~kImm16Mask;
}


bool Assembler::IsPop(Instr instr) {
  return (instr & ~kRtMask) == kPopRegPattern;
}


bool Assembler::IsPush(Instr instr) {
  return (instr & ~kRtMask) == kPushRegPattern;
}


bool Assembler::IsSwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
}


bool Assembler::IsLwRegFpOffset(Instr instr) {
  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
}


bool Assembler::IsSwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kSwRegFpNegOffsetPattern);
}


bool Assembler::IsLwRegFpNegOffset(Instr instr) {
  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
          kLwRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.

// The link chain is terminated by a value in the instruction of -1,
// which is an otherwise illegal value (branch -1 is an infinite loop).
// The instruction's 16-bit offset field addresses 32-bit words, but in
// code it is converted to an 18-bit value addressing bytes, hence the
// -4 value.

const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
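
// Illustrative arithmetic (editor's note): a linked branch carries the
// offset to the previous use of the same label in its 16-bit immediate.
// target_at() below recovers the byte offset by sign-extending the field
// and multiplying by 4: imm18 = ((instr & kImm16Mask) << 16) >> 14. For
// the chain terminator the field holds -1 (0xffff), so
// imm18 == -4 == kEndOfChain.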


bool Assembler::IsBranch(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rs_field = GetRsField(instr);
  // Checks if the instruction is a branch.
  return opcode == BEQ ||
      opcode == BNE ||
      opcode == BLEZ ||
      opcode == BGTZ ||
      opcode == BEQL ||
      opcode == BNEL ||
      opcode == BLEZL ||
      opcode == BGTZL ||
      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
}


bool Assembler::IsEmittedConstant(Instr instr) {
  uint32_t label_constant = GetLabelConst(instr);
  return label_constant == 0;  // Emitted label const in reg-exp engine.
}


bool Assembler::IsBeq(Instr instr) {
  return GetOpcodeField(instr) == BEQ;
}


bool Assembler::IsBne(Instr instr) {
  return GetOpcodeField(instr) == BNE;
}


bool Assembler::IsJump(Instr instr) {
  uint32_t opcode   = GetOpcodeField(instr);
  uint32_t rt_field = GetRtField(instr);
  uint32_t rd_field = GetRdField(instr);
  uint32_t function_field = GetFunctionField(instr);
  // Checks if the instruction is a jump.
  return opcode == J || opcode == JAL ||
      (opcode == SPECIAL && rt_field == 0 &&
      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
}


bool Assembler::IsJ(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a jump.
  return opcode == J;
}


bool Assembler::IsJal(Instr instr) {
  return GetOpcodeField(instr) == JAL;
}


bool Assembler::IsJr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}


bool Assembler::IsJalr(Instr instr) {
  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}


bool Assembler::IsLui(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is a load upper immediate.
  return opcode == LUI;
}


bool Assembler::IsOri(Instr instr) {
  uint32_t opcode = GetOpcodeField(instr);
  // Checks if the instruction is an or immediate.
  return opcode == ORI;
}


bool Assembler::IsNop(Instr instr, unsigned int type) {
  // See Assembler::nop(type).
  ASSERT(type < 32);
  uint32_t opcode = GetOpcodeField(instr);
  uint32_t function = GetFunctionField(instr);
  uint32_t rt = GetRt(instr);
  uint32_t rd = GetRd(instr);
  uint32_t sa = GetSa(instr);

  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
  // When marking non-zero type, use sll(zero_reg, at, type)
  // to avoid use of mips ssnop and ehb special encodings
  // of the sll instruction.

  Register nop_rt_reg = (type == 0) ? zero_reg : at;
  bool ret = (opcode == SPECIAL && function == SLL &&
              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
              sa == type);

  return ret;
}
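
// For reference (a sketch, assuming the field layout used throughout this
// file): the canonical nop sll(zero_reg, zero_reg, 0) encodes as all zero
// bits, 0x00000000, while a marker nop sll(zero_reg, at, type) sets only
// rt = 1 (at) and sa = type, e.g. type 1 encodes as 0x00010040. Using at
// as rt keeps these markers distinct from the architecturally special
// sll encodings ssnop (sa == 1, rt == 0) and ehb (sa == 3, rt == 0).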


int32_t Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
}


bool Assembler::IsLw(Instr instr) {
  return ((instr & kOpcodeMask) == LW);
}


int16_t Assembler::GetLwOffset(Instr instr) {
  ASSERT(IsLw(instr));
  return ((instr & kImm16Mask));
}


Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
  ASSERT(IsLw(instr));

  // We actually create a new lw instruction based on the original one.
  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
      | (offset & kImm16Mask);

  return temp_instr;
}


bool Assembler::IsSw(Instr instr) {
  return ((instr & kOpcodeMask) == SW);
}


Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
  ASSERT(IsSw(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAddImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIU);
}


Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
  ASSERT(IsAddImmediate(instr));
  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}


bool Assembler::IsAndImmediate(Instr instr) {
  return GetOpcodeField(instr) == ANDI;
}


int Assembler::target_at(int32_t pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    // Emitted label constant, not part of a branch.
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
      return (imm18 + pos);
    }
  }
  // Check we have a branch or jump instruction.
  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
  if (IsBranch(instr)) {
    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;

    if (imm18 == kEndOfChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      return pos + kBranchPCOffset + imm18;
    }
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));

    if (imm == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
      int32_t delta = instr_address - imm;
      ASSERT(pos > delta);
      return pos - delta;
    }
  } else {
    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (imm28 == kEndOfJumpChain) {
      // EndOfChain sentinel is returned directly, not relative to pc or pos.
      return kEndOfChain;
    } else {
      uint32_t instr_address = reinterpret_cast<uint32_t>(buffer_ + pos);
      instr_address &= kImm28Mask;
      int32_t delta = instr_address - imm28;
      ASSERT(pos > delta);
      return pos - delta;
    }
  }
}


void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~kImm16Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }

  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
  if (IsBranch(instr)) {
    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
    ASSERT((imm18 & 3) == 0);

    instr &= ~kImm16Mask;
    int32_t imm16 = imm18 >> 2;
    ASSERT(is_int16(imm16));

    instr_at_put(pos, instr | (imm16 & kImm16Mask));
  } else if (IsLui(instr)) {
    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    ASSERT((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pos + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm & kHiMask) >> kLuiShift));
    instr_at_put(pos + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
  } else {
    uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));

    instr_at_put(pos, instr | (imm26 & kImm26Mask));
  }
}
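
// Patching sketch (editor's note): for a lui/ori pair the full 32-bit
// address imm = buffer_ + target_pos is split across the two instructions,
// lui receiving the upper half ((imm & kHiMask) >> kLuiShift) and ori the
// lower half (imm & kImm16Mask); e.g. imm == 0x12345678 patches 0x1234
// into the lui and 0x5678 into the ori.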


void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm16Mask) == 0) {
        PrintF("value\n");
      } else {
        PrintF("%d\n", instr);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}


void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int32_t fixup_pos = L->pos();
    int32_t dist = pos - fixup_pos;
    next(L);  // Call next before overwriting link with target at fixup_pos.
    Instr instr = instr_at(fixup_pos);
    if (IsBranch(instr)) {
      if (dist > kMaxBranchOffset) {
        if (trampoline_pos == kInvalidSlotPos) {
          trampoline_pos = get_trampoline_entry(fixup_pos);
          CHECK(trampoline_pos != kInvalidSlotPos);
        }
        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
        target_at_put(fixup_pos, trampoline_pos);
        fixup_pos = trampoline_pos;
        dist = pos - fixup_pos;
      }
      target_at_put(fixup_pos, pos);
    } else {
      ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // Label can only be bound once.
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    ASSERT(link >= 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L) {
  if (L->is_bound()) {
    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
  }
  return false;
}


// We have to use a temporary register for things that can be relocated even
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
  return !RelocInfo::IsNone(rmode);
}

void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 Register rd,
                                 uint16_t sa,
                                 SecondaryField func) {
  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 Register rs,
                                 Register rt,
                                 uint16_t msb,
                                 uint16_t lsb,
                                 SecondaryField func) {
  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (msb << kRdShift) | (lsb << kSaShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
      | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 FPURegister fr,
                                 FPURegister ft,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPURegister fs,
                                 FPURegister fd,
                                 SecondaryField func) {
  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
  Instr instr = opcode | fmt | (rt.code() << kRtShift)
      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
  emit(instr);
}


void Assembler::GenInstrRegister(Opcode opcode,
                                 SecondaryField fmt,
                                 Register rt,
                                 FPUControlRegister fs,
                                 SecondaryField func) {
  ASSERT(fs.is_valid() && rt.is_valid());
  Instr instr =
      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
  emit(instr);
}


// Instructions with immediate value.
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  Register rt,
                                  int32_t j) {
  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  SecondaryField SF,
                                  int32_t j) {
  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrImmediate(Opcode opcode,
                                  Register rs,
                                  FPURegister ft,
                                  int32_t j) {
  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
      | (j & kImm16Mask);
  emit(instr);
}


void Assembler::GenInstrJump(Opcode opcode,
                             uint32_t address) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  ASSERT(is_uint26(address));
  Instr instr = opcode | address;
  emit(instr);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry(int32_t pos) {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    if (trampoline_.start() > pos) {
      trampoline_entry = trampoline_.take_slot();
    }

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


uint32_t Assembler::jump_address(Label* L) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      return kEndOfJumpChain;
    }
  }

  uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
  ASSERT((imm & 3) == 0);

  return imm;
}


int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int32_t target_pos;

  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();
      L->link_to(pc_offset());
    } else {
      L->link_to(pc_offset());
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
      return kEndOfChain;
    }
  }

  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
  ASSERT((offset & 3) == 0);
  ASSERT(is_int16(offset >> 2));

  return offset;
}
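
// Editor's note: the returned offset is relative to the branch's delay
// slot (pc_offset() + kBranchPCOffset), as required by the MIPS branch
// encoding; the emitted instruction stores offset >> 2 in its 16-bit
// immediate field, which is what the is_int16 assert above guards.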


void Assembler::label_at_put(Label* L, int at_offset) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link.
      int32_t imm18 = target_pos - at_offset;
      ASSERT((imm18 & 3) == 0);
      int32_t imm16 = imm18 >> 2;
      ASSERT(is_int16(imm16));
      instr_at_put(at_offset, (imm16 & kImm16Mask));
    } else {
      target_pos = kEndOfChain;
      instr_at_put(at_offset, 0);
      if (!trampoline_emitted_) {
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(at_offset);
  }
}


//------- Branch and jump instructions --------

void Assembler::b(int16_t offset) {
  beq(zero_reg, zero_reg, offset);
}


void Assembler::bal(int16_t offset) {
  positions_recorder()->WriteRecordedPositions();
  bgezal(zero_reg, offset);
}


void Assembler::beq(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BEQ, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgezal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bgtz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::blez(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltz(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bltzal(Register rs, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::bne(Register rs, Register rt, int16_t offset) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  GenInstrImmediate(BNE, rs, rt, offset);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                  (kImm26Bits + kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  GenInstrJump(J, target >> 2);
}


void Assembler::jr(Register rs) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (rs.is(ra)) {
    positions_recorder()->WriteRecordedPositions();
  }
  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::jal(int32_t target) {
#ifdef DEBUG
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                  (kImm26Bits + kImmFieldShift)) == 0;
  ASSERT(in_range && ((target & 3) == 0));
#endif
  positions_recorder()->WriteRecordedPositions();
  GenInstrJump(JAL, target >> 2);
}


void Assembler::jalr(Register rs, Register rd) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  positions_recorder()->WriteRecordedPositions();
  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
  BlockTrampolinePoolFor(1);  // For associated delay slot.
}


void Assembler::j_or_jr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                  (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    j(target);
  } else {
    jr(t9);
  }
}


void Assembler::jal_or_jalr(int32_t target, Register rs) {
  // Get pc of delay slot.
  uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
  bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
                  (kImm26Bits + kImmFieldShift)) == 0;
  if (in_range) {
    jal(target);
  } else {
    jalr(t9);
  }
}
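
// Editor's note: kImm26Bits + kImmFieldShift == 28, so the in_range tests
// above check whether the delay-slot pc and the target agree in their upper
// four address bits, i.e. whether both lie in the same 256 MB region -- the
// reach of the J/JAL 26-bit word-aligned immediate. Out-of-range targets
// fall back to the register forms jr/jalr through t9.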


//-------Data-processing-instructions---------

// Arithmetic.

void Assembler::addu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}


void Assembler::addiu(Register rd, Register rs, int32_t j) {
  GenInstrImmediate(ADDIU, rs, rd, j);
}


void Assembler::subu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
}


void Assembler::mul(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}


void Assembler::mult(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
}


void Assembler::multu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
}


void Assembler::div(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}


void Assembler::divu(Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}


// Logical.

void Assembler::and_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
}


void Assembler::andi(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(ANDI, rs, rt, j);
}


void Assembler::or_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
}


void Assembler::ori(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(ORI, rs, rt, j);
}


void Assembler::xor_(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
}


void Assembler::xori(Register rt, Register rs, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(XORI, rs, rt, j);
}


void Assembler::nor(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
}


// Shifts.
void Assembler::sll(Register rd,
                    Register rt,
                    uint16_t sa,
                    bool coming_from_nop) {
  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
  // generated using the sll instruction. They must be generated using
  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
  // instructions.
  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}


void Assembler::sllv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
}


void Assembler::srl(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
}


void Assembler::srlv(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
}


void Assembler::sra(Register rd, Register rt, uint16_t sa) {
  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
}


void Assembler::srav(Register rd, Register rt, Register rs) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
}


void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
  ASSERT(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
  emit(instr);
}


void Assembler::rotrv(Register rd, Register rt, Register rs) {
  // Should be called via MacroAssembler::Ror.
  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
  ASSERT(kArchVariant == kMips32r2);
  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
  emit(instr);
}


//------------Memory-instructions-------------

// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
  ASSERT(!src.rm().is(at));
  lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
  addu(at, at, src.rm());  // Add base register.
}
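
// Expansion sketch (editor's note, register names illustrative): a load
// such as lw(rd, MemOperand(s0, 0x12345)) whose offset does not fit in 16
// signed bits is emitted by the accessors below as the three-instruction
// prologue above plus the access itself:
//   lui   at, 0x0001        # upper half of the offset
//   ori   at, at, 0x2345    # lower half of the offset
//   addu  at, at, s0        # add the base register
//   lw    rd, 0(at)
// which is why at must not be the base register of such a MemOperand.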


void Assembler::lb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
  }
}


void Assembler::lbu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
  }
}


void Assembler::lh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
  }
}


void Assembler::lhu(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
  }
}


void Assembler::lw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to load.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
  }
}


void Assembler::lwl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}


void Assembler::lwr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}


void Assembler::sb(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
  }
}


void Assembler::sh(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
  }
}


void Assembler::sw(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset_)) {
    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
  } else {  // Offset > 16 bits, use multiple instructions to store.
    LoadRegPlusOffsetToAt(rs);
    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
  }
}


void Assembler::swl(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
}


void Assembler::swr(Register rd, const MemOperand& rs) {
  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}


void Assembler::lui(Register rd, int32_t j) {
  ASSERT(is_uint16(j));
  GenInstrImmediate(LUI, zero_reg, rd, j);
}


//-------------Misc-instructions--------------

// Break / Trap instructions.
void Assembler::break_(uint32_t code, bool break_as_stop) {
  ASSERT((code & ~0xfffff) == 0);
  // We need to invalidate breaks that could be stops as well because the
  // simulator expects a char pointer after the stop instruction.
  // See constants-mips.h for explanation.
  ASSERT((break_as_stop &&
          code <= kMaxStopCode &&
          code > kMaxWatchpointCode) ||
         (!break_as_stop &&
          (code > kMaxStopCode ||
           code <= kMaxWatchpointCode)));
  Instr break_instr = SPECIAL | BREAK | (code << 6);
  emit(break_instr);
}


void Assembler::stop(const char* msg, uint32_t code) {
  ASSERT(code > kMaxWatchpointCode);
  ASSERT(code <= kMaxStopCode);
#if V8_HOST_ARCH_MIPS
  break_(0x54321);
#else  // V8_HOST_ARCH_MIPS
  BlockTrampolinePoolFor(2);
  // The Simulator will handle the stop instruction and get the message address.
  // On MIPS stop() is just a special kind of break_().
  break_(code, true);
  emit(reinterpret_cast<Instr>(msg));
#endif
}


void Assembler::tge(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tlt(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tltu(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TLTU | rs.code() << kRsShift
      | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::teq(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


void Assembler::tne(Register rs, Register rt, uint16_t code) {
  ASSERT(is_uint10(code));
  Instr instr =
      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
  emit(instr);
}


// Move from HI/LO register.

void Assembler::mfhi(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
}


void Assembler::mflo(Register rd) {
  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
}


// Set on less than instructions.
void Assembler::slt(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
}


void Assembler::sltu(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
}


void Assembler::slti(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTI, rs, rt, j);
}


void Assembler::sltiu(Register rt, Register rs, int32_t j) {
  GenInstrImmediate(SLTIU, rs, rt, j);
}


// Conditional move.
void Assembler::movz(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
}


void Assembler::movn(Register rd, Register rs, Register rt) {
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
}


void Assembler::movt(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 1;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


void Assembler::movf(Register rd, Register rs, uint16_t cc) {
  Register rt;
  rt.code_ = (cc & 0x0007) << 2 | 0;
  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
}


// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
}


void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ins.
  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}


void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
  // Should be called via MacroAssembler::Ext.
  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}


//--------Coprocessor-instructions----------------

// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}


void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // load to two 32-bit loads.
  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
}


void Assembler::swc1(FPURegister fd, const MemOperand& src) {
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
}


void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
  // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
  // store to two 32-bit stores.
  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
  FPURegister nextfpreg;
  nextfpreg.setcode(fd.code() + 1);
  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
}

    
1657

    
1658
void Assembler::mtc1(Register rt, FPURegister fs) {
1659
  GenInstrRegister(COP1, MTC1, rt, fs, f0);
1660
}
1661

    
1662

    
1663
void Assembler::mfc1(Register rt, FPURegister fs) {
1664
  GenInstrRegister(COP1, MFC1, rt, fs, f0);
1665
}
1666

    
1667

    
1668
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
1669
  GenInstrRegister(COP1, CTC1, rt, fs);
1670
}
1671

    
1672

    
1673
void Assembler::cfc1(Register rt, FPUControlRegister fs) {
1674
  GenInstrRegister(COP1, CFC1, rt, fs);
1675
}
1676

    
1677

    
1678
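// Reinterpret the bits of 'd' as a 64-bit integer and split it into its
// low and high 32-bit words.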
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  OS::MemCopy(&i, &d, 8);

  *lo = i & 0xffffffff;
  *hi = i >> 32;
}


// Arithmetic.

void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
}


void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
}


void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
}


void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
    FPURegister ft) {
  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}


void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
}


void Assembler::abs_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
}


void Assembler::mov_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
}


void Assembler::neg_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
}


void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}


// Conversions.

void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
}


void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
}


void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
}


void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
}


void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
}


void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
}


void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
}


void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
}


void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
}


void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
}


void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}


void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}


void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}


void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}


void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
}


void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
}


void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
}


void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
}


void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
}


void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
}


void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}


void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}


void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
}


void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
}


void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
  ASSERT(kArchVariant == kMips32r2);
  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}


void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
}


// Conditions.
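// C.cond.fmt: compares fs and ft and records the result in FPU condition
// flag 'cc'. The 3 in bits 5..4 of the encoding below marks the compare
// instruction group.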
void Assembler::c(FPUCondition cond, SecondaryField fmt,
    FPURegister fs, FPURegister ft, uint16_t cc) {
  ASSERT(is_uint3(cc));
  ASSERT((fmt & ~(31 << kRsShift)) == 0);
  Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
      | cc << 8 | 3 << 4 | cond;
  emit(instr);
}


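// Compares src1 against +0.0 (the only src2 supported); materializes the
// zero in f14 (clobbering it) and sets FPU condition flag 0.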
void Assembler::fcmp(FPURegister src1, const double src2,
      FPUCondition cond) {
  ASSERT(src2 == 0.0);
  mtc1(zero_reg, f14);
  cvt_d_w(f14, f14);
  c(cond, D, src1, f14, 0);
}


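// Branch on FPU condition flag: 'cc' occupies bits 20..18 of the encoding
// and bit 16 selects the sense of the test (0 = false, 1 = true).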
void Assembler::bc1f(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
  emit(instr);
}


void Assembler::bc1t(int16_t offset, uint16_t cc) {
  ASSERT(is_uint3(cc));
  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
  emit(instr);
}


// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


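// An internal reference is encoded either as a lui/ori pair holding a full
// 32-bit address or as a single j/jal holding a 26-bit in-region offset;
// both forms are rebased by 'pc_delta' when the buffer moves.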
int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
  Instr instr = instr_at(pc);
  ASSERT(IsJ(instr) || IsLui(instr));
  if (IsLui(instr)) {
    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
    ASSERT(IsOri(instr_ori));
    int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
    imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
    if (imm == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm += pc_delta;
    ASSERT((imm & 3) == 0);

    instr_lui &= ~kImm16Mask;
    instr_ori &= ~kImm16Mask;

    instr_at_put(pc + 0 * Assembler::kInstrSize,
                 instr_lui | ((imm >> kLuiShift) & kImm16Mask));
    instr_at_put(pc + 1 * Assembler::kInstrSize,
                 instr_ori | (imm & kImm16Mask));
    return 2;  // Number of instructions patched.
  } else {
    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
      return 0;  // Number of instructions patched.
    }
    imm28 += pc_delta;
    imm28 &= kImm28Mask;
    ASSERT((imm28 & 3) == 0);

    instr &= ~kImm26Mask;
    uint32_t imm26 = imm28 >> 2;
    ASSERT(is_uint26(imm26));

    instr_at_put(pc, instr | (imm26 & kImm26Mask));
    return 1;  // Number of instructions patched.
  }
}


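// Grow the code buffer: 4 KB minimum, doubling while below 1 MB, then in
// 1 MB steps; internal references are patched to point into the new buffer.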
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // The new buffer.
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
  OS::MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
      RelocateInternalReference(p, pc_delta);
    }
  }

  ASSERT(!overflow());
}


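// Emit raw bytes/words of data directly into the instruction stream.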
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_,
                                       rmode,
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


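// Prevent the trampoline pool from being emitted for the next
// 'instructions' instructions.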
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  ASSERT(!trampoline_emitted_);
  ASSERT(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit a jump (2 instructions), then we emit the trampoline pool.
    { BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      b(&after_pool);
      nop();

      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        uint32_t imm32;
        imm32 = jump_address(&after_pool);
        { BlockGrowBufferScope block_buf_growth(this);
          // Buffer growth (and relocation) must be blocked for internal
          // references until associated instructions are emitted and available
          // to be patched.
          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
          lui(at, (imm32 & kHiMask) >> kLuiShift);
          ori(at, at, (imm32 & kImm16Mask));
        }
        jr(at);
        nop();
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit the trampoline once, we need to prevent
      // any further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // The number of branches to unbound labels is zero at this point, so we
    // can move the next buffer check to the maximum distance.
    next_buffer_check_ = pc_offset() +
        kMaxBranchOffset - kTrampolineSlotsSize * 16;
  }
  return;
}


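// Read the 32-bit target back out of the lui/ori pair emitted by the li
// macro-instruction.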
Address Assembler::target_address_at(Address pc) {
  Instr instr1 = instr_at(pc);
  Instr instr2 = instr_at(pc + kInstrSize);
  // Interpret 2 instructions generated by li: lui/ori.
  if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
    // Assemble the 32 bit value.
    return reinterpret_cast<Address>(
        (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
  }

  // We should never get here, force a bad address if we do.
  UNREACHABLE();
  return (Address)0x0;
}


// MIPS and ia32 use opposite encodings for qNaN and sNaN, such that an ia32
// qNaN is a MIPS sNaN, and an ia32 sNaN is a MIPS qNaN. If running from a
// heap snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
// OS::nan_value() returns a qNaN.
void Assembler::QuietNaN(HeapObject* object) {
  HeapNumber::cast(object)->set_value(OS::nan_value());
}


// On MIPS, a target address is stored in a lui/ori instruction pair, each
// of which loads 16 bits of the 32-bit address into a register.
// Patching the address must replace both instructions and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
void Assembler::set_target_address_at(Address pc, Address target) {
  Instr instr2 = instr_at(pc + kInstrSize);
  uint32_t rt_code = GetRtField(instr2);
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
  uint32_t itarget = reinterpret_cast<uint32_t>(target);

#ifdef DEBUG
  // Check we have the result from a li macro-instruction, using instr pair.
  Instr instr1 = instr_at(pc);
  CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
#endif

  // Must use 2 instructions to ensure patchable code => just use lui and ori.
  // lui rt, upper-16.
  // ori rt, rt, lower-16.
  *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);

  // The following code is an optimization for the common case of Call()
  // or Jump(), which is load to register and jump through register:
  //     li(t9, address); jalr(t9)    (or jr(t9)).
  // If the destination address is in the same 256 MB page as the call, it
  // is faster to do a direct jal, or j, rather than jump through a register,
  // since that lets the cpu pipeline prefetch the target address. However,
  // each time the address above is patched, we have to patch the direct
  // jal/j instruction, as well as possibly revert to jalr/jr if we now cross
  // a 256 MB page boundary. Note that with the jal/j instructions, we do not
  // need to load the register, but that code is left in, since it makes it
  // easy to revert this process. A further optimization could try replacing
  // the li sequence with nops.
  // This optimization can only be applied if the rt-code from instr2 is the
  // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
  // the mips return. Occasionally this lands after an li().

  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
  bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
  uint32_t target_field =
      static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
  bool patched_jump = false;

#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
  // This is a workaround for the 24k core E156 bug (it affects some 34k cores
  // as well). Since the excluded space is only 64 KB out of 256 MB (0.02 %),
  // we just apply this workaround for all cores so we don't have to identify
  // the core.
  if (in_range) {
    // The 24k core E156 bug has some very specific requirements; we only
    // check the most simple one: whether the address of the delay slot
    // instruction is in the first or last 32 KB of the 256 MB segment.
    uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
    uint32_t ipc_segment_addr = ipc & segment_mask;
    if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
      in_range = false;
  }
#endif

  if (IsJalr(instr3)) {
    // Try to convert JALR to JAL.
    if (in_range && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = JAL | target_field;
      patched_jump = true;
    }
  } else if (IsJr(instr3)) {
    // Try to convert JR to J, skipping returns (jr ra).
    bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
    if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
      *(p+2) = J | target_field;
      patched_jump = true;
    }
  } else if (IsJal(instr3)) {
    if (in_range) {
      // We are patching an already converted JAL.
      *(p+2) = JAL | target_field;
    } else {
      // Patching JAL, but out of range, so revert to JALR.
      // The JALR rs reg is the rt reg specified in the ORI instruction.
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
      *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    }
    patched_jump = true;
  } else if (IsJ(instr3)) {
    if (in_range) {
      // We are patching an already converted J (jump).
      *(p+2) = J | target_field;
    } else {
      // Trying to patch J, but out of range, so just go back to JR.
      // The JR 'rs' reg is the 'rt' reg specified in the ORI instruction
      // (instr2).
      uint32_t rs_field = GetRt(instr2) << kRsShift;
      *(p+2) = SPECIAL | rs_field | JR;
    }
    patched_jump = true;
  }

  CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
}


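// Revert an optimized direct jal/j at pc + 2 * kInstrSize back to a
// register jump (jalr/jr) through the register loaded by the lui/ori pair.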
void Assembler::JumpLabelToJumpRegister(Address pc) {
  // Address pc points to lui/ori instructions.
  // Jump to label may follow at pc + 2 * kInstrSize.
  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
#ifdef DEBUG
  Instr instr1 = instr_at(pc);
#endif
  Instr instr2 = instr_at(pc + 1 * kInstrSize);
  Instr instr3 = instr_at(pc + 2 * kInstrSize);
  bool patched = false;

  if (IsJal(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
    patched = true;
  } else if (IsJ(instr3)) {
    ASSERT(GetOpcodeField(instr1) == LUI);
    ASSERT(GetOpcodeField(instr2) == ORI);

    uint32_t rs_field = GetRt(instr2) << kRsShift;
    *(p+2) = SPECIAL | rs_field | JR;
    patched = true;
  }

  if (patched) {
    CPU::FlushICache(pc + 2, sizeof(Address));
  }
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS