deps/v8/src/mips/code-stubs-mips.cc @ f230a1cf

// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "bootstrapper.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


void FastNewClosureStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a2 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
}


void ToNumberStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void NumberToStringStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
}


void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a3, a2, a1 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}


void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a3, a2, a1, a0 };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}


void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a2 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a1, a0 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void LoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a1 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a2, a1, a0 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a0, a1 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  Address entry =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
}


void CompareNilICStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(CompareNilIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // register state
  // a0 -- number of arguments
  // a1 -- function
  // a2 -- type info cell with elements kind
  static Register registers[] = { a1, a2 };
  descriptor->register_param_count_ = 2;
  if (constant_stack_parameter_count != 0) {
    // stack param count needs (constructor pointer, and single argument)
    descriptor->stack_parameter_count_ = a0;
  }
  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->register_params_ = registers;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // register state
  // a0 -- number of arguments
  // a1 -- constructor function
  static Register registers[] = { a1 };
  descriptor->register_param_count_ = 1;

  if (constant_stack_parameter_count != 0) {
    // Stack param count needs (constructor pointer, and single argument).
    descriptor->stack_parameter_count_ = a0;
  }
  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->register_params_ = registers;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}


void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
}


void ToBooleanStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ToBooleanIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}


void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
}


void StoreGlobalStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a1, a2, a0 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
}


void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { a0, a3, a1, a2 };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}


#define __ ACCESS_MASM(masm)


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* rhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
  // Update the static counter each time a new code stub is generated.
  Isolate* isolate = masm->isolate();
  isolate->counters()->code_stubs()->Increment();

  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
  int param_count = descriptor->register_param_count_;
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    ASSERT(descriptor->register_param_count_ == 0 ||
           a0.is(descriptor->register_params_[param_count - 1]));
    // Push arguments
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor->register_params_[i]);
    }
    ExternalReference miss = descriptor->miss_handler();
    __ CallExternalReference(miss, descriptor->register_param_count_);
  }

  __ Ret();
}
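// [Editor's note, not part of the original source: an illustrative
// walk-through of the miss path above. For a descriptor such as the one
// ToBooleanStub sets up earlier in this file (one register parameter, a0,
// and a miss handler of IC_Utility(IC::kToBooleanIC_Miss)), this code opens
// an internal frame, pushes a0, calls that external reference with one
// argument, and returns -- the generic "bail out to the runtime" tail shared
// by the Hydrogen code stubs.]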


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
  __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Set up the fixed slots, copy the global object from the previous context.
  __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ li(a1, Operand(Smi::FromInt(0)));
  __ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
  __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ sw(a1, MemOperand(v0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ DropAndRet(1);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ lw(a3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(a2, Heap::kBlockContextMapRootIndex);
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
  __ li(a2, Operand(Smi::FromInt(length)));
  __ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));

  // If this block context is nested in the native context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the native context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(a3, &after_sentinel);
  if (FLAG_debug_code) {
    __ Assert(eq, kExpected0AsASmiSentinel, a3, Operand(zero_reg));
  }
  __ lw(a3, GlobalObjectOperand());
  __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
  __ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots, copy the global object from the previous context.
  __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
  __ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
  __ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
  __ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
  __ sw(a2, ContextOperand(v0, Context::GLOBAL_OBJECT_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ sw(a1, ContextOperand(v0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, v0);
  __ DropAndRet(2);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
// scratch register.  Destroys the source register.  No GC occurs during this
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public PlatformCodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return  result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};
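// [Editor's note, not part of the original source: a worked example of the
// two-word layout described in the class comment above. The Smi 5 converts
// to the double 5.0 = 1.25 * 2^2, so the exponent field is 1023 + 2 = 1025
// (0x401) and the fraction is 0.25 (top mantissa bits 0x40000). The exponent
// word is therefore 0x40140000 and the mantissa word is 0x00000000, matching
// the standard IEEE-754 encoding 0x4014000000000000 of 5.0.]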


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ sra(source_, source_, kSmiTagSize);
  // Move sign bit from source to destination.  This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
  // Subtract from 0 if source was negative.
  __ subu(at, zero_reg, source_);
  __ Movn(source_, at, exponent);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ Branch(&not_special, gt, source_, Operand(1));

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  // Safe to use 'at' as dest reg here.
  __ Or(at, exponent, Operand(exponent_word_for_1));
  __ Movn(exponent, at, source_);  // Write exp when source not 0.
  // 1, 0 and -1 all have 0 for the second word.
  __ Ret(USE_DELAY_SLOT);
  __ mov(mantissa, zero_reg);

  __ bind(&not_special);
  // Count leading zeros.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ Clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.
  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
  __ subu(mantissa, mantissa, zeros_);
  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
  __ Or(exponent, exponent, mantissa);

  // Shift up the source chopping the top bit off.
  __ Addu(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ sllv(source_, source_, zeros_);
  // Compute lower part of fraction (last 12 bits).
  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
  // And the top (top 20 bits).
  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);

  __ Ret(USE_DELAY_SLOT);
  __ or_(exponent, exponent, source_);
}


void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;

  Register scratch =
      GetRegisterThatIsNotOneOf(input_reg, result_reg);
  Register scratch2 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch3 =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
  DoubleRegister double_scratch = kLithiumScratchDouble;

  __ Push(scratch, scratch2, scratch3);

  if (!skip_fastpath()) {
    // Load double input.
    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));

    // Clear cumulative exception flags and save the FCSR.
    __ cfc1(scratch2, FCSR);
    __ ctc1(zero_reg, FCSR);

    // Try a conversion to a signed integer.
    __ Trunc_w_d(double_scratch, double_scratch);
    // Move the converted value into the result register.
    __ mfc1(result_reg, double_scratch);

    // Retrieve and restore the FCSR.
    __ cfc1(scratch, FCSR);
    __ ctc1(scratch2, FCSR);

    // Check for overflow and NaNs.
    __ And(
        scratch, scratch,
        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
           | kFCSRInvalidOpFlagMask);
    // If we had no exceptions we are done.
    __ Branch(&done, eq, scratch, Operand(zero_reg));
  }

  // Load the double value and perform a manual truncation.
  Register input_high = scratch2;
  Register input_low = scratch3;

  __ lw(input_low, MemOperand(input_reg, double_offset));
  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));

  Label normal_exponent, restore_sign;
  // Extract the biased exponent in result.
  __ Ext(result_reg,
         input_high,
         HeapNumber::kExponentShift,
         HeapNumber::kExponentBits);

  // Check for Infinity and NaNs, which should return 0.
  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
  __ Movz(result_reg, zero_reg, scratch);
  __ Branch(&done, eq, scratch, Operand(zero_reg));

  // Express exponent as delta to (number of mantissa bits + 31).
  __ Subu(result_reg,
          result_reg,
          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));

  // If the delta is strictly positive, all bits would be shifted away,
  // which means that we can return 0.
  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
  __ mov(result_reg, zero_reg);
  __ Branch(&done);

  __ bind(&normal_exponent);
  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
  // Calculate shift.
  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));

  // Save the sign.
  Register sign = result_reg;
  result_reg = no_reg;
  __ And(sign, input_high, Operand(HeapNumber::kSignMask));

  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
  // to check for this specific case.
  Label high_shift_needed, high_shift_done;
  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
  __ mov(input_high, zero_reg);
  __ Branch(&high_shift_done);
  __ bind(&high_shift_needed);

  // Set the implicit 1 before the mantissa part in input_high.
  __ Or(input_high,
        input_high,
        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  // Shift the mantissa bits to the correct position.
  // We don't need to clear non-mantissa bits as they will be shifted away.
  // If they weren't, it would mean that the answer is in the 32bit range.
  __ sllv(input_high, input_high, scratch);

  __ bind(&high_shift_done);

  // Replace the shifted bits with bits from the lower mantissa word.
  Label pos_shift, shift_done;
  __ li(at, 32);
  __ subu(scratch, at, scratch);
  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));

  // Negate scratch.
  __ Subu(scratch, zero_reg, scratch);
  __ sllv(input_low, input_low, scratch);
  __ Branch(&shift_done);

  __ bind(&pos_shift);
  __ srlv(input_low, input_low, scratch);

  __ bind(&shift_done);
  __ Or(input_high, input_high, Operand(input_low));
  // Restore sign if necessary.
  __ mov(scratch, sign);
  result_reg = sign;
  sign = no_reg;
  __ Subu(result_reg, zero_reg, input_high);
  __ Movz(result_reg, input_high, scratch);

  __ bind(&done);

  __ Pop(scratch, scratch2, scratch3);
  __ Ret();
}


bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
  // These variants are compiled ahead of time.  See next method.
  if (the_int_.is(a1) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a2) &&
      sign_.is(a3)) {
    return true;
  }
  if (the_int_.is(a2) &&
      the_heap_number_.is(v0) &&
      scratch_.is(a3) &&
      sign_.is(a0)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
  WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
  stub1.GetCode(isolate)->set_is_pregenerated(true);
  stub2.GetCode(isolate)->set_is_pregenerated(true);
}


// See comment for class, this does NOT work for int32's that are in Smi range.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  // Test sign, and save for later conditionals.
  __ And(sign_, the_int_, Operand(0x80000000u));
  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));

  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ or_(scratch_, scratch_, sign_);
  // Subtract from 0 if the value was negative.
  __ subu(at, zero_reg, the_int_);
  __ Movn(the_int_, at, sign_);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ srl(at, the_int_, shift_distance);
  __ or_(scratch_, scratch_, at);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ sll(scratch_, the_int_, 32 - shift_distance);
  __ Ret(USE_DELAY_SLOT);
  __ sw(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(scratch_, zero_reg);
  __ Ret(USE_DELAY_SLOT);
  __ sw(scratch_,
        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
}
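// [Editor's note, not part of the original source: a worked example of the
// non-special path above. For the_int_ == 0x50000000 (1342177280, which is
// 1.25 * 2^30), the exponent word becomes
// 0x41D00000 | (0x50000000 >> 10) == 0x41D40000 and the mantissa word
// becomes 0x50000000 << 22 == 0, i.e. the IEEE-754 encoding
// 0x41D4000000000000 of 1342177280.0.]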


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cc) {
  Label not_identical;
  Label heap_number, return_equal;
  Register exp_mask_reg = t5;

  __ Branch(&not_identical, ne, a0, Operand(a1));

  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  if (cc == less || cc == greater) {
    __ GetObjectType(a0, t4, t4);
    __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
  } else {
    __ GetObjectType(a0, t4, t4);
    __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
    // Comparing JS objects with <=, >= is complicated.
    if (cc != eq) {
      __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cc == less_equal || cc == greater_equal) {
        __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
        __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
        __ Branch(&return_equal, ne, a0, Operand(t2));
        ASSERT(is_int16(GREATER) && is_int16(LESS));
        __ Ret(USE_DELAY_SLOT);
        if (cc == le) {
          // undefined <= undefined should fail.
          __ li(v0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ li(v0, Operand(LESS));
        }
      }
    }
  }

  __ bind(&return_equal);
  ASSERT(is_int16(GREATER) && is_int16(LESS));
  __ Ret(USE_DELAY_SLOT);
  if (cc == less) {
    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cc == greater) {
    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
  }

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless.  For the others here is some code to check
  // for NaN.
  if (cc != lt && cc != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ And(t3, t2, Operand(exp_mask_reg));
    // If all bits not set (ne cond), then not a NaN, objects are equal.
    __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
    // Or with all low-bits of mantissa.
    __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
    __ Or(v0, t3, Operand(t2));
    // For equal we already have the right value in v0:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
    // value if it's a NaN.
    if (cc != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq, v0, Operand(zero_reg));
      ASSERT(is_int16(GREATER) && is_int16(LESS));
      __ Ret(USE_DELAY_SLOT);
      if (cc == le) {
        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
  }
  // No fall through here.

  __ bind(&not_identical);
}
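// [Editor's note, not part of the original source: the NaN test above in
// concrete bit patterns. Infinity is 0x7FF0000000000000 -- exponent bits all
// set, mantissa all zero -- so v0 ends up 0 and the identical operands
// compare equal. A quiet NaN such as 0x7FF8000000000000 has the same exponent
// but a non-zero mantissa, so v0 is non-zero and the comparison fails, as
// required for NaN.]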


static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* both_loaded_as_doubles,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  Label lhs_is_smi;
  __ JumpIfSmi(lhs, &lhs_is_smi);
  // Rhs is a Smi.
  // Check whether the non-smi is a heap number.
  __ GetObjectType(lhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal (lhs is already not zero).
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ mov(v0, lhs);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Rhs is a smi, lhs is a number.
  // Convert smi rhs to double.
  __ sra(at, rhs, kSmiTagSize);
  __ mtc1(at, f14);
  __ cvt_d_w(f14, f14);
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));

  // We now have both loaded as doubles.
  __ jmp(both_loaded_as_doubles);

  __ bind(&lhs_is_smi);
  // Lhs is a Smi.  Check whether the non-smi is a heap number.
  __ GetObjectType(rhs, t4, t4);
  if (strict) {
    // If lhs was not a number and rhs was a Smi then strict equality cannot
    // succeed. Return non-equal.
    __ Ret(USE_DELAY_SLOT, ne, t4, Operand(HEAP_NUMBER_TYPE));
    __ li(v0, Operand(1));
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ Branch(slow, ne, t4, Operand(HEAP_NUMBER_TYPE));
  }

  // Lhs is a smi, rhs is a number.
  // Convert smi lhs to double.
  __ sra(at, lhs, kSmiTagSize);
  __ mtc1(at, f12);
  __ cvt_d_w(f12, f12);
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  // Fall through to both_loaded_as_doubles.
}


static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
    // If either operand is a JS object or an oddball value, then they are
    // not equal since their pointers are different.
    // There is no test for undetectability in strict equality.
    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
    Label first_non_object;
    // Get the type of the first operand into a2 and compare it with
    // FIRST_SPEC_OBJECT_TYPE.
    __ GetObjectType(lhs, a2, a2);
    __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));

    // Return non-zero.
    Label return_not_equal;
    __ bind(&return_not_equal);
    __ Ret(USE_DELAY_SLOT);
    __ li(v0, Operand(1));

    __ bind(&first_non_object);
    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));

    __ GetObjectType(rhs, a3, a3);
    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

    // Check for oddballs: true, false, null, undefined.
    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));

    // Now that we have the types we might as well check for
    // internalized-internalized.
    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
    __ Or(a2, a2, Operand(a3));
    __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
    __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
}


static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  __ GetObjectType(lhs, a3, a2);
  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
  __ lw(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
  // If first was a heap number & second wasn't, go to slow case.
  __ Branch(slow, ne, a3, Operand(a2));

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));

  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
         (lhs.is(a1) && rhs.is(a0)));

  // a2 is object type of rhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ And(at, a2, Operand(kIsNotStringMask));
  __ Branch(&object_test, ne, at, Operand(zero_reg));
  __ And(at, a2, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));
  __ GetObjectType(rhs, a3, a3);
  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
  __ And(at, a3, Operand(kIsNotInternalizedMask));
  __ Branch(possible_strings, ne, at, Operand(zero_reg));

  // Both are internalized strings. We already checked they weren't the same
  // pointer so they are not equal.
  __ Ret(USE_DELAY_SLOT);
  __ li(v0, Operand(1));   // Non-zero indicates not equal.

  __ bind(&object_test);
  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ GetObjectType(rhs, a2, a3);
  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If both objects are undetectable, they are equal.  Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ lw(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
  __ and_(a0, a2, a3);
  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
  __ Ret(USE_DELAY_SLOT);
  __ xori(v0, a0, 1 << Map::kIsUndetectable);
}


static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                         Register input,
                                         Register scratch,
                                         CompareIC::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareIC::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareIC::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/string here, but as long as
  // hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


// On entry a1 and a0 are the values to be compared.
// On exit v0 is 0, positive or negative to indicate the result of
// the comparison.
void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = a1;
  Register rhs = a0;
  Condition cc = GetCondition();

  Label miss;
  ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
  ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles;

  Label not_two_smis, smi_done;
  __ Or(a2, a1, a0);
  __ JumpIfNotSmi(a2, &not_two_smis);
  __ sra(a1, a1, 1);
  __ sra(a0, a0, 1);
  __ Ret(USE_DELAY_SLOT);
  __ subu(v0, a1, a0);
  __ bind(&not_two_smis);
1056

    
1057
  // NOTICE! This code is only reached after a smi-fast-case check, so
1058
  // it is certain that at least one operand isn't a smi.
1059

    
1060
  // Handle the case where the objects are identical.  Either returns the answer
1061
  // or goes to slow.  Only falls through if the objects were not identical.
1062
  EmitIdenticalObjectComparison(masm, &slow, cc);
1063

    
1064
  // If either is a Smi (we know that not both are), then they can only
1065
  // be strictly equal if the other is a HeapNumber.
1066
  STATIC_ASSERT(kSmiTag == 0);
1067
  ASSERT_EQ(0, Smi::FromInt(0));
1068
  __ And(t2, lhs, Operand(rhs));
1069
  __ JumpIfNotSmi(t2, &not_smis, t0);
1070
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
1071
  // 1) Return the answer.
1072
  // 2) Go to slow.
1073
  // 3) Fall through to both_loaded_as_doubles.
1074
  // 4) Jump to rhs_not_nan.
1075
  // In cases 3 and 4 we have found out we were dealing with a number-number
1076
  // comparison and the numbers have been loaded into f12 and f14 as doubles,
1077
  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
1078
  EmitSmiNonsmiComparison(masm, lhs, rhs,
1079
                          &both_loaded_as_doubles, &slow, strict());
1080

    
1081
  __ bind(&both_loaded_as_doubles);
1082
  // f12, f14 are the double representations of the left hand side
1083
  // and the right hand side if we have FPU. Otherwise a2, a3 represent
1084
  // left hand side and a0, a1 represent right hand side.
1085

    
1086
  Isolate* isolate = masm->isolate();
1087
  Label nan;
1088
  __ li(t0, Operand(LESS));
1089
  __ li(t1, Operand(GREATER));
1090
  __ li(t2, Operand(EQUAL));
1091

    
1092
  // Check if either rhs or lhs is NaN.
1093
  __ BranchF(NULL, &nan, eq, f12, f14);
1094

    
1095
  // Check if LESS condition is satisfied. If true, move conditionally
1096
  // result to v0.
1097
  __ c(OLT, D, f12, f14);
1098
  __ Movt(v0, t0);
1099
  // Use previous check to store conditionally to v0 oposite condition
1100
  // (GREATER). If rhs is equal to lhs, this will be corrected in next
1101
  // check.
1102
  __ Movf(v0, t1);
1103
  // Check if EQUAL condition is satisfied. If true, move conditionally
1104
  // result to v0.
1105
  __ c(EQ, D, f12, f14);
1106
  __ Movt(v0, t2);
1107

    
1108
  __ Ret();
1109

    
1110
  __ bind(&nan);
1111
  // NaN comparisons always fail.
1112
  // Load whatever we need in v0 to make the comparison fail.
1113
  ASSERT(is_int16(GREATER) && is_int16(LESS));
1114
  __ Ret(USE_DELAY_SLOT);
1115
  if (cc == lt || cc == le) {
1116
    __ li(v0, Operand(GREATER));
1117
  } else {
1118
    __ li(v0, Operand(LESS));
1119
  }
1120

    
1121

    
1122
  __ bind(&not_smis);
1123
  // At this point we know we are dealing with two different objects,
1124
  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
1125
  if (strict()) {
1126
    // This returns non-equal for some object types, or falls through if it
1127
    // was not lucky.
1128
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
1129
  }
1130

    
1131
  Label check_for_internalized_strings;
1132
  Label flat_string_check;
1133
  // Check for heap-number-heap-number comparison. Can jump to slow case,
1134
  // or load both doubles and jump to the code that handles
1135
  // that case. If the inputs are not doubles then jumps to
1136
  // check_for_internalized_strings.
1137
  // In this case a2 will contain the type of lhs_.
1138
  EmitCheckForTwoHeapNumbers(masm,
1139
                             lhs,
1140
                             rhs,
1141
                             &both_loaded_as_doubles,
1142
                             &check_for_internalized_strings,
1143
                             &flat_string_check);
1144

    
1145
  __ bind(&check_for_internalized_strings);
1146
  if (cc == eq && !strict()) {
1147
    // Returns an answer for two internalized strings or two
1148
    // detectable objects.
1149
    // Otherwise jumps to string case or not both strings case.
1150
    // Assumes that a2 is the type of lhs_ on entry.
1151
    EmitCheckForInternalizedStringsOrObjects(
1152
        masm, lhs, rhs, &flat_string_check, &slow);
1153
  }
1154

    
1155
  // Check for both being sequential ASCII strings, and inline if that is the
1156
  // case.
1157
  __ bind(&flat_string_check);
1158

    
1159
  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
1160

    
1161
  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
1162
  if (cc == eq) {
1163
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
1164
                                                     lhs,
1165
                                                     rhs,
1166
                                                     a2,
1167
                                                     a3,
1168
                                                     t0);
1169
  } else {
1170
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
1171
                                                       lhs,
1172
                                                       rhs,
1173
                                                       a2,
1174
                                                       a3,
1175
                                                       t0,
1176
                                                       t1);
1177
  }
1178
  // Never falls through to here.
1179

    
1180
  __ bind(&slow);
1181
  // Prepare for call to builtin. Push object pointers, a0 (lhs) first,
1182
  // a1 (rhs) second.
1183
  __ Push(lhs, rhs);
1184
  // Figure out which native to call and setup the arguments.
1185
  Builtins::JavaScript native;
1186
  if (cc == eq) {
1187
    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
1188
  } else {
1189
    native = Builtins::COMPARE;
1190
    int ncr;  // NaN compare result.
1191
    if (cc == lt || cc == le) {
1192
      ncr = GREATER;
1193
    } else {
1194
      ASSERT(cc == gt || cc == ge);  // Remaining cases.
1195
      ncr = LESS;
1196
    }
1197
    __ li(a0, Operand(Smi::FromInt(ncr)));
1198
    __ push(a0);
1199
  }
1200

    
1201
  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
1202
  // tagged as a small integer.
1203
  __ InvokeBuiltin(native, JUMP_FUNCTION);
1204

    
1205
  __ bind(&miss);
1206
  GenerateMiss(masm);
1207
}
1208

    
1209

    
1210
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
1211
  // We don't allow a GC during a store buffer overflow so there is no need to
1212
  // store the registers in any particular way, but we do have to store and
1213
  // restore them.
1214
  __ MultiPush(kJSCallerSaved | ra.bit());
1215
  if (save_doubles_ == kSaveFPRegs) {
1216
    __ MultiPushFPU(kCallerSavedFPU);
1217
  }
1218
  const int argument_count = 1;
1219
  const int fp_argument_count = 0;
1220
  const Register scratch = a1;
1221

    
1222
  AllowExternalCallThatCantCauseGC scope(masm);
1223
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
1224
  __ li(a0, Operand(ExternalReference::isolate_address(masm->isolate())));
1225
  __ CallCFunction(
1226
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
1227
      argument_count);
1228
  if (save_doubles_ == kSaveFPRegs) {
1229
    __ MultiPopFPU(kCallerSavedFPU);
1230
  }
1231

    
1232
  __ MultiPop(kJSCallerSaved | ra.bit());
1233
  __ Ret();
1234
}
1235

    
1236

    
1237
void BinaryOpStub::InitializeInterfaceDescriptor(
1238
    Isolate* isolate,
1239
    CodeStubInterfaceDescriptor* descriptor) {
1240
  static Register registers[] = { a1, a0 };
1241
  descriptor->register_param_count_ = 2;
1242
  descriptor->register_params_ = registers;
1243
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
1244
  descriptor->SetMissHandler(
1245
      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
1246
}
1247

    
1248

    
1249
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1250
  // Untagged case: double input in f4, double result goes
1251
  //   into f4.
1252
  // Tagged case: tagged input on top of stack and in a0,
1253
  //   tagged result (heap number) goes into v0.
1254

    
1255
  Label input_not_smi;
1256
  Label loaded;
1257
  Label calculate;
1258
  Label invalid_cache;
1259
  const Register scratch0 = t5;
1260
  const Register scratch1 = t3;
1261
  const Register cache_entry = a0;
1262
  const bool tagged = (argument_type_ == TAGGED);
1263

    
1264
  if (tagged) {
1265
    // Argument is a number and is on stack and in a0.
1266
    // Load argument and check if it is a smi.
1267
    __ JumpIfNotSmi(a0, &input_not_smi);
1268

    
1269
    // Input is a smi. Convert to double and load the low and high words
1270
    // of the double into a2, a3.
1271
    __ sra(t0, a0, kSmiTagSize);
1272
    __ mtc1(t0, f4);
1273
    __ cvt_d_w(f4, f4);
1274
    __ Move(a2, a3, f4);
1275
    __ Branch(&loaded);
1276

    
1277
    __ bind(&input_not_smi);
1278
    // Check if input is a HeapNumber.
1279
    __ CheckMap(a0,
1280
                a1,
1281
                Heap::kHeapNumberMapRootIndex,
1282
                &calculate,
1283
                DONT_DO_SMI_CHECK);
1284
    // Input is a HeapNumber. Store the
1285
    // low and high words into a2, a3.
1286
    __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
1287
    __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
1288
  } else {
1289
    // Input is untagged double in f4. Output goes to f4.
1290
    __ Move(a2, a3, f4);
1291
  }
1292
  __ bind(&loaded);
1293
  // a2 = low 32 bits of double value.
1294
  // a3 = high 32 bits of double value.
1295
  // Compute hash (the shifts are arithmetic):
1296
  //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
1297
  __ Xor(a1, a2, a3);
1298
  __ sra(t0, a1, 16);
1299
  __ Xor(a1, a1, t0);
1300
  __ sra(t0, a1, 8);
1301
  __ Xor(a1, a1, t0);
1302
  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
1303
  __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
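  // [Editor's note, not part of the original source: a worked example of the
  // hash, assuming kCacheSize is 512. For the input 2.0 (high word
  // 0x40000000, low word 0x00000000): h = 0x40000000; h ^= h >> 16 gives
  // 0x40004000; h ^= h >> 8 gives 0x40404040; masking with kCacheSize - 1
  // (0x1FF) leaves cache index 0x40 == 64.]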

  // a2 = low 32 bits of double value.
  // a3 = high 32 bits of double value.
  // a1 = TranscendentalCache::hash(double value).
  __ li(cache_entry, Operand(
      ExternalReference::transcendental_cache_array_address(
          masm->isolate())));
  // a0 points to cache array.
  __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
      Isolate::Current()->transcendental_cache()->caches_[0])));
  // a0 points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
  __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));

#ifdef DEBUG
  // Check that the layout of cache elements match expectations.
  { TranscendentalCache::SubCache::Element test_elem[2];
    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
    char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
    char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
    CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer.
    CHECK_EQ(0, elem_in0 - elem_start);
    CHECK_EQ(kIntSize, elem_in1 - elem_start);
    CHECK_EQ(2 * kIntSize, elem_out - elem_start);
  }
#endif

  // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
  __ sll(t0, a1, 1);
  __ Addu(a1, a1, t0);
  __ sll(t0, a1, 2);
  __ Addu(cache_entry, cache_entry, t0);

  // Check if cache matches: Double value is stored in uint32_t[2] array.
  __ lw(t0, MemOperand(cache_entry, 0));
  __ lw(t1, MemOperand(cache_entry, 4));
  __ lw(t2, MemOperand(cache_entry, 8));
  __ Branch(&calculate, ne, a2, Operand(t0));
  __ Branch(&calculate, ne, a3, Operand(t1));
  // Cache hit. Load result, cleanup and return.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(
      counters->transcendental_cache_hit(), 1, scratch0, scratch1);
  if (tagged) {
    // Pop input value from stack and load result into v0.
    __ Drop(1);
    __ mov(v0, t2);
  } else {
    // Load result into f4.
    __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
  }
  __ Ret();

  __ bind(&calculate);
  __ IncrementCounter(
      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
  if (tagged) {
    __ bind(&invalid_cache);
    __ TailCallExternalReference(ExternalReference(RuntimeFunction(),
                                                   masm->isolate()),
                                 1,
                                 1);
  } else {
    Label no_update;
    Label skip_cache;

    // Call C function to calculate the result and update the cache.
    // a0: precalculated cache entry address.
    // a2 and a3: parts of the double value.
    // Store a0, a2 and a3 on stack for later before calling C function.
    __ Push(a3, a2, cache_entry);
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(f4);

    // Try to update the cache. If we cannot allocate a
    // heap number, we return the result without updating.
    __ Pop(a3, a2, cache_entry);
    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(t2, scratch0, scratch1, t1, &no_update);
    __ sdc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));

    __ sw(a2, MemOperand(cache_entry, 0 * kPointerSize));
    __ sw(a3, MemOperand(cache_entry, 1 * kPointerSize));
    __ sw(t2, MemOperand(cache_entry, 2 * kPointerSize));

    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, cache_entry);

    __ bind(&invalid_cache);
    // The cache is invalid. Call runtime which will recreate the
    // cache.
    __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(a0, scratch0, scratch1, t1, &skip_cache);
    __ sdc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset));
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(a0);
      __ CallRuntime(RuntimeFunction(), 1);
    }
    __ ldc1(f4, FieldMemOperand(v0, HeapNumber::kValueOffset));
    __ Ret();

    __ bind(&skip_cache);
    // Call C function to calculate the result and answer directly
    // without updating the cache.
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(f4);
    __ bind(&no_update);

    // We return the value in f4 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);

      // Allocate an aligned object larger than a HeapNumber.
      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
      __ li(scratch0, Operand(4 * kPointerSize));
      __ push(scratch0);
      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    }
    __ Ret();
  }
}


void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
                                                    Register scratch) {
  __ push(ra);
  __ PrepareCallCFunction(2, scratch);
  if (IsMipsSoftFloatABI) {
    __ Move(a0, a1, f4);
  } else {
    __ mov_d(f12, f4);
  }
  AllowExternalCallThatCantCauseGC scope(masm);
  Isolate* isolate = masm->isolate();
  switch (type_) {
    case TranscendentalCache::SIN:
      __ CallCFunction(
          ExternalReference::math_sin_double_function(isolate),
1446
          0, 1);
1447
      break;
1448
    case TranscendentalCache::COS:
1449
      __ CallCFunction(
1450
          ExternalReference::math_cos_double_function(isolate),
1451
          0, 1);
1452
      break;
1453
    case TranscendentalCache::TAN:
1454
      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
1455
          0, 1);
1456
      break;
1457
    case TranscendentalCache::LOG:
1458
      __ CallCFunction(
1459
          ExternalReference::math_log_double_function(isolate),
1460
          0, 1);
1461
      break;
1462
    default:
1463
      UNIMPLEMENTED();
1464
      break;
1465
  }
1466
  __ pop(ra);
1467
}
1468

    
1469

    
1470
Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
1471
  switch (type_) {
1472
    // Add more cases when necessary.
1473
    case TranscendentalCache::SIN: return Runtime::kMath_sin;
1474
    case TranscendentalCache::COS: return Runtime::kMath_cos;
1475
    case TranscendentalCache::TAN: return Runtime::kMath_tan;
1476
    case TranscendentalCache::LOG: return Runtime::kMath_log;
1477
    default:
1478
      UNIMPLEMENTED();
1479
      return Runtime::kAbort;
1480
  }
1481
}
1482

    
1483

    
1484
void MathPowStub::Generate(MacroAssembler* masm) {
1485
  const Register base = a1;
1486
  const Register exponent = a2;
1487
  const Register heapnumbermap = t1;
1488
  const Register heapnumber = v0;
1489
  const DoubleRegister double_base = f2;
1490
  const DoubleRegister double_exponent = f4;
1491
  const DoubleRegister double_result = f0;
1492
  const DoubleRegister double_scratch = f6;
1493
  const FPURegister single_scratch = f8;
1494
  const Register scratch = t5;
1495
  const Register scratch2 = t3;
1496

    
1497
  Label call_runtime, done, int_exponent;
1498
  if (exponent_type_ == ON_STACK) {
1499
    Label base_is_smi, unpack_exponent;
1500
    // The exponent and base are supplied as arguments on the stack.
1501
    // This can only happen if the stub is called from non-optimized code.
1502
    // Load input parameters from stack to double registers.
1503
    __ lw(base, MemOperand(sp, 1 * kPointerSize));
1504
    __ lw(exponent, MemOperand(sp, 0 * kPointerSize));
1505

    
1506
    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
1507

    
1508
    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
1509
    __ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
1510
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1511

    
1512
    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
1513
    __ jmp(&unpack_exponent);
1514

    
1515
    __ bind(&base_is_smi);
1516
    __ mtc1(scratch, single_scratch);
1517
    __ cvt_d_w(double_base, single_scratch);
1518
    __ bind(&unpack_exponent);
1519

    
1520
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1521

    
1522
    __ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
1523
    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
1524
    __ ldc1(double_exponent,
1525
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
1526
  } else if (exponent_type_ == TAGGED) {
1527
    // Base is already in double_base.
1528
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
1529

    
1530
    __ ldc1(double_exponent,
1531
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
1532
  }
1533

    
1534
  if (exponent_type_ != INTEGER) {
1535
    Label int_exponent_convert;
1536
    // Detect integer exponents stored as double.
1537
    __ EmitFPUTruncate(kRoundToMinusInf,
1538
                       scratch,
1539
                       double_exponent,
1540
                       at,
1541
                       double_scratch,
1542
                       scratch2,
1543
                       kCheckForInexactConversion);
1544
    // scratch2 == 0 means there was no conversion error.
1545
    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
1546

    
1547
    if (exponent_type_ == ON_STACK) {
1548
      // Detect square root case.  Crankshaft detects constant +/-0.5 at
1549
      // compile time and uses DoMathPowHalf instead.  We then skip this check
1550
      // for non-constant cases of +/-0.5 as these hardly occur.
1551
      Label not_plus_half;
1552

    
1553
      // Test for 0.5.
1554
      __ Move(double_scratch, 0.5);
1555
      __ BranchF(USE_DELAY_SLOT,
1556
                 &not_plus_half,
1557
                 NULL,
1558
                 ne,
1559
                 double_exponent,
1560
                 double_scratch);
1561
      // double_scratch can be overwritten in the delay slot.
1562
      // Calculates square root of base.  Check for the special case of
1563
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
1564
      __ Move(double_scratch, -V8_INFINITY);
1565
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1566
      __ neg_d(double_result, double_scratch);
1567

    
1568
      // Add +0 to convert -0 to +0.
1569
      __ add_d(double_scratch, double_base, kDoubleRegZero);
1570
      __ sqrt_d(double_result, double_scratch);
1571
      __ jmp(&done);
1572

    
1573
      __ bind(&not_plus_half);
1574
      __ Move(double_scratch, -0.5);
1575
      __ BranchF(USE_DELAY_SLOT,
1576
                 &call_runtime,
1577
                 NULL,
1578
                 ne,
1579
                 double_exponent,
1580
                 double_scratch);
1581
      // double_scratch can be overwritten in the delay slot.
1582
      // Calculates square root of base.  Check for the special case of
1583
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
1584
      __ Move(double_scratch, -V8_INFINITY);
1585
      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
1586
      __ Move(double_result, kDoubleRegZero);
1587

    
1588
      // Add +0 to convert -0 to +0.
1589
      __ add_d(double_scratch, double_base, kDoubleRegZero);
1590
      __ Move(double_result, 1);
1591
      __ sqrt_d(double_scratch, double_scratch);
1592
      __ div_d(double_result, double_result, double_scratch);
1593
      __ jmp(&done);
1594
    }
1595

    
1596
    __ push(ra);
1597
    {
1598
      AllowExternalCallThatCantCauseGC scope(masm);
1599
      __ PrepareCallCFunction(0, 2, scratch2);
1600
      __ SetCallCDoubleArguments(double_base, double_exponent);
1601
      __ CallCFunction(
1602
          ExternalReference::power_double_double_function(masm->isolate()),
1603
          0, 2);
1604
    }
1605
    __ pop(ra);
1606
    __ GetCFunctionDoubleResult(double_result);
1607
    __ jmp(&done);
1608

    
1609
    __ bind(&int_exponent_convert);
1610
  }
1611

    
1612
  // Calculate power with integer exponent.
1613
  __ bind(&int_exponent);
1614

    
1615
  // Get two copies of exponent in the registers scratch and exponent.
1616
  if (exponent_type_ == INTEGER) {
1617
    __ mov(scratch, exponent);
1618
  } else {
1619
    // Exponent has previously been stored into scratch as untagged integer.
1620
    __ mov(exponent, scratch);
1621
  }
1622

    
1623
  __ mov_d(double_scratch, double_base);  // Back up base.
1624
  __ Move(double_result, 1.0);
1625

    
1626
  // Get absolute value of exponent.
1627
  Label positive_exponent;
1628
  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
1629
  __ Subu(scratch, zero_reg, scratch);
1630
  __ bind(&positive_exponent);
1631

    
1632
  Label while_true, no_carry, loop_end;
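  // Exponentiation by squaring: multiply the result by the current power of
  // the base whenever the low exponent bit is set, then square the base and
  // shift the exponent right until it reaches zero.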
1633
  __ bind(&while_true);
1634

    
1635
  __ And(scratch2, scratch, 1);
1636

    
1637
  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
1638
  __ mul_d(double_result, double_result, double_scratch);
1639
  __ bind(&no_carry);
1640

    
1641
  __ sra(scratch, scratch, 1);
1642

    
1643
  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
1644
  __ mul_d(double_scratch, double_scratch, double_scratch);
1645

    
1646
  __ Branch(&while_true);
1647

    
1648
  __ bind(&loop_end);
1649

    
1650
  __ Branch(&done, ge, exponent, Operand(zero_reg));
1651
  __ Move(double_scratch, 1.0);
1652
  __ div_d(double_result, double_scratch, double_result);
1653
  // Test whether result is zero.  Bail out to check for subnormal result.
1654
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
1655
  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
1656

    
1657
  // double_exponent may not contain the exponent value if the input was a
1658
  // smi.  We set it with exponent value before bailing out.
1659
  __ mtc1(exponent, single_scratch);
1660
  __ cvt_d_w(double_exponent, single_scratch);
1661

    
1662
  // Returning or bailing out.
1663
  Counters* counters = masm->isolate()->counters();
1664
  if (exponent_type_ == ON_STACK) {
1665
    // The arguments are still on the stack.
1666
    __ bind(&call_runtime);
1667
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
1668

    
1669
    // The stub is called from non-optimized code, which expects the result
1670
    // as heap number in exponent.
1671
    __ bind(&done);
1672
    __ AllocateHeapNumber(
1673
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
1674
    __ sdc1(double_result,
1675
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
1676
    ASSERT(heapnumber.is(v0));
1677
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1678
    __ DropAndRet(2);
1679
  } else {
1680
    __ push(ra);
1681
    {
1682
      AllowExternalCallThatCantCauseGC scope(masm);
1683
      __ PrepareCallCFunction(0, 2, scratch);
1684
      __ SetCallCDoubleArguments(double_base, double_exponent);
1685
      __ CallCFunction(
1686
          ExternalReference::power_double_double_function(masm->isolate()),
1687
          0, 2);
1688
    }
1689
    __ pop(ra);
1690
    __ GetCFunctionDoubleResult(double_result);
1691

    
1692
    __ bind(&done);
1693
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
1694
    __ Ret();
1695
  }
1696
}


bool CEntryStub::NeedsImmovableCode() {
  return true;
}


bool CEntryStub::IsPregenerated(Isolate* isolate) {
  return (!save_doubles_ || isolate->fp_stubs_generated()) &&
          result_size_ == 1;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  BinaryOpStub::GenerateAheadOfTime(isolate);
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub save_doubles(1, mode);
  StoreBufferOverflowStub stub(mode);
  // These stubs might already be in the snapshot, detect that and don't
  // regenerate, which would lead to code stub initialization state being
  // messed up.
  Code* save_doubles_code;
  if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
    save_doubles_code = *save_doubles.GetCode(isolate);
  }
  Code* store_buffer_overflow_code;
  if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
    store_buffer_overflow_code = *stub.GetCode(isolate);
  }
  save_doubles_code->set_is_pregenerated(true);
  store_buffer_overflow_code->set_is_pregenerated(true);
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(1, kDontSaveFPRegs);
  Handle<Code> code = stub.GetCode(isolate);
  code->set_is_pregenerated(true);
}


static void JumpIfOOM(MacroAssembler* masm,
                      Register value,
                      Register scratch,
                      Label* oom_label) {
  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
  STATIC_ASSERT(kFailureTag == 3);
  __ andi(scratch, value, 0xf);
  __ Branch(oom_label, eq, scratch, Operand(0xf));
}


void CEntryStub::GenerateCore(MacroAssembler* masm,
1762
                              Label* throw_normal_exception,
1763
                              Label* throw_termination_exception,
1764
                              Label* throw_out_of_memory_exception,
1765
                              bool do_gc,
1766
                              bool always_allocate) {
1767
  // v0: result parameter for PerformGC, if any
1768
  // s0: number of arguments including receiver (C callee-saved)
1769
  // s1: pointer to the first argument          (C callee-saved)
1770
  // s2: pointer to builtin function            (C callee-saved)
1771

    
1772
  Isolate* isolate = masm->isolate();
1773

    
1774
  if (do_gc) {
1775
    // Move result passed in v0 into a0 to call PerformGC.
1776
    __ mov(a0, v0);
1777
    __ PrepareCallCFunction(2, 0, a1);
1778
    __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
1779
    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
1780
  }
1781

    
1782
  ExternalReference scope_depth =
1783
      ExternalReference::heap_always_allocate_scope_depth(isolate);
1784
  if (always_allocate) {
1785
    __ li(a0, Operand(scope_depth));
1786
    __ lw(a1, MemOperand(a0));
1787
    __ Addu(a1, a1, Operand(1));
1788
    __ sw(a1, MemOperand(a0));
1789
  }
1790

    
1791
  // Prepare arguments for C routine.
1792
  // a0 = argc
1793
  __ mov(a0, s0);
1794
  // a1 = argv (set in the delay slot after find_ra below).
1795

    
1796
  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
1797
  // also need to reserve the 4 argument slots on the stack.
1798

    
1799
  __ AssertStackIsAligned();
1800

    
1801
  __ li(a2, Operand(ExternalReference::isolate_address(isolate)));
1802

    
1803
  // To let the GC traverse the return address of the exit frames, we need to
1804
  // know where the return address is. The CEntryStub is unmovable, so
1805
  // we can store the address on the stack to be able to find it again and
1806
  // we never have to restore it, because it will not change.
1807
  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1808
    // This branch-and-link sequence is needed to find the current PC on mips,
1809
    // saved to the ra register.
1810
    // Use masm-> here instead of the double-underscore macro since extra
1811
    // coverage code can interfere with the proper calculation of ra.
1812
    Label find_ra;
1813
    masm->bal(&find_ra);  // bal exposes branch delay slot.
1814
    masm->mov(a1, s1);
1815
    masm->bind(&find_ra);
1816

    
1817
    // Adjust the value in ra to point to the correct return location, 2nd
1818
    // instruction past the real call into C code (the jalr(t9)), and push it.
1819
    // This is the return address of the exit frame.
1820
    const int kNumInstructionsToJump = 5;
1821
    masm->Addu(ra, ra, kNumInstructionsToJump * kPointerSize);
1822
    masm->sw(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
1823
    // Stack space reservation moved to the branch delay slot below.
1824
    // Stack is still aligned.
1825

    
1826
    // Call the C routine.
1827
    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
1828
    masm->jalr(t9);
1829
    // Set up sp in the delay slot.
1830
    masm->addiu(sp, sp, -kCArgsSlotsSize);
1831
    // Make sure the stored 'ra' points to this position.
1832
    ASSERT_EQ(kNumInstructionsToJump,
1833
              masm->InstructionsGeneratedSince(&find_ra));
1834
  }
1835

    
1836
  if (always_allocate) {
1837
    // It's okay to clobber a2 and a3 here. v0 & v1 contain result.
1838
    __ li(a2, Operand(scope_depth));
1839
    __ lw(a3, MemOperand(a2));
1840
    __ Subu(a3, a3, Operand(1));
1841
    __ sw(a3, MemOperand(a2));
1842
  }
1843

    
1844
  // Check for failure result.
1845
  Label failure_returned;
1846
  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
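  // Failure objects have both low tag bits set (kFailureTag == 3); adding one
  // clears those bits, so the masked value below is zero for failure results.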
1847
  __ addiu(a2, v0, 1);
1848
  __ andi(t0, a2, kFailureTagMask);
1849
  __ Branch(USE_DELAY_SLOT, &failure_returned, eq, t0, Operand(zero_reg));
1850
  // Restore stack (remove arg slots) in branch delay slot.
1851
  __ addiu(sp, sp, kCArgsSlotsSize);
1852

    
1853

    
1854
  // Exit C frame and return.
1855
  // v0:v1: result
1856
  // sp: stack pointer
1857
  // fp: frame pointer
1858
  __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
1859

    
1860
  // Check if we should retry or throw exception.
1861
  Label retry;
1862
  __ bind(&failure_returned);
1863
  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
1864
  __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
1865
  __ Branch(&retry, eq, t0, Operand(zero_reg));
1866

    
1867
  // Special handling of out of memory exceptions.
1868
  JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
1869

    
1870
  // Retrieve the pending exception.
1871
  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1872
                                      isolate)));
1873
  __ lw(v0, MemOperand(t0));
1874

    
1875
  // See if we just retrieved an OOM exception.
1876
  JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
1877

    
1878
  // Clear the pending exception.
1879
  __ li(a3, Operand(isolate->factory()->the_hole_value()));
1880
  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1881
                                      isolate)));
1882
  __ sw(a3, MemOperand(t0));
1883

    
1884
  // Special handling of termination exceptions which are uncatchable
1885
  // by javascript code.
1886
  __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
1887
  __ Branch(throw_termination_exception, eq, v0, Operand(t0));
1888

    
1889
  // Handle normal exception.
1890
  __ jmp(throw_normal_exception);
1891

    
1892
  __ bind(&retry);
1893
  // Last failure (v0) will be moved to (a0) for parameter when retrying.
1894
}
1895

    
1896

    
1897
void CEntryStub::Generate(MacroAssembler* masm) {
1898
  // Called from JavaScript; parameters are on stack as if calling JS function
1899
  // s0: number of arguments including receiver
1900
  // s1: size of arguments excluding receiver
1901
  // s2: pointer to builtin function
1902
  // fp: frame pointer    (restored after C call)
1903
  // sp: stack pointer    (restored as callee's sp after C call)
1904
  // cp: current context  (C callee-saved)
1905

    
1906
  ProfileEntryHookStub::MaybeCallEntryHook(masm);
1907

    
1908
  // NOTE: Invocations of builtins may return failure objects
1909
  // instead of a proper result. The builtin entry handles
1910
  // this by performing a garbage collection and retrying the
1911
  // builtin once.
1912

    
1913
  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
1914
  // The reason for this is that these arguments would need to be saved anyway
1915
  // so it's faster to set them up directly.
1916
  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
1917

    
1918
  // Compute the argv pointer in a callee-saved register.
1919
  __ Addu(s1, sp, s1);
1920

    
1921
  // Enter the exit frame that transitions from JavaScript to C++.
1922
  FrameScope scope(masm, StackFrame::MANUAL);
1923
  __ EnterExitFrame(save_doubles_);
1924

    
1925
  // s0: number of arguments (C callee-saved)
1926
  // s1: pointer to first argument (C callee-saved)
1927
  // s2: pointer to builtin function (C callee-saved)
1928

    
1929
  Label throw_normal_exception;
1930
  Label throw_termination_exception;
1931
  Label throw_out_of_memory_exception;
1932

    
1933
  // Call into the runtime system.
1934
  GenerateCore(masm,
1935
               &throw_normal_exception,
1936
               &throw_termination_exception,
1937
               &throw_out_of_memory_exception,
1938
               false,
1939
               false);
1940

    
1941
  // Do space-specific GC and retry runtime call.
1942
  GenerateCore(masm,
1943
               &throw_normal_exception,
1944
               &throw_termination_exception,
1945
               &throw_out_of_memory_exception,
1946
               true,
1947
               false);
1948

    
1949
  // Do full GC and retry runtime call one final time.
1950
  Failure* failure = Failure::InternalError();
1951
  __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
1952
  GenerateCore(masm,
1953
               &throw_normal_exception,
1954
               &throw_termination_exception,
1955
               &throw_out_of_memory_exception,
1956
               true,
1957
               true);
1958

    
1959
  __ bind(&throw_out_of_memory_exception);
1960
  // Set external caught exception to false.
1961
  Isolate* isolate = masm->isolate();
1962
  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
1963
                                    isolate);
1964
  __ li(a0, Operand(false, RelocInfo::NONE32));
1965
  __ li(a2, Operand(external_caught));
1966
  __ sw(a0, MemOperand(a2));
1967

    
1968
  // Set pending exception and v0 to out of memory exception.
1969
  Label already_have_failure;
1970
  JumpIfOOM(masm, v0, t0, &already_have_failure);
1971
  Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
1972
  __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
1973
  __ bind(&already_have_failure);
1974
  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1975
                                      isolate)));
1976
  __ sw(v0, MemOperand(a2));
1977
  // Fall through to the next label.
1978

    
1979
  __ bind(&throw_termination_exception);
1980
  __ ThrowUncatchable(v0);
1981

    
1982
  __ bind(&throw_normal_exception);
1983
  __ Throw(v0);
1984
}
1985

    
1986

    
1987
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
1988
  Label invoke, handler_entry, exit;
1989
  Isolate* isolate = masm->isolate();
1990

    
1991
  // Registers:
1992
  // a0: entry address
1993
  // a1: function
1994
  // a2: receiver
1995
  // a3: argc
1996
  //
1997
  // Stack:
1998
  // 4 args slots
1999
  // args
2000

    
2001
  ProfileEntryHookStub::MaybeCallEntryHook(masm);
2002

    
2003
  // Save callee saved registers on the stack.
2004
  __ MultiPush(kCalleeSaved | ra.bit());
2005

    
2006
  // Save callee-saved FPU registers.
2007
  __ MultiPushFPU(kCalleeSavedFPU);
2008
  // Set up the reserved register for 0.0.
2009
  __ Move(kDoubleRegZero, 0.0);
2010

    
2011

    
2012
  // Load argv in s0 register.
2013
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
2014
  offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
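  // argv is passed on the stack (a0-a3 already hold the entry address,
  // function, receiver and argc, see above); skip the callee-saved GPRs
  // (+ ra) and FPU registers pushed above, plus the C argument slots, to
  // reach it.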
2015

    
2016
  __ InitializeRootRegister();
2017
  __ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
2018

    
2019
  // We build an EntryFrame.
2020
  __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
2021
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
2022
  __ li(t2, Operand(Smi::FromInt(marker)));
2023
  __ li(t1, Operand(Smi::FromInt(marker)));
2024
  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2025
                                      isolate)));
2026
  __ lw(t0, MemOperand(t0));
2027
  __ Push(t3, t2, t1, t0);
2028
  // Set up frame pointer for the frame to be pushed.
2029
  __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
2030

    
2031
  // Registers:
2032
  // a0: entry_address
2033
  // a1: function
2034
  // a2: receiver_pointer
2035
  // a3: argc
2036
  // s0: argv
2037
  //
2038
  // Stack:
2039
  // caller fp          |
2040
  // function slot      | entry frame
2041
  // context slot       |
2042
  // bad fp (0xff...f)  |
2043
  // callee saved registers + ra
2044
  // 4 args slots
2045
  // args
2046

    
2047
  // If this is the outermost JS call, set js_entry_sp value.
2048
  Label non_outermost_js;
2049
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
2050
  __ li(t1, Operand(ExternalReference(js_entry_sp)));
2051
  __ lw(t2, MemOperand(t1));
2052
  __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg));
2053
  __ sw(fp, MemOperand(t1));
2054
  __ li(t0, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
2055
  Label cont;
2056
  __ b(&cont);
2057
  __ nop();   // Branch delay slot nop.
2058
  __ bind(&non_outermost_js);
2059
  __ li(t0, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
2060
  __ bind(&cont);
2061
  __ push(t0);
2062

    
2063
  // Jump to a faked try block that does the invoke, with a faked catch
2064
  // block that sets the pending exception.
2065
  __ jmp(&invoke);
2066
  __ bind(&handler_entry);
2067
  handler_offset_ = handler_entry.pos();
2068
  // Caught exception: Store result (exception) in the pending exception
2069
  // field in the JSEnv and return a failure sentinel.  Coming in here the
2070
  // fp will be invalid because the PushTryHandler below sets it to 0 to
2071
  // signal the existence of the JSEntry frame.
2072
  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2073
                                      isolate)));
2074
  __ sw(v0, MemOperand(t0));  // We come back from 'invoke'. result is in v0.
2075
  __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
2076
  __ b(&exit);  // b exposes branch delay slot.
2077
  __ nop();   // Branch delay slot nop.
2078

    
2079
  // Invoke: Link this frame into the handler chain.  There's only one
2080
  // handler block in this code object, so its index is 0.
2081
  __ bind(&invoke);
2082
  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
2083
  // If an exception not caught by another handler occurs, this handler
2084
  // returns control to the code after the bal(&invoke) above, which
2085
  // restores all kCalleeSaved registers (including cp and fp) to their
2086
  // saved values before returning a failure to C.
2087

    
2088
  // Clear any pending exceptions.
2089
  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
2090
  __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2091
                                      isolate)));
2092
  __ sw(t1, MemOperand(t0));
2093

    
2094
  // Invoke the function by calling through JS entry trampoline builtin.
2095
  // Notice that we cannot store a reference to the trampoline code directly in
2096
  // this stub, because runtime stubs are not traversed when doing GC.
2097

    
2098
  // Registers:
2099
  // a0: entry_address
2100
  // a1: function
2101
  // a2: receiver_pointer
2102
  // a3: argc
2103
  // s0: argv
2104
  //
2105
  // Stack:
2106
  // handler frame
2107
  // entry frame
2108
  // callee saved registers + ra
2109
  // 4 args slots
2110
  // args
2111

    
2112
  if (is_construct) {
2113
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
2114
                                      isolate);
2115
    __ li(t0, Operand(construct_entry));
2116
  } else {
2117
    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
2118
    __ li(t0, Operand(entry));
2119
  }
2120
  __ lw(t9, MemOperand(t0));  // Deref address.
2121

    
2122
  // Call JSEntryTrampoline.
2123
  __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
2124
  __ Call(t9);
2125

    
2126
  // Unlink this frame from the handler chain.
2127
  __ PopTryHandler();
2128

    
2129
  __ bind(&exit);  // v0 holds result
2130
  // Check if the current stack frame is marked as the outermost JS frame.
2131
  Label non_outermost_js_2;
2132
  __ pop(t1);
2133
  __ Branch(&non_outermost_js_2,
2134
            ne,
2135
            t1,
2136
            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
2137
  __ li(t1, Operand(ExternalReference(js_entry_sp)));
2138
  __ sw(zero_reg, MemOperand(t1));
2139
  __ bind(&non_outermost_js_2);
2140

    
2141
  // Restore the top frame descriptors from the stack.
2142
  __ pop(t1);
2143
  __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2144
                                      isolate)));
2145
  __ sw(t1, MemOperand(t0));
2146

    
2147
  // Reset the stack to the callee saved registers.
2148
  __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
2149

    
2150
  // Restore callee-saved fpu registers.
2151
  __ MultiPopFPU(kCalleeSavedFPU);
2152

    
2153
  // Restore callee saved registers from the stack.
2154
  __ MultiPop(kCalleeSaved | ra.bit());
2155
  // Return.
2156
  __ Jump(ra);
2157
}
2158

    
2159

    
2160
// Uses registers a0 to t0.
2161
// Expected input (depending on whether args are in registers or on the stack):
2162
// * object: a0 or at sp + 1 * kPointerSize.
2163
// * function: a1 or at sp.
2164
//
2165
// An inlined call site may have been generated before calling this stub.
2166
// In this case the offset to the inline site to patch is passed on the stack,
2167
// in the safepoint slot for register t0.
2168
void InstanceofStub::Generate(MacroAssembler* masm) {
2169
  // Call site inlining and patching implies arguments in registers.
2170
  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
2171
  // ReturnTrueFalse is only implemented for inlined call sites.
2172
  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
2173

    
2174
  // Fixed register usage throughout the stub:
2175
  const Register object = a0;  // Object (lhs).
2176
  Register map = a3;  // Map of the object.
2177
  const Register function = a1;  // Function (rhs).
2178
  const Register prototype = t0;  // Prototype of the function.
2179
  const Register inline_site = t5;
2180
  const Register scratch = a2;
2181

    
2182
  const int32_t kDeltaToLoadBoolResult = 5 * kPointerSize;
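  // Offset from the patched inlined map-check site to the location where the
  // boolean result is written back (see PatchRelocatedValue below).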
2183

    
2184
  Label slow, loop, is_instance, is_not_instance, not_js_object;
2185

    
2186
  if (!HasArgsInRegisters()) {
2187
    __ lw(object, MemOperand(sp, 1 * kPointerSize));
2188
    __ lw(function, MemOperand(sp, 0));
2189
  }
2190

    
2191
  // Check that the left hand is a JS object and load map.
2192
  __ JumpIfSmi(object, &not_js_object);
2193
  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
2194

    
2195
  // If there is a call site cache don't look in the global cache, but do the
2196
  // real lookup and update the call site cache.
2197
  if (!HasCallSiteInlineCheck()) {
2198
    Label miss;
2199
    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
2200
    __ Branch(&miss, ne, function, Operand(at));
2201
    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
2202
    __ Branch(&miss, ne, map, Operand(at));
2203
    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2204
    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2205

    
2206
    __ bind(&miss);
2207
  }
2208

    
2209
  // Get the prototype of the function.
2210
  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
2211

    
2212
  // Check that the function prototype is a JS object.
2213
  __ JumpIfSmi(prototype, &slow);
2214
  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
2215

    
2216
  // Update the global instanceof or call site inlined cache with the current
2217
  // map and function. The cached answer will be set when it is known below.
2218
  if (!HasCallSiteInlineCheck()) {
2219
    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
2220
    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
2221
  } else {
2222
    ASSERT(HasArgsInRegisters());
2223
    // Patch the (relocated) inlined map check.
2224

    
2225
    // The offset was stored in t0 safepoint slot.
2226
    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
2227
    __ LoadFromSafepointRegisterSlot(scratch, t0);
2228
    __ Subu(inline_site, ra, scratch);
2229
    // Get the map location in scratch and patch it.
2230
    __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
2231
    __ sw(map, FieldMemOperand(scratch, Cell::kValueOffset));
2232
  }
2233

    
2234
  // Register mapping: a3 is object map and t0 is function prototype.
2235
  // Get prototype of object into a2.
2236
  __ lw(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
2237

    
2238
  // We don't need map any more. Use it as a scratch register.
2239
  Register scratch2 = map;
2240
  map = no_reg;
2241

    
2242
  // Loop through the prototype chain looking for the function prototype.
2243
  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
2244
  __ bind(&loop);
2245
  __ Branch(&is_instance, eq, scratch, Operand(prototype));
2246
  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
2247
  __ lw(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
2248
  __ lw(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
2249
  __ Branch(&loop);
2250

    
2251
  __ bind(&is_instance);
2252
  ASSERT(Smi::FromInt(0) == 0);
2253
  if (!HasCallSiteInlineCheck()) {
2254
    __ mov(v0, zero_reg);
2255
    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2256
  } else {
2257
    // Patch the call site to return true.
2258
    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2259
    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2260
    // Get the boolean result location in scratch and patch it.
2261
    __ PatchRelocatedValue(inline_site, scratch, v0);
2262

    
2263
    if (!ReturnTrueFalseObject()) {
2264
      ASSERT_EQ(Smi::FromInt(0), 0);
2265
      __ mov(v0, zero_reg);
2266
    }
2267
  }
2268
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2269

    
2270
  __ bind(&is_not_instance);
2271
  if (!HasCallSiteInlineCheck()) {
2272
    __ li(v0, Operand(Smi::FromInt(1)));
2273
    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
2274
  } else {
2275
    // Patch the call site to return false.
2276
    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2277
    __ Addu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
2278
    // Get the boolean result location in scratch and patch it.
2279
    __ PatchRelocatedValue(inline_site, scratch, v0);
2280

    
2281
    if (!ReturnTrueFalseObject()) {
2282
      __ li(v0, Operand(Smi::FromInt(1)));
2283
    }
2284
  }
2285

    
2286
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2287

    
2288
  Label object_not_null, object_not_null_or_smi;
2289
  __ bind(&not_js_object);
2290
  // Before null, smi and string value checks, check that the rhs is a function
2291
  // as for a non-function rhs an exception needs to be thrown.
2292
  __ JumpIfSmi(function, &slow);
2293
  __ GetObjectType(function, scratch2, scratch);
2294
  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
2295

    
2296
  // Null is not instance of anything.
2297
  __ Branch(&object_not_null,
2298
            ne,
2299
            scratch,
2300
            Operand(masm->isolate()->factory()->null_value()));
2301
  __ li(v0, Operand(Smi::FromInt(1)));
2302
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2303

    
2304
  __ bind(&object_not_null);
2305
  // Smi values are not instances of anything.
2306
  __ JumpIfNotSmi(object, &object_not_null_or_smi);
2307
  __ li(v0, Operand(Smi::FromInt(1)));
2308
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2309

    
2310
  __ bind(&object_not_null_or_smi);
2311
  // String values are not instances of anything.
2312
  __ IsObjectJSStringType(object, scratch, &slow);
2313
  __ li(v0, Operand(Smi::FromInt(1)));
2314
  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2315

    
2316
  // Slow-case.  Tail call builtin.
2317
  __ bind(&slow);
2318
  if (!ReturnTrueFalseObject()) {
2319
    if (HasArgsInRegisters()) {
2320
      __ Push(a0, a1);
2321
    }
2322
  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
2323
  } else {
2324
    {
2325
      FrameScope scope(masm, StackFrame::INTERNAL);
2326
      __ Push(a0, a1);
2327
      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
2328
    }
2329
    __ mov(a0, v0);
2330
    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
2331
    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
2332
    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
2333
    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
2334
  }
2335
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver;
  if (kind() == Code::KEYED_LOAD_IC) {
    // ----------- S t a t e -------------
    //  -- ra    : return address
    //  -- a0    : key
    //  -- a1    : receiver
    // -----------------------------------
    __ Branch(&miss, ne, a0,
        Operand(masm->isolate()->factory()->prototype_string()));
    receiver = a1;
  } else {
    ASSERT(kind() == Code::LOAD_IC);
    // ----------- S t a t e -------------
    //  -- a2    : name
    //  -- ra    : return address
    //  -- a0    : receiver
    //  -- sp[0] : receiver
    // -----------------------------------
    receiver = a0;
  }

  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
  __ bind(&miss);
  StubCompiler::TailCallBuiltin(
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}


void StringLengthStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver;
  if (kind() == Code::KEYED_LOAD_IC) {
    // ----------- S t a t e -------------
    //  -- ra    : return address
    //  -- a0    : key
    //  -- a1    : receiver
    // -----------------------------------
    __ Branch(&miss, ne, a0,
        Operand(masm->isolate()->factory()->length_string()));
    receiver = a1;
  } else {
    ASSERT(kind() == Code::LOAD_IC);
    // ----------- S t a t e -------------
    //  -- a2    : name
    //  -- ra    : return address
    //  -- a0    : receiver
    //  -- sp[0] : receiver
    // -----------------------------------
    receiver = a0;
  }

  StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);

  __ bind(&miss);
  StubCompiler::TailCallBuiltin(
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}


void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
2400
  // This accepts as a receiver anything JSArray::SetElementsLength accepts
2401
  // (currently anything except for external arrays which means anything with
2402
  // elements of FixedArray type).  Value must be a number, but only smis are
2403
  // accepted as the most common case.
2404
  Label miss;
2405

    
2406
  Register receiver;
2407
  Register value;
2408
  if (kind() == Code::KEYED_STORE_IC) {
2409
    // ----------- S t a t e -------------
2410
    //  -- ra    : return address
2411
    //  -- a0    : value
2412
    //  -- a1    : key
2413
    //  -- a2    : receiver
2414
    // -----------------------------------
2415
    __ Branch(&miss, ne, a1,
2416
        Operand(masm->isolate()->factory()->length_string()));
2417
    receiver = a2;
2418
    value = a0;
2419
  } else {
2420
    ASSERT(kind() == Code::STORE_IC);
2421
    // ----------- S t a t e -------------
2422
    //  -- ra    : return address
2423
    //  -- a0    : value
2424
    //  -- a1    : receiver
2425
    //  -- a2    : key
2426
    // -----------------------------------
2427
    receiver = a1;
2428
    value = a0;
2429
  }
2430
  Register scratch = a3;
2431

    
2432
  // Check that the receiver isn't a smi.
2433
  __ JumpIfSmi(receiver, &miss);
2434

    
2435
  // Check that the object is a JS array.
2436
  __ GetObjectType(receiver, scratch, scratch);
2437
  __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE));
2438

    
2439
  // Check that elements are FixedArray.
2440
  // We rely on StoreIC_ArrayLength below to deal with all types of
2441
  // fast elements (including COW).
2442
  __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
2443
  __ GetObjectType(scratch, scratch, scratch);
2444
  __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE));
2445

    
2446
  // Check that the array has fast properties, otherwise the length
2447
  // property might have been redefined.
2448
  __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
2449
  __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
2450
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
2451
  __ Branch(&miss, eq, scratch, Operand(at));
2452

    
2453
  // Check that value is a smi.
2454
  __ JumpIfNotSmi(value, &miss);
2455

    
2456
  // Prepare tail call to StoreIC_ArrayLength.
2457
  __ Push(receiver, value);
2458

    
2459
  ExternalReference ref =
2460
      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
2461
  __ TailCallExternalReference(ref, 2, 1);
2462

    
2463
  __ bind(&miss);
2464

    
2465
  StubCompiler::TailCallBuiltin(
2466
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
2467
}


Register InstanceofStub::left() { return a0; }


Register InstanceofStub::right() { return a1; }


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The displacement is the offset of the last parameter (if any)
  // relative to the frame pointer.
  const int kDisplacement =
      StandardFrameConstants::kCallerSPOffset - kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(a1, &slow);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
  __ Branch(&adaptor,
            eq,
            a3,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Check index (a1) against formal parameters count limit passed in
  // through register a0. Use unsigned comparison to get negative
  // check for free.
  __ Branch(&slow, hs, a1, Operand(a0));

  // Read the argument from the stack and return it.
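  // a0 (parameter count) and a1 (key) are smis; because a smi is the value
  // shifted left by kSmiTagSize, a single shift by kPointerSizeLog2 -
  // kSmiTagSize turns their difference into a byte offset.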
  __ subu(a3, a0, a1);
  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, fp, Operand(t3));
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, MemOperand(a3, kDisplacement));

  // Arguments adaptor case: Check index (a1) against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));

  // Read the argument from the adaptor frame and return it.
  __ subu(a3, a0, a1);
  __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, a2, Operand(t3));
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, MemOperand(a3, kDisplacement));

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ push(a1);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
2527

    
2528

    
2529
void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
2530
  // sp[0] : number of parameters
2531
  // sp[4] : receiver displacement
2532
  // sp[8] : function
2533
  // Check if the calling frame is an arguments adaptor frame.
2534
  Label runtime;
2535
  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2536
  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2537
  __ Branch(&runtime,
2538
            ne,
2539
            a2,
2540
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2541

    
2542
  // Patch the arguments.length and the parameters pointer in the current frame.
2543
  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2544
  __ sw(a2, MemOperand(sp, 0 * kPointerSize));
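  // a2 holds the argument count as a smi; shifting it left by one more bit
  // gives count * kPointerSize, used to step over the adaptor frame's
  // arguments to the caller's stack parameters.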
2545
  __ sll(t3, a2, 1);
2546
  __ Addu(a3, a3, Operand(t3));
2547
  __ addiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
2548
  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2549

    
2550
  __ bind(&runtime);
2551
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
2552
}
2553

    
2554

    
2555
void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
2556
  // Stack layout:
2557
  //  sp[0] : number of parameters (tagged)
2558
  //  sp[4] : address of receiver argument
2559
  //  sp[8] : function
2560
  // Registers used over whole function:
2561
  //  t2 : allocated object (tagged)
2562
  //  t5 : mapped parameter count (tagged)
2563

    
2564
  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
2565
  // a1 = parameter count (tagged)
2566

    
2567
  // Check if the calling frame is an arguments adaptor frame.
2568
  Label runtime;
2569
  Label adaptor_frame, try_allocate;
2570
  __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2571
  __ lw(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
2572
  __ Branch(&adaptor_frame,
2573
            eq,
2574
            a2,
2575
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2576

    
2577
  // No adaptor, parameter count = argument count.
2578
  __ mov(a2, a1);
2579
  __ b(&try_allocate);
2580
  __ nop();   // Branch delay slot nop.
2581

    
2582
  // We have an adaptor frame. Patch the parameters pointer.
2583
  __ bind(&adaptor_frame);
2584
  __ lw(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
2585
  __ sll(t6, a2, 1);
2586
  __ Addu(a3, a3, Operand(t6));
2587
  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
2588
  __ sw(a3, MemOperand(sp, 1 * kPointerSize));
2589

    
2590
  // a1 = parameter count (tagged)
2591
  // a2 = argument count (tagged)
2592
  // Compute the mapped parameter count = min(a1, a2) in a1.
2593
  Label skip_min;
2594
  __ Branch(&skip_min, lt, a1, Operand(a2));
2595
  __ mov(a1, a2);
2596
  __ bind(&skip_min);
2597

    
2598
  __ bind(&try_allocate);
2599

    
2600
  // Compute the sizes of backing store, parameter map, and arguments object.
2601
  // 1. Parameter map, has 2 extra words containing context and backing store.
2602
  const int kParameterMapHeaderSize =
2603
      FixedArray::kHeaderSize + 2 * kPointerSize;
2604
  // If there are no mapped parameters, we do not need the parameter_map.
2605
  Label param_map_size;
2606
  ASSERT_EQ(0, Smi::FromInt(0));
2607
  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
2608
  __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
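  // a1 is the mapped parameter count as a smi (count << 1); one more shift
  // gives count * kPointerSize, and adding the header yields the parameter
  // map size in bytes.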
2609
  __ sll(t5, a1, 1);
2610
  __ addiu(t5, t5, kParameterMapHeaderSize);
2611
  __ bind(&param_map_size);
2612

    
2613
  // 2. Backing store.
2614
  __ sll(t6, a2, 1);
2615
  __ Addu(t5, t5, Operand(t6));
2616
  __ Addu(t5, t5, Operand(FixedArray::kHeaderSize));
2617

    
2618
  // 3. Arguments object.
2619
  __ Addu(t5, t5, Operand(Heap::kArgumentsObjectSize));
2620

    
2621
  // Do the allocation of all three objects in one go.
2622
  __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
2623

    
2624
  // v0 = address of new object(s) (tagged)
2625
  // a2 = argument count (tagged)
2626
  // Get the arguments boilerplate from the current native context into t0.
2627
  const int kNormalOffset =
2628
      Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
2629
  const int kAliasedOffset =
2630
      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
2631

    
2632
  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2633
  __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
2634
  Label skip2_ne, skip2_eq;
2635
  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
2636
  __ lw(t0, MemOperand(t0, kNormalOffset));
2637
  __ bind(&skip2_ne);
2638

    
2639
  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
2640
  __ lw(t0, MemOperand(t0, kAliasedOffset));
2641
  __ bind(&skip2_eq);
2642

    
2643
  // v0 = address of new object (tagged)
2644
  // a1 = mapped parameter count (tagged)
2645
  // a2 = argument count (tagged)
2646
  // t0 = address of boilerplate object (tagged)
2647
  // Copy the JS object part.
2648
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
2649
    __ lw(a3, FieldMemOperand(t0, i));
2650
    __ sw(a3, FieldMemOperand(v0, i));
2651
  }
2652

    
2653
  // Set up the callee in-object property.
2654
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
2655
  __ lw(a3, MemOperand(sp, 2 * kPointerSize));
2656
  const int kCalleeOffset = JSObject::kHeaderSize +
2657
      Heap::kArgumentsCalleeIndex * kPointerSize;
2658
  __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
2659

    
2660
  // Use the length (smi tagged) and set that as an in-object property too.
2661
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2662
  const int kLengthOffset = JSObject::kHeaderSize +
2663
      Heap::kArgumentsLengthIndex * kPointerSize;
2664
  __ sw(a2, FieldMemOperand(v0, kLengthOffset));

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, t0 will point there, otherwise
  // it will point to the backing store.
  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSize));
  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));

  // v0 = address of new object (tagged)
  // a1 = mapped parameter count (tagged)
  // a2 = argument count (tagged)
  // t0 = address of parameter map or backing store (tagged)
  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  Label skip3;
  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
  // Move backing store address to a3, because it is
  // expected there when filling in the unmapped arguments.
  __ mov(a3, t0);
  __ bind(&skip3);

  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));

  __ LoadRoot(t2, Heap::kNonStrictArgumentsElementsMapRootIndex);
  __ sw(t2, FieldMemOperand(t0, FixedArray::kMapOffset));
  __ Addu(t2, a1, Operand(Smi::FromInt(2)));
  __ sw(t2, FieldMemOperand(t0, FixedArray::kLengthOffset));
  __ sw(cp, FieldMemOperand(t0, FixedArray::kHeaderSize + 0 * kPointerSize));
  __ sll(t6, a1, 1);
  __ Addu(t2, t0, Operand(t6));
  __ Addu(t2, t2, Operand(kParameterMapHeaderSize));
  __ sw(t2, FieldMemOperand(t0, FixedArray::kHeaderSize + 1 * kPointerSize));

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop, parameters_test;
  __ mov(t2, a1);
  __ lw(t5, MemOperand(sp, 0 * kPointerSize));
  __ Addu(t5, t5, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
  __ Subu(t5, t5, Operand(a1));
  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ sll(t6, t2, 1);
  __ Addu(a3, t0, Operand(t6));
  __ Addu(a3, a3, Operand(kParameterMapHeaderSize));

  // t2 = loop variable (tagged)
  // a1 = mapping index (tagged)
  // a3 = address of backing store (tagged)
  // t0 = address of parameter map (tagged)
  // t1 = temporary scratch (a.o., for address calculation)
  // t3 = the hole value
  __ jmp(&parameters_test);

  __ bind(&parameters_loop);
  __ Subu(t2, t2, Operand(Smi::FromInt(1)));
  __ sll(t1, t2, 1);
  __ Addu(t1, t1, Operand(kParameterMapHeaderSize - kHeapObjectTag));
  __ Addu(t6, t0, t1);
  __ sw(t5, MemOperand(t6));
  __ Subu(t1, t1, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
  __ Addu(t6, a3, t1);
  __ sw(t3, MemOperand(t6));
  __ Addu(t5, t5, Operand(Smi::FromInt(1)));
  __ bind(&parameters_test);
  __ Branch(&parameters_loop, ne, t2, Operand(Smi::FromInt(0)));

  __ bind(&skip_parameter_map);
  // a2 = argument count (tagged)
  // a3 = address of backing store (tagged)
  // t1 = scratch
  // Copy arguments header and remaining slots (if there are any).
  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
  __ sw(t1, FieldMemOperand(a3, FixedArray::kMapOffset));
  __ sw(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));

  Label arguments_loop, arguments_test;
  __ mov(t5, a1);
  __ lw(t0, MemOperand(sp, 1 * kPointerSize));
  __ sll(t6, t5, 1);
  __ Subu(t0, t0, Operand(t6));
  __ jmp(&arguments_test);

  __ bind(&arguments_loop);
  __ Subu(t0, t0, Operand(kPointerSize));
  __ lw(t2, MemOperand(t0, 0));
  __ sll(t6, t5, 1);
  __ Addu(t1, a3, Operand(t6));
  __ sw(t2, FieldMemOperand(t1, FixedArray::kHeaderSize));
  __ Addu(t5, t5, Operand(Smi::FromInt(1)));

  __ bind(&arguments_test);
  __ Branch(&arguments_loop, lt, t5, Operand(a2));

  // Return and remove the on-stack parameters.
  __ DropAndRet(3);

  // Do the runtime call to allocate the arguments object.
  // a2 = argument count (tagged)
  __ bind(&runtime);
  __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
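// In effect, the fast path above carves one allocation into: the arguments
// object itself (Heap::kArgumentsObjectSize bytes), then the parameter map
// (only when the mapped parameter count is non-zero), then the backing-store
// FixedArray that holds the actual argument values. t0 points at the
// parameter map and a3 at the backing store while the two copy loops above
// fill them in.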


void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // sp[0] : number of parameters
  // sp[4] : receiver displacement
  // sp[8] : function
  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
  __ Branch(&adaptor_frame,
            eq,
            a3,
            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));

  // Get the length from the frame.
  __ lw(a1, MemOperand(sp, 0));
  __ Branch(&try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ sw(a1, MemOperand(sp, 0));
  __ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(a3, a2, Operand(at));

  __ Addu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ sw(a3, MemOperand(sp, 1 * kPointerSize));

  // Try the new space allocation. Start out with computing the size
  // of the arguments object and the elements array in words.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
  __ srl(a1, a1, kSmiTagSize);

  __ Addu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
  __ bind(&add_arguments_object);
  __ Addu(a1, a1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));

  // Do the allocation of both objects in one go.
  __ Allocate(a1, v0, a2, a3, &runtime,
              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

  // Get the arguments boilerplate from the current native context.
  __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
  __ lw(t0, MemOperand(t0, Context::SlotOffset(
      Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));

  // Copy the JS object part.
  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ lw(a1, MemOperand(sp, 0 * kPointerSize));
  __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
      Heap::kArgumentsLengthIndex * kPointerSize));

  Label done;
  __ Branch(&done, eq, a1, Operand(zero_reg));

  // Get the parameters pointer from the stack.
  __ lw(a2, MemOperand(sp, 1 * kPointerSize));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ Addu(t0, v0, Operand(Heap::kArgumentsObjectSizeStrict));
  __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
  __ sw(a3, FieldMemOperand(t0, FixedArray::kMapOffset));
  __ sw(a1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // Untag the length for the loop.
  __ srl(a1, a1, kSmiTagSize);

  // Copy the fixed array slots.
  Label loop;
  // Set up t0 to point to the first array slot.
  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ bind(&loop);
  // Pre-decrement a2 with kPointerSize on each iteration.
  // Pre-decrement in order to skip receiver.
  __ Addu(a2, a2, Operand(-kPointerSize));
  __ lw(a3, MemOperand(a2));
  // Post-increment t0 with kPointerSize on each iteration.
  __ sw(a3, MemOperand(t0));
  __ Addu(t0, t0, Operand(kPointerSize));
  __ Subu(a1, a1, Operand(1));
  __ Branch(&loop, ne, a1, Operand(zero_reg));

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ DropAndRet(3);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
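// Unlike the non-strict case, the strict arguments object built above has no
// parameter map: the boilerplate JSObject is copied, its length property is
// set, and the arguments are then copied verbatim into a plain FixedArray
// that serves as the elements backing store.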


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to runtime if native RegExp is not selected at compile
  // time, or if the regexp entry in generated code has been turned off by a
  // runtime switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  sp[0]: last_match_info (expected JSArray)
  //  sp[4]: previous index
  //  sp[8]: subject string
  //  sp[12]: JSRegExp object

  const int kLastMatchInfoOffset = 0 * kPointerSize;
  const int kPreviousIndexOffset = 1 * kPointerSize;
  const int kSubjectOffset = 2 * kPointerSize;
  const int kJSRegExpOffset = 3 * kPointerSize;

  Isolate* isolate = masm->isolate();

  Label runtime;
  // Allocation of registers for this function. These are in callee save
  // registers and will be preserved by the call to the native RegExp code, as
  // this code is called using the normal C calling convention. When calling
  // directly from generated code the native RegExp code will not do a GC and
  // therefore the contents of these registers are safe to use after the call.
  // MIPS - using s0..s2, since we are not using CEntry Stub.
  Register subject = s0;
  Register regexp_data = s1;
  Register last_match_info_elements = s2;

  // Ensure that a RegExp stack is allocated.
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(
          isolate);
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate);
  __ li(a0, Operand(address_of_regexp_stack_memory_size));
  __ lw(a0, MemOperand(a0, 0));
  __ Branch(&runtime, eq, a0, Operand(zero_reg));

  // Check that the first argument is a JSRegExp object.
  __ lw(a0, MemOperand(sp, kJSRegExpOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ JumpIfSmi(a0, &runtime);
  __ GetObjectType(a0, a1, a1);
  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ And(t0, regexp_data, Operand(kSmiTagMask));
    __ Check(nz,
             kUnexpectedTypeForRegExpDataFixedArrayExpected,
             t0,
             Operand(zero_reg));
    __ GetObjectType(regexp_data, a0, a0);
    __ Check(eq,
             kUnexpectedTypeForRegExpDataFixedArrayExpected,
             a0,
             Operand(FIXED_ARRAY_TYPE));
  }

  // regexp_data: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ lw(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));

  // regexp_data: RegExp data (FixedArray)
  // Check that the number of captures fit in the static offsets vector buffer.
  __ lw(a2,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or          number_of_captures * 2 <= offsets vector size - 2
  // Multiplying by 2 comes for free since a2 is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ Branch(
      &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));

  // Reset offset for possibly sliced string.
  __ mov(t0, zero_reg);
  __ lw(subject, MemOperand(sp, kSubjectOffset));
  __ JumpIfSmi(subject, &runtime);
  __ mov(a3, subject);  // Make a copy of the original subject string.
  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  // subject: subject string
  // a3: subject string
  // a0: subject string instance type
  // regexp_data: RegExp data (FixedArray)
  // Handle subject string according to its encoding and representation:
  // (1) Sequential string?  If yes, go to (5).
  // (2) Anything but sequential or cons?  If yes, go to (6).
  // (3) Cons string.  If the string is flat, replace subject with first string.
  //     Otherwise bailout.
  // (4) Is subject external?  If yes, go to (7).
  // (5) Sequential string.  Load regexp code according to encoding.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (6) Not a long external string?  If yes, go to (8).
  // (7) External string.  Make it, offset-wise, look like a sequential string.
  //     Go to (5).
  // (8) Short external string or not a string?  If yes, bail out to runtime.
  // (9) Sliced string.  Replace subject with parent.  Go to (4).

  Label seq_string /* 5 */, external_string /* 7 */,
        check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
        not_long_external /* 8 */;

  // (1) Sequential string?  If yes, go to (5).
  __ And(a1,
         a0,
         Operand(kIsNotStringMask |
                 kStringRepresentationMask |
                 kShortExternalStringMask));
  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
  __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (5).

  // (2) Anything but sequential or cons?  If yes, go to (6).
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  // Go to (6).
  __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));

  // (3) Cons string.  Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
  __ LoadRoot(a1, Heap::kempty_stringRootIndex);
  __ Branch(&runtime, ne, a0, Operand(a1));
  __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset));

  // (4) Is subject external?  If yes, go to (7).
  __ bind(&check_underlying);
  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, a0, Operand(kStringRepresentationMask));
  // The underlying external string is never a short external string.
  STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
  STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
  __ Branch(&external_string, ne, at, Operand(zero_reg));  // Go to (7).

  // (5) Sequential string.  Load regexp code according to encoding.
  __ bind(&seq_string);
  // subject: sequential subject string (or look-alike, external string)
  // a3: original subject string
  // Load previous index and check range before a3 is overwritten.  We have to
  // use a3 instead of subject here because subject might have been only made
  // to look like a sequential string when it actually is an external string.
  __ lw(a1, MemOperand(sp, kPreviousIndexOffset));
  __ JumpIfNotSmi(a1, &runtime);
  __ lw(a3, FieldMemOperand(a3, String::kLengthOffset));
  __ Branch(&runtime, ls, a3, Operand(a1));
  __ sra(a1, a1, kSmiTagSize);  // Untag the Smi.

  STATIC_ASSERT(kStringEncodingMask == 4);
  STATIC_ASSERT(kOneByteStringTag == 4);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ASCII.
  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
  __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
  __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
  __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.

  // (E) Carry on.  String handling is done.
  // t9: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(t9, &runtime);

  // a1: previous index
  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
  // t9: code
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // All checks done. Now push arguments for native regexp code.
  __ IncrementCounter(isolate->counters()->regexp_entry_native(),
                      1, a0, a2);

  // Isolates: note we add an additional parameter here (isolate pointer).
  const int kRegExpExecuteArguments = 9;
  const int kParameterRegisters = 4;
  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);

  // Stack pointer now points to cell where return address is to be written.
  // Arguments are before that on the stack or in registers, meaning we
  // treat the return address as argument 5. Thus every argument after that
  // needs to be shifted back by 1. Since DirectCEntryStub will handle
  // allocating space for the c argument slots, we don't need to calculate
  // that into the argument positions on the stack. This is how the stack will
  // look (sp meaning the value of sp at this moment):
  // [sp + 5] - Argument 9
  // [sp + 4] - Argument 8
  // [sp + 3] - Argument 7
  // [sp + 2] - Argument 6
  // [sp + 1] - Argument 5
  // [sp + 0] - saved ra

  // Argument 9: Pass current isolate address.
  // CFunctionArgumentOperand handles MIPS stack argument slots.
  __ li(a0, Operand(ExternalReference::isolate_address(isolate)));
  __ sw(a0, MemOperand(sp, 5 * kPointerSize));

  // Argument 8: Indicate that this is a direct call from JavaScript.
  __ li(a0, Operand(1));
  __ sw(a0, MemOperand(sp, 4 * kPointerSize));

  // Argument 7: Start (high end) of backtracking stack memory area.
  __ li(a0, Operand(address_of_regexp_stack_memory_address));
  __ lw(a0, MemOperand(a0, 0));
  __ li(a2, Operand(address_of_regexp_stack_memory_size));
  __ lw(a2, MemOperand(a2, 0));
  __ addu(a0, a0, a2);
  __ sw(a0, MemOperand(sp, 3 * kPointerSize));

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global.  This does not affect non-global regexps.
  __ mov(a0, zero_reg);
  __ sw(a0, MemOperand(sp, 2 * kPointerSize));

  // Argument 5: static offsets vector buffer.
  __ li(a0, Operand(
        ExternalReference::address_of_static_offsets_vector(isolate)));
  __ sw(a0, MemOperand(sp, 1 * kPointerSize));

  // For arguments 4 and 3 get string length, calculate start of string data
  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
  __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
  __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
  // Load the length from the original subject string from the previous stack
  // frame. Therefore we have to use fp, which points exactly to two pointer
  // sizes below the previous sp. (Because creating a new stack frame pushes
  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
  __ lw(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
  // If slice offset is not 0, load the length from the original sliced string.
  // Argument 4, a3: End of string data
  // Argument 3, a2: Start of string data
  // Prepare start and end index of the input.
  __ sllv(t1, t0, a3);
  __ addu(t0, t2, t1);
  __ sllv(t1, a1, a3);
  __ addu(a2, t0, t1);

  __ lw(t2, FieldMemOperand(subject, String::kLengthOffset));
  __ sra(t2, t2, kSmiTagSize);
  __ sllv(t1, t2, a3);
  __ addu(a3, t0, t1);
  // Argument 2 (a1): Previous index.
  // Already there

  // Argument 1 (a0): Subject string.
  __ mov(a0, subject);

  // Locate the code entry and call it.
  __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
  DirectCEntryStub stub;
  stub.GenerateCall(masm, t9);

  __ LeaveExitFrame(false, no_reg, true);

  // v0: result
  // subject: subject string (callee saved)
  // regexp_data: RegExp data (callee saved)
  // last_match_info_elements: Last match info elements (callee saved)
  // Check the result.
  Label success;
  __ Branch(&success, eq, v0, Operand(1));
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  Label failure;
  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
  // If not exception, it can only be retry. Handle that in the runtime system.
  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
  // Result must now be exception. If there is no pending exception already, a
  // stack overflow (on the backtrack stack) was detected in RegExp code, but
  // the exception has not yet been created. Handle that in the runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  __ li(a1, Operand(isolate->factory()->the_hole_value()));
  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                      isolate)));
  __ lw(v0, MemOperand(a2, 0));
  __ Branch(&runtime, eq, v0, Operand(a1));

  __ sw(a1, MemOperand(a2, 0));  // Clear pending exception.

  // Check if the exception is a termination. If so, throw as uncatchable.
  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
  Label termination_exception;
  __ Branch(&termination_exception, eq, v0, Operand(a0));

  __ Throw(v0);

  __ bind(&termination_exception);
  __ ThrowUncatchable(v0);

  __ bind(&failure);
  // For failure and exception return null.
  __ li(v0, Operand(isolate->factory()->null_value()));
  __ DropAndRet(4);

  // Process the result from the native regexp code.
  __ bind(&success);
  __ lw(a1,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  // Multiplying by 2 comes for free since a1 is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ Addu(a1, a1, Operand(2));  // a1 was a smi.

  __ lw(a0, MemOperand(sp, kLastMatchInfoOffset));
  __ JumpIfSmi(a0, &runtime);
  __ GetObjectType(a0, a2, a2);
  __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
  // Check that the JSArray is in fast case.
  __ lw(last_match_info_elements,
        FieldMemOperand(a0, JSArray::kElementsOffset));
  __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
  __ Branch(&runtime, ne, a0, Operand(at));
  // Check that the last match info has space for the capture registers and the
  // additional information.
  __ lw(a0,
        FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
  __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
  __ sra(at, a0, kSmiTagSize);
  __ Branch(&runtime, gt, a2, Operand(at));

  // a1: number of capture registers
  // subject: subject string
  // Store the capture count.
  __ sll(a2, a1, kSmiTagSize + kSmiShiftSize);  // To smi.
  __ sw(a2, FieldMemOperand(last_match_info_elements,
                             RegExpImpl::kLastCaptureCountOffset));
  // Store last subject and last input.
  __ sw(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastSubjectOffset));
  __ mov(a2, subject);
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastSubjectOffset,
                      subject,
                      t3,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs);
  __ mov(subject, a2);
  __ sw(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastInputOffset));
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastInputOffset,
                      subject,
                      t3,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  ExternalReference address_of_static_offsets_vector =
      ExternalReference::address_of_static_offsets_vector(isolate);
  __ li(a2, Operand(address_of_static_offsets_vector));

  // a1: number of capture registers
  // a2: offsets vector
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ Addu(a0,
         last_match_info_elements,
         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
  __ bind(&next_capture);
  __ Subu(a1, a1, Operand(1));
  __ Branch(&done, lt, a1, Operand(zero_reg));
  // Read the value from the static offsets vector buffer.
  __ lw(a3, MemOperand(a2, 0));
  __ addiu(a2, a2, kPointerSize);
  // Store the smi value in the last match info.
  __ sll(a3, a3, kSmiTagSize);  // Convert to Smi.
  __ sw(a3, MemOperand(a0, 0));
  __ Branch(&next_capture, USE_DELAY_SLOT);
  __ addiu(a0, a0, kPointerSize);  // In branch delay slot.

  __ bind(&done);

  // Return last match info.
  __ lw(v0, MemOperand(sp, kLastMatchInfoOffset));
  __ DropAndRet(4);

  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);

  // Deferred code for string handling.
  // (6) Not a long external string?  If yes, go to (8).
  __ bind(&not_seq_nor_cons);
  // Go to (8).
  __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));

  // (7) External string.  Make it, offset-wise, look like a sequential string.
  __ bind(&external_string);
  __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, a0, Operand(kIsIndirectStringMask));
    __ Assert(eq,
              kExternalStringExpectedButNotFound,
              at,
              Operand(zero_reg));
  }
  __ lw(subject,
        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Subu(subject,
          subject,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&seq_string);    // Go to (5).

  // (8) Short external string or not a string?  If yes, bail out to runtime.
  __ bind(&not_long_external);
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
  __ Branch(&runtime, ne, at, Operand(zero_reg));

  // (9) Sliced string.  Replace subject with parent.  Go to (4).
  // Load offset into t0 and replace subject string with parent.
  __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
  __ sra(t0, t0, kSmiTagSize);
  __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
  __ jmp(&check_underlying);  // Go to (4).
#endif  // V8_INTERPRETED_REGEXP
}
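// On return from the native code above, v0 carries the outcome: 1 means
// exactly one successful match (global behaviour is forced off via argument
// 6), FAILURE yields null, EXCEPTION rethrows the pending exception (or
// throws an uncatchable termination), and any other value (retry) falls back
// to the runtime.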


void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  const int kMaxInlineLength = 100;
  Label slowcase;
  Label done;
  __ lw(a1, MemOperand(sp, kPointerSize * 2));
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  __ JumpIfNotSmi(a1, &slowcase);
  __ Branch(&slowcase, hi, a1, Operand(Smi::FromInt(kMaxInlineLength)));
  // Smi-tagging is equivalent to multiplying by 2.
  // Allocate RegExpResult followed by FixedArray with size in a2.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  // Size of JSArray with two in-object properties and the header of a
  // FixedArray.
  int objects_size =
      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
  __ srl(t1, a1, kSmiTagSize + kSmiShiftSize);
  __ Addu(a2, t1, Operand(objects_size));
  __ Allocate(
      a2,  // In: Size, in words.
      v0,  // Out: Start of allocation (tagged).
      a3,  // Scratch register.
      t0,  // Scratch register.
      &slowcase,
      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
  // v0: Start of allocated area, object-tagged.
  // a1: Number of elements in array, as smi.
  // t1: Number of elements, untagged.

  // Set JSArray map to global.regexp_result_map().
  // Set empty properties FixedArray.
  // Set elements to point to FixedArray allocated right after the JSArray.
  // Interleave operations for better latency.
  __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
  __ Addu(a3, v0, Operand(JSRegExpResult::kSize));
  __ li(t0, Operand(masm->isolate()->factory()->empty_fixed_array()));
  __ lw(a2, FieldMemOperand(a2, GlobalObject::kNativeContextOffset));
  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
  __ lw(a2, ContextOperand(a2, Context::REGEXP_RESULT_MAP_INDEX));
  __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
  __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));

  // Set input, index and length fields from arguments.
  __ lw(a1, MemOperand(sp, kPointerSize * 0));
  __ lw(a2, MemOperand(sp, kPointerSize * 1));
  __ lw(t2, MemOperand(sp, kPointerSize * 2));
  __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
  __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
  __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));

  // Fill out the elements FixedArray.
  // v0: JSArray, tagged.
  // a3: FixedArray, tagged.
  // t1: Number of elements in array, untagged.

  // Set map.
  __ li(a2, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ sw(a2, FieldMemOperand(a3, HeapObject::kMapOffset));
  // Set FixedArray length.
  __ sll(t2, t1, kSmiTagSize);
  __ sw(t2, FieldMemOperand(a3, FixedArray::kLengthOffset));
  // Fill contents of fixed-array with undefined.
  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
  __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // Fill fixed array elements with undefined.
  // v0: JSArray, tagged.
  // a2: undefined.
  // a3: Start of elements in FixedArray.
  // t1: Number of elements to fill.
  Label loop;
  __ sll(t1, t1, kPointerSizeLog2);  // Convert num elements to num bytes.
  __ addu(t1, t1, a3);  // Point past last element to store.
  __ bind(&loop);
  __ Branch(&done, ge, a3, Operand(t1));  // Break when a3 past end of elem.
  __ sw(a2, MemOperand(a3));
  __ Branch(&loop, USE_DELAY_SLOT);
  __ addiu(a3, a3, kPointerSize);  // In branch delay slot.

  __ bind(&done);
  __ DropAndRet(3);

  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}
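// Note that the JSArray and its elements FixedArray come from a single
// allocation above: the elements start at v0 + JSRegExpResult::kSize, so no
// second allocation is needed, and the slow case only triggers for non-smi
// lengths or lengths above kMaxInlineLength.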


static void GenerateRecordCallTarget(MacroAssembler* masm) {
  // Cache the called function in a global property cell.  Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
  // a0 : number of arguments to the construct function
  // a1 : the function to call
  // a2 : cache cell for call target
  Label initialize, done, miss, megamorphic, not_array_function;

  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
            masm->isolate()->heap()->undefined_value());
  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
            masm->isolate()->heap()->the_hole_value());

  // Load the cache state into a3.
  __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  __ Branch(&done, eq, a3, Operand(a1));

  // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function, and we didn't find the megamorphic
  // sentinel, then we have in the cell either some other function or an
  // AllocationSite. Do a map check on the object in a3.
  __ lw(t1, FieldMemOperand(a3, 0));
  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
  __ Branch(&miss, ne, t1, Operand(at));

  // Make sure the function is the Array() function
  __ LoadArrayFunction(a3);
  __ Branch(&megamorphic, ne, a1, Operand(a3));
  __ jmp(&done);

  __ bind(&miss);

  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  __ Branch(&initialize, eq, a3, Operand(at));
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ bind(&megamorphic);
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
  __ jmp(&done);

  // An uninitialized cache is patched with the function or sentinel to
  // indicate the ElementsKind if function is the Array constructor.
  __ bind(&initialize);
  // Make sure the function is the Array() function
  __ LoadArrayFunction(a3);
  __ Branch(&not_array_function, ne, a1, Operand(a3));

  // The target function is the Array constructor.
  // Create an AllocationSite if we don't already have it, store it in the cell.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    const RegList kSavedRegs =
        1 << 4  |  // a0
        1 << 5  |  // a1
        1 << 6;    // a2

    // Arguments register must be smi-tagged to call out.
    __ SmiTag(a0);
    __ MultiPush(kSavedRegs);

    CreateAllocationSiteStub create_stub;
    __ CallStub(&create_stub);

    __ MultiPop(kSavedRegs);
    __ SmiUntag(a0);
  }
  __ Branch(&done);

  __ bind(&not_array_function);
  __ sw(a1, FieldMemOperand(a2, Cell::kValueOffset));
  // No need for a write barrier here - cells are rescanned.

  __ bind(&done);
}
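// The cache cell therefore moves through three states: the uninitialized
// sentinel (the hole), then either the called JSFunction or an AllocationSite
// (when the callee is the Array constructor), and finally the megamorphic
// sentinel (undefined) once different targets have been seen.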


void CallFunctionStub::Generate(MacroAssembler* masm) {
  // a1 : the function to call
  // a2 : cache cell for call target
  Label slow, non_function;

  // The receiver might implicitly be the global object. This is
  // indicated by passing the hole as the receiver to the call
  // function stub.
  if (ReceiverMightBeImplicit()) {
    Label call;
    // Get the receiver from the stack.
    // function, receiver [, arguments]
    __ lw(t0, MemOperand(sp, argc_ * kPointerSize));
    // Call as function is indicated with the hole.
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Branch(&call, ne, t0, Operand(at));
    // Patch the receiver on the stack with the global receiver object.
    __ lw(a3,
          MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
    __ lw(a3, FieldMemOperand(a3, GlobalObject::kGlobalReceiverOffset));
    __ sw(a3, MemOperand(sp, argc_ * kPointerSize));
    __ bind(&call);
  }

  // Check that the function is really a JavaScript function.
  // a1: pushed function (to be verified)
  __ JumpIfSmi(a1, &non_function);
  // Get the map of the function object.
  __ GetObjectType(a1, a3, a3);
  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Fast-case: Invoke the function now.
  // a1: pushed function
  ParameterCount actual(argc_);

  if (ReceiverMightBeImplicit()) {
    Label call_as_function;
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Branch(&call_as_function, eq, t0, Operand(at));
    __ InvokeFunction(a1,
                      actual,
                      JUMP_FUNCTION,
                      NullCallWrapper(),
                      CALL_AS_METHOD);
    __ bind(&call_as_function);
  }
  __ InvokeFunction(a1,
                    actual,
                    JUMP_FUNCTION,
                    NullCallWrapper(),
                    CALL_AS_FUNCTION);

  // Slow-case: Non-function called.
  __ bind(&slow);
  if (RecordCallTarget()) {
    // If there is a call target cache, mark it megamorphic in the
    // non-function case.  MegamorphicSentinel is an immortal immovable
    // object (undefined) so no write barrier is needed.
    ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
              masm->isolate()->heap()->undefined_value());
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ sw(at, FieldMemOperand(a2, Cell::kValueOffset));
  }
  // Check for function proxy.
  __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
  __ push(a1);  // Put proxy as additional argument.
  __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32));
  __ li(a2, Operand(0, RelocInfo::NONE32));
  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY);
  __ SetCallKind(t1, CALL_AS_METHOD);
  {
    Handle<Code> adaptor =
      masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
    __ Jump(adaptor, RelocInfo::CODE_TARGET);
  }

  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
  // of the original receiver from the call site).
  __ bind(&non_function);
  __ sw(a1, MemOperand(sp, argc_ * kPointerSize));
  __ li(a0, Operand(argc_));  // Set up the number of arguments.
  __ mov(a2, zero_reg);
  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION);
  __ SetCallKind(t1, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}


void CallConstructStub::Generate(MacroAssembler* masm) {
  // a0 : number of arguments
  // a1 : the function to call
  // a2 : cache cell for call target
  Label slow, non_function_call;

  // Check that the function is not a smi.
  __ JumpIfSmi(a1, &non_function_call);
  // Check that the function is a JSFunction.
  __ GetObjectType(a1, a3, a3);
  __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Jump to the function-specific construct stub.
  Register jmp_reg = a3;
  __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
  __ lw(jmp_reg, FieldMemOperand(jmp_reg,
                                 SharedFunctionInfo::kConstructStubOffset));
  __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ Jump(at);

  // a0: number of arguments
  // a1: called object
  // a3: object type
  Label do_call;
  __ bind(&slow);
  __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
  __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
  __ jmp(&do_call);

  __ bind(&non_function_call);
  __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing a0).
  __ li(a2, Operand(0, RelocInfo::NONE32));
  __ SetCallKind(t1, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}


// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  Label flat_string;
  Label ascii_string;
  Label got_char_code;
  Label sliced_string;

  ASSERT(!t0.is(index_));
  ASSERT(!t0.is(result_));
  ASSERT(!t0.is(object_));

  // If the receiver is a smi trigger the non-string case.
  __ JumpIfSmi(object_, receiver_not_string_);

  // Fetch the instance type of the receiver into result register.
  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  // If the receiver is not a string trigger the non-string case.
  __ And(t0, result_, Operand(kIsNotStringMask));
  __ Branch(receiver_not_string_, ne, t0, Operand(zero_reg));

  // If the index is non-smi trigger the non-smi case.
  __ JumpIfNotSmi(index_, &index_not_smi_);

  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
  __ Branch(index_out_of_range_, ls, t0, Operand(index_));

  __ sra(index_, index_, kSmiTagSize);

  StringCharLoadGenerator::Generate(masm,
                                    object_,
                                    index_,
                                    result_,
                                    &call_runtime_);

  __ sll(result_, result_, kSmiTagSize);
  __ bind(&exit_);
}
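// GenerateSlow below covers the cases the fast path above bails out on: a
// heap-number index is converted to a smi via the runtime, and strings that
// StringCharLoadGenerator cannot handle directly (e.g. ones that need
// flattening) are dispatched to Runtime::kStringCharCodeAt.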


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              result_,
              Heap::kHeapNumberMapRootIndex,
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  // Consumed by runtime conversion function:
  __ Push(object_, index_);
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
  } else {
    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }

  // Save the conversion result before the pop instructions below
  // have a chance to overwrite it.

  __ Move(index_, v0);
  __ pop(object_);
  // Reload the instance type.
  __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ Branch(&got_smi_index_);

  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code of getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ sll(index_, index_, kSmiTagSize);
  __ Push(object_, index_);
  __ CallRuntime(Runtime::kStringCharCodeAt, 2);

  __ Move(result_, v0);

  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}


// -------------------------------------------------------------------------
// StringCharFromCodeGenerator

void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.

  ASSERT(!t0.is(result_));
  ASSERT(!t0.is(code_));

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
  __ And(t0,
         code_,
         Operand(kSmiTagMask |
                 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
  __ Branch(&slow_case_, ne, t0, Operand(zero_reg));

  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
  // At this point code register contains smi tagged ASCII char code.
  STATIC_ASSERT(kSmiTag == 0);
  __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(result_, result_, t0);
  __ lw(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
  __ Branch(&slow_case_, eq, result_, Operand(t0));
  __ bind(&exit_);
}
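// The single And above folds two checks into one: the value must be a smi
// (kSmiTagMask) and the untagged char code must not exceed
// String::kMaxOneByteCharCode (the complement of the code mask, shifted by
// the smi tag). A cache entry of undefined means the one-character string has
// not been created yet, so the slow case builds it via the runtime.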


void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
  __ Move(result_, v0);

  call_helper.AfterCall(masm);
  __ Branch(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}


void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          bool ascii) {
  Label loop;
  Label done;
  // This loop just copies one character at a time, as it is only used for
  // very short strings.
  if (!ascii) {
    __ addu(count, count, count);
  }
  __ Branch(&done, eq, count, Operand(zero_reg));
  __ addu(count, dest, count);  // Count now points just past the last dest byte.

  __ bind(&loop);
  __ lbu(scratch, MemOperand(src));
  __ addiu(src, src, 1);
  __ sb(scratch, MemOperand(dest));
  __ addiu(dest, dest, 1);
  __ Branch(&loop, lt, dest, Operand(count));

  __ bind(&done);
}


enum CopyCharactersFlags {
  COPY_ASCII = 1,
  DEST_ALWAYS_ALIGNED = 2
};


void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
                                              Register dest,
                                              Register src,
                                              Register count,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4,
                                              Register scratch5,
                                              int flags) {
  bool ascii = (flags & COPY_ASCII) != 0;
  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;

  if (dest_always_aligned && FLAG_debug_code) {
    // Check that destination is actually word aligned if the flag says
    // that it is.
    __ And(scratch4, dest, Operand(kPointerAlignmentMask));
    __ Check(eq,
             kDestinationOfCopyNotAligned,
             scratch4,
             Operand(zero_reg));
  }

  const int kReadAlignment = 4;
  const int kReadAlignmentMask = kReadAlignment - 1;
  // Ensure that reading an entire aligned word containing the last character
  // of a string will not read outside the allocated area (because we pad up
  // to kObjectAlignment).
  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
  // Assumes word reads and writes are little endian.
  // Nothing to do for zero characters.
  Label done;

  if (!ascii) {
    __ addu(count, count, count);
  }
  __ Branch(&done, eq, count, Operand(zero_reg));

  Label byte_loop;
  // Must copy at least eight bytes, otherwise just do it one byte at a time.
  __ Subu(scratch1, count, Operand(8));
  __ Addu(count, dest, Operand(count));
  Register limit = count;  // Copy until dest equals this.
  __ Branch(&byte_loop, lt, scratch1, Operand(zero_reg));

  if (!dest_always_aligned) {
    // Align dest by byte copying. Copies between zero and three bytes.
    __ And(scratch4, dest, Operand(kReadAlignmentMask));
    Label dest_aligned;
    __ Branch(&dest_aligned, eq, scratch4, Operand(zero_reg));
    Label aligned_loop;
    __ bind(&aligned_loop);
    __ lbu(scratch1, MemOperand(src));
    __ addiu(src, src, 1);
    __ sb(scratch1, MemOperand(dest));
    __ addiu(dest, dest, 1);
    __ addiu(scratch4, scratch4, 1);
    __ Branch(&aligned_loop, le, scratch4, Operand(kReadAlignmentMask));
    __ bind(&dest_aligned);
  }

  Label simple_loop;

  __ And(scratch4, src, Operand(kReadAlignmentMask));
  __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg));

  // Loop for src/dst that are not aligned the same way.
  // This loop uses lwl and lwr instructions. These instructions
  // depend on the endianness, and the implementation assumes little-endian.
  {
    Label loop;
    __ bind(&loop);
    __ lwr(scratch1, MemOperand(src));
    __ Addu(src, src, Operand(kReadAlignment));
    __ lwl(scratch1, MemOperand(src, -1));
    __ sw(scratch1, MemOperand(dest));
    __ Addu(dest, dest, Operand(kReadAlignment));
    __ Subu(scratch2, limit, dest);
    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
  }
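  // The lwr/lwl pair above is the standard little-endian MIPS idiom for an
  // unaligned 32-bit load: lwr fills the low-order bytes of scratch1 starting
  // at the (possibly unaligned) src, and lwl at src + 3 supplies the remaining
  // high-order bytes, so a full word can then be stored to the aligned dest.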

  __ Branch(&byte_loop);

  // Simple loop.
  // Copy words from src to dest, until less than four bytes left.
  // Both src and dest are word aligned.
  __ bind(&simple_loop);
  {
    Label loop;
    __ bind(&loop);
    __ lw(scratch1, MemOperand(src));
    __ Addu(src, src, Operand(kReadAlignment));
    __ sw(scratch1, MemOperand(dest));
    __ Addu(dest, dest, Operand(kReadAlignment));
    __ Subu(scratch2, limit, dest);
    __ Branch(&loop, ge, scratch2, Operand(kReadAlignment));
  }

  // Copy bytes from src to dest until dest hits limit.
  __ bind(&byte_loop);
  // Test if dest has already reached the limit.
  __ Branch(&done, ge, dest, Operand(limit));
  __ lbu(scratch1, MemOperand(src));
  __ addiu(src, src, 1);
  __ sb(scratch1, MemOperand(dest));
  __ addiu(dest, dest, 1);
  __ Branch(&byte_loop);

  __ bind(&done);
}
3912

    
3913

    
3914
void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
3915
                                                        Register c1,
3916
                                                        Register c2,
3917
                                                        Register scratch1,
3918
                                                        Register scratch2,
3919
                                                        Register scratch3,
3920
                                                        Register scratch4,
3921
                                                        Register scratch5,
3922
                                                        Label* not_found) {
3923
  // Register scratch3 is the general scratch register in this function.
3924
  Register scratch = scratch3;
3925

    
3926
  // Make sure that both characters are not digits as such strings has a
3927
  // different hash algorithm. Don't try to look for these in the string table.
3928
  Label not_array_index;
3929
  __ Subu(scratch, c1, Operand(static_cast<int>('0')));
3930
  __ Branch(&not_array_index,
3931
            Ugreater,
3932
            scratch,
3933
            Operand(static_cast<int>('9' - '0')));
3934
  __ Subu(scratch, c2, Operand(static_cast<int>('0')));
3935

    
3936
  // If check failed combine both characters into single halfword.
3937
  // This is required by the contract of the method: code at the
3938
  // not_found branch expects this combination in c1 register.
3939
  Label tmp;
3940
  __ sll(scratch1, c2, kBitsPerByte);
3941
  __ Branch(&tmp, Ugreater, scratch, Operand(static_cast<int>('9' - '0')));
3942
  __ Or(c1, c1, scratch1);
3943
  __ bind(&tmp);
3944
  __ Branch(
3945
      not_found, Uless_equal, scratch, Operand(static_cast<int>('9' - '0')));
3946

    
3947
  __ bind(&not_array_index);
3948
  // Calculate the two character string hash.
3949
  Register hash = scratch1;
3950
  StringHelper::GenerateHashInit(masm, hash, c1);
3951
  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
3952
  StringHelper::GenerateHashGetHash(masm, hash);
3953

    
3954
  // Collect the two characters in a register.
3955
  Register chars = c1;
3956
  __ sll(scratch, c2, kBitsPerByte);
3957
  __ Or(chars, chars, scratch);
3958

    
3959
  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
3960
  // hash:  hash of two character string.
3961

    
3962
  // Load string table.
3963
  // Load address of first element of the string table.
3964
  Register string_table = c2;
3965
  __ LoadRoot(string_table, Heap::kStringTableRootIndex);
3966

    
3967
  Register undefined = scratch4;
3968
  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
3969

    
3970
  // Calculate capacity mask from the string table capacity.
3971
  Register mask = scratch2;
3972
  __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
3973
  __ sra(mask, mask, 1);
3974
  __ Addu(mask, mask, -1);
3975

    
3976
  // Calculate untagged address of the first element of the string table.
3977
  Register first_string_table_element = string_table;
3978
  __ Addu(first_string_table_element, string_table,
3979
         Operand(StringTable::kElementsStartOffset - kHeapObjectTag));
3980

    
3981
  // Registers.
3982
  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
3983
  // hash:  hash of two character string
3984
  // mask:  capacity mask
3985
  // first_string_table_element: address of the first element of
3986
  //                             the string table
3987
  // undefined: the undefined object
3988
  // scratch: -
3989

    
3990
  // Perform a number of probes in the string table.
3991
  const int kProbes = 4;
3992
  Label found_in_string_table;
3993
  Label next_probe[kProbes];
3994
  Register candidate = scratch5;  // Scratch register contains candidate.
3995
  for (int i = 0; i < kProbes; i++) {
3996
    // Calculate entry in string table.
3997
    if (i > 0) {
3998
      __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
3999
    } else {
4000
      __ mov(candidate, hash);
4001
    }
4002

    
4003
    __ And(candidate, candidate, Operand(mask));
4004

    
4005
    // Load the entry from the symble table.
4006
    STATIC_ASSERT(StringTable::kEntrySize == 1);
4007
    __ sll(scratch, candidate, kPointerSizeLog2);
4008
    __ Addu(scratch, scratch, first_string_table_element);
4009
    __ lw(candidate, MemOperand(scratch));
4010

    
4011
    // If entry is undefined no string with this hash can be found.
4012
    Label is_string;
4013
    __ GetObjectType(candidate, scratch, scratch);
4014
    __ Branch(&is_string, ne, scratch, Operand(ODDBALL_TYPE));
4015

    
4016
    __ Branch(not_found, eq, undefined, Operand(candidate));
4017
    // Must be the hole (deleted entry).
4018
    if (FLAG_debug_code) {
4019
      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
4020
      __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole,
4021
          scratch, Operand(candidate));
4022
    }
4023
    __ jmp(&next_probe[i]);
4024

    
4025
    __ bind(&is_string);
4026

    
4027
    // Check that the candidate is a non-external ASCII string.  The instance
4028
    // type is still in the scratch register from the CompareObjectType
4029
    // operation.
4030
    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);
4031

    
4032
    // If length is not 2 the string is not a candidate.
4033
    __ lw(scratch, FieldMemOperand(candidate, String::kLengthOffset));
4034
    __ Branch(&next_probe[i], ne, scratch, Operand(Smi::FromInt(2)));
4035

    
4036
    // Check if the two characters match.
4037
    // Assumes that the halfword load is little-endian.
4038
    __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
4039
    __ Branch(&found_in_string_table, eq, chars, Operand(scratch));
4040
    __ bind(&next_probe[i]);
4041
  }
4042

    
4043
  // No matching 2 character string found by probing.
4044
  __ jmp(not_found);
4045

    
4046
  // Scratch register contains result when we fall through to here.
4047
  Register result = candidate;
4048
  __ bind(&found_in_string_table);
4049
  __ mov(v0, result);
4050
}
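
// Illustrative sketch (not part of the generated code): how each of the
// kProbes probe positions above is derived.  The first probe uses the raw
// hash (offset 0); later probes add StringTable::GetProbeOffset(i).  At each
// probed slot the loop above gives up on undefined (the hash is absent),
// skips the hole (a deleted entry), and succeeds on a sequential ASCII
// string of length 2 whose two characters match.  The helper name is
// hypothetical; capacity is the untagged table capacity.
static uint32_t StringTableProbeIndex(uint32_t hash, uint32_t probe_offset,
                                      uint32_t capacity) {
  // The capacity is a power of two, so capacity - 1 is an all-ones mask that
  // wraps the probe position inside the table (kEntrySize == 1 here).
  return (hash + probe_offset) & (capacity - 1);
}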
4051

    
4052

    
4053
void StringHelper::GenerateHashInit(MacroAssembler* masm,
4054
                                    Register hash,
4055
                                    Register character) {
4056
  // hash = seed + character + ((seed + character) << 10);
4057
  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
4058
  // Untag smi seed and add the character.
4059
  __ SmiUntag(hash);
4060
  __ addu(hash, hash, character);
4061
  __ sll(at, hash, 10);
4062
  __ addu(hash, hash, at);
4063
  // hash ^= hash >> 6;
4064
  __ srl(at, hash, 6);
4065
  __ xor_(hash, hash, at);
4066
}
4067

    
4068

    
4069
void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
4070
                                            Register hash,
4071
                                            Register character) {
4072
  // hash += character;
4073
  __ addu(hash, hash, character);
4074
  // hash += hash << 10;
4075
  __ sll(at, hash, 10);
4076
  __ addu(hash, hash, at);
4077
  // hash ^= hash >> 6;
4078
  __ srl(at, hash, 6);
4079
  __ xor_(hash, hash, at);
4080
}
4081

    
4082

    
4083
void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
4084
                                       Register hash) {
4085
  // hash += hash << 3;
4086
  __ sll(at, hash, 3);
4087
  __ addu(hash, hash, at);
4088
  // hash ^= hash >> 11;
4089
  __ srl(at, hash, 11);
4090
  __ xor_(hash, hash, at);
4091
  // hash += hash << 15;
4092
  __ sll(at, hash, 15);
4093
  __ addu(hash, hash, at);
4094

    
4095
  __ li(at, Operand(String::kHashBitMask));
4096
  __ and_(hash, hash, at);
4097

    
4098
  // if (hash == 0) hash = 27;
4099
  __ ori(at, zero_reg, StringHasher::kZeroHash);
4100
  __ Movz(hash, at, hash);
4101
}
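
// Illustrative sketch (not part of the stubs): GenerateHashInit,
// GenerateHashAddCharacter and GenerateHashGetHash above together compute a
// seeded one-at-a-time style hash.  A plain C++ model for an ASCII string,
// assuming 32-bit unsigned arithmetic; the function name is hypothetical.
static uint32_t HashSequentialAsciiString(const uint8_t* chars, int length,
                                          uint32_t seed) {
  uint32_t hash = seed;
  for (int i = 0; i < length; i++) {
    // GenerateHashInit (first character) / GenerateHashAddCharacter (rest).
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  // GenerateHashGetHash.
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= String::kHashBitMask;  // Keep only the hash field bits.
  return (hash == 0) ? StringHasher::kZeroHash : hash;  // kZeroHash == 27.
}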
4102

    
4103

    
4104
void SubStringStub::Generate(MacroAssembler* masm) {
4105
  Label runtime;
4106
  // Stack frame on entry.
4107
  //  ra: return address
4108
  //  sp[0]: to
4109
  //  sp[4]: from
4110
  //  sp[8]: string
4111

    
4112
  // This stub is called from the native-call %_SubString(...), so
4113
  // nothing can be assumed about the arguments. It is tested that:
4114
  //  "string" is a sequential string,
4115
  //  both "from" and "to" are smis, and
4116
  //  0 <= from <= to <= string.length.
4117
  // If any of these assumptions fail, we call the runtime system.
4118

    
4119
  const int kToOffset = 0 * kPointerSize;
4120
  const int kFromOffset = 1 * kPointerSize;
4121
  const int kStringOffset = 2 * kPointerSize;
4122

    
4123
  __ lw(a2, MemOperand(sp, kToOffset));
4124
  __ lw(a3, MemOperand(sp, kFromOffset));
4125
  STATIC_ASSERT(kFromOffset == kToOffset + 4);
4126
  STATIC_ASSERT(kSmiTag == 0);
4127
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4128

    
4129
  // Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
4130
  // safe in this case.
4131
  __ UntagAndJumpIfNotSmi(a2, a2, &runtime);
4132
  __ UntagAndJumpIfNotSmi(a3, a3, &runtime);
4133
  // Both a2 and a3 are untagged integers.
4134

    
4135
  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
4136

    
4137
  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
4138
  __ Subu(a2, a2, a3);
4139

    
4140
  // Make sure first argument is a string.
4141
  __ lw(v0, MemOperand(sp, kStringOffset));
4142
  __ JumpIfSmi(v0, &runtime);
4143
  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
4144
  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
4145
  __ And(t0, a1, Operand(kIsNotStringMask));
4146

    
4147
  __ Branch(&runtime, ne, t0, Operand(zero_reg));
4148

    
4149
  Label single_char;
4150
  __ Branch(&single_char, eq, a2, Operand(1));
4151

    
4152
  // Short-cut for the case of trivial substring.
4153
  Label return_v0;
4154
  // v0: original string
4155
  // a2: result string length
4156
  __ lw(t0, FieldMemOperand(v0, String::kLengthOffset));
4157
  __ sra(t0, t0, 1);
4158
  // Return original string.
4159
  __ Branch(&return_v0, eq, a2, Operand(t0));
4160
  // Longer than original string's length or negative: unsafe arguments.
4161
  __ Branch(&runtime, hi, a2, Operand(t0));
4162
  // Shorter than original string's length: an actual substring.
4163

    
4164
  // Deal with different string types: update the index if necessary
4165
  // and put the underlying string into t1.
4166
  // v0: original string
4167
  // a1: instance type
4168
  // a2: length
4169
  // a3: from index (untagged)
4170
  Label underlying_unpacked, sliced_string, seq_or_external_string;
4171
  // If the string is not indirect, it can only be sequential or external.
4172
  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
4173
  STATIC_ASSERT(kIsIndirectStringMask != 0);
4174
  __ And(t0, a1, Operand(kIsIndirectStringMask));
4175
  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, t0, Operand(zero_reg));
4176
  // t0 is used as a scratch register and can be overwritten in either case.
4177
  __ And(t0, a1, Operand(kSlicedNotConsMask));
4178
  __ Branch(&sliced_string, ne, t0, Operand(zero_reg));
4179
  // Cons string.  Check whether it is flat, then fetch first part.
4180
  __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset));
4181
  __ LoadRoot(t0, Heap::kempty_stringRootIndex);
4182
  __ Branch(&runtime, ne, t1, Operand(t0));
4183
  __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset));
4184
  // Update instance type.
4185
  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
4186
  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
4187
  __ jmp(&underlying_unpacked);
4188

    
4189
  __ bind(&sliced_string);
4190
  // Sliced string.  Fetch parent and correct start index by offset.
4191
  __ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
4192
  __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
4193
  __ sra(t0, t0, 1);  // Add offset to index.
4194
  __ Addu(a3, a3, t0);
4195
  // Update instance type.
4196
  __ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
4197
  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
4198
  __ jmp(&underlying_unpacked);
4199

    
4200
  __ bind(&seq_or_external_string);
4201
  // Sequential or external string.  Just move string to the expected register.
4202
  __ mov(t1, v0);
4203

    
4204
  __ bind(&underlying_unpacked);
4205

    
4206
  if (FLAG_string_slices) {
4207
    Label copy_routine;
4208
    // t1: underlying subject string
4209
    // a1: instance type of underlying subject string
4210
    // a2: length
4211
    // a3: adjusted start index (untagged)
4212
    // Short slice.  Copy instead of slicing.
4213
    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
4214
    // Allocate new sliced string.  At this point we do not reload the instance
4215
    // type including the string encoding because we simply rely on the info
4216
    // provided by the original string.  It does not matter if the original
4217
    // string's encoding is wrong because we always have to recheck encoding of
4218
    // the newly created string's parent anyways due to externalized strings.
4219
    Label two_byte_slice, set_slice_header;
4220
    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
4221
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
4222
    __ And(t0, a1, Operand(kStringEncodingMask));
4223
    __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
4224
    __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
4225
    __ jmp(&set_slice_header);
4226
    __ bind(&two_byte_slice);
4227
    __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
4228
    __ bind(&set_slice_header);
4229
    __ sll(a3, a3, 1);
4230
    __ sw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
4231
    __ sw(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
4232
    __ jmp(&return_v0);
4233

    
4234
    __ bind(&copy_routine);
4235
  }
4236

    
4237
  // t1: underlying subject string
4238
  // a1: instance type of underlying subject string
4239
  // a2: length
4240
  // a3: adjusted start index (untagged)
4241
  Label two_byte_sequential, sequential_string, allocate_result;
4242
  STATIC_ASSERT(kExternalStringTag != 0);
4243
  STATIC_ASSERT(kSeqStringTag == 0);
4244
  __ And(t0, a1, Operand(kExternalStringTag));
4245
  __ Branch(&sequential_string, eq, t0, Operand(zero_reg));
4246

    
4247
  // Handle external string.
4248
  // Rule out short external strings.
4249
  STATIC_CHECK(kShortExternalStringTag != 0);
4250
  __ And(t0, a1, Operand(kShortExternalStringTag));
4251
  __ Branch(&runtime, ne, t0, Operand(zero_reg));
4252
  __ lw(t1, FieldMemOperand(t1, ExternalString::kResourceDataOffset));
4253
  // t1 already points to the first character of underlying string.
4254
  __ jmp(&allocate_result);
4255

    
4256
  __ bind(&sequential_string);
4257
  // Locate first character of underlying subject string.
4258
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
4259
  __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4260

    
4261
  __ bind(&allocate_result);
4262
  // Sequential ASCII string.  Allocate the result.
4263
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
4264
  __ And(t0, a1, Operand(kStringEncodingMask));
4265
  __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
4266

    
4267
  // Allocate and copy the resulting ASCII string.
4268
  __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
4269

    
4270
  // Locate first character of substring to copy.
4271
  __ Addu(t1, t1, a3);
4272

    
4273
  // Locate first character of result.
4274
  __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4275

    
4276
  // v0: result string
4277
  // a1: first character of result string
4278
  // a2: result string length
4279
  // t1: first character of substring to copy
4280
  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4281
  StringHelper::GenerateCopyCharactersLong(
4282
      masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
4283
  __ jmp(&return_v0);
4284

    
4285
  // Allocate and copy the resulting two-byte string.
4286
  __ bind(&two_byte_sequential);
4287
  __ AllocateTwoByteString(v0, a2, t0, t2, t3, &runtime);
4288

    
4289
  // Locate first character of substring to copy.
4290
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
4291
  __ sll(t0, a3, 1);
4292
  __ Addu(t1, t1, t0);
4293
  // Locate first character of result.
4294
  __ Addu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4295

    
4296
  // v0: result string.
4297
  // a1: first character of result.
4298
  // a2: result length.
4299
  // t1: first character of substring to copy.
4300
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
4301
  StringHelper::GenerateCopyCharactersLong(
4302
      masm, a1, t1, a2, a3, t0, t2, t3, t4, DEST_ALWAYS_ALIGNED);
4303

    
4304
  __ bind(&return_v0);
4305
  Counters* counters = masm->isolate()->counters();
4306
  __ IncrementCounter(counters->sub_string_native(), 1, a3, t0);
4307
  __ DropAndRet(3);
4308

    
4309
  // Just jump to runtime to create the sub string.
4310
  __ bind(&runtime);
4311
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
4312

    
4313
  __ bind(&single_char);
4314
  // v0: original string
4315
  // a1: instance type
4316
  // a2: length
4317
  // a3: from index (untagged)
4318
  __ SmiTag(a3, a3);
4319
  StringCharAtGenerator generator(
4320
      v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
4321
  generator.GenerateFast(masm);
4322
  __ DropAndRet(3);
4323
  generator.SkipSlow(masm, &runtime);
4324
}
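
// Illustrative sketch (not part of the stub): the untagged-index checks the
// fast path above performs before copying; any other combination falls
// through to Runtime::kSubString.  The helper name is hypothetical.
static bool SubStringFastPathOk(int from, int to, int string_length) {
  if (from < 0) return false;              // From < 0.
  if (from > to) return false;             // Fail if from > to.
  int result_length = to - from;
  return result_length <= string_length;   // The unsigned "hi" check above.
}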
4325

    
4326

    
4327
void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
4328
                                                      Register left,
4329
                                                      Register right,
4330
                                                      Register scratch1,
4331
                                                      Register scratch2,
4332
                                                      Register scratch3) {
4333
  Register length = scratch1;
4334

    
4335
  // Compare lengths.
4336
  Label strings_not_equal, check_zero_length;
4337
  __ lw(length, FieldMemOperand(left, String::kLengthOffset));
4338
  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
4339
  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
4340
  __ bind(&strings_not_equal);
4341
  ASSERT(is_int16(NOT_EQUAL));
4342
  __ Ret(USE_DELAY_SLOT);
4343
  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
4344

    
4345
  // Check if the length is zero.
4346
  Label compare_chars;
4347
  __ bind(&check_zero_length);
4348
  STATIC_ASSERT(kSmiTag == 0);
4349
  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
4350
  ASSERT(is_int16(EQUAL));
4351
  __ Ret(USE_DELAY_SLOT);
4352
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4353

    
4354
  // Compare characters.
4355
  __ bind(&compare_chars);
4356

    
4357
  GenerateAsciiCharsCompareLoop(masm,
4358
                                left, right, length, scratch2, scratch3, v0,
4359
                                &strings_not_equal);
4360

    
4361
  // Characters are equal.
4362
  __ Ret(USE_DELAY_SLOT);
4363
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4364
}
4365

    
4366

    
4367
void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
4368
                                                        Register left,
4369
                                                        Register right,
4370
                                                        Register scratch1,
4371
                                                        Register scratch2,
4372
                                                        Register scratch3,
4373
                                                        Register scratch4) {
4374
  Label result_not_equal, compare_lengths;
4375
  // Find minimum length and length difference.
4376
  __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
4377
  __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
4378
  __ Subu(scratch3, scratch1, Operand(scratch2));
4379
  Register length_delta = scratch3;
4380
  __ slt(scratch4, scratch2, scratch1);
4381
  __ Movn(scratch1, scratch2, scratch4);
4382
  Register min_length = scratch1;
4383
  STATIC_ASSERT(kSmiTag == 0);
4384
  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
4385

    
4386
  // Compare loop.
4387
  GenerateAsciiCharsCompareLoop(masm,
4388
                                left, right, min_length, scratch2, scratch4, v0,
4389
                                &result_not_equal);
4390

    
4391
  // Compare lengths - strings up to min-length are equal.
4392
  __ bind(&compare_lengths);
4393
  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
4394
  // Use length_delta as result if it's zero.
4395
  __ mov(scratch2, length_delta);
4396
  __ mov(scratch4, zero_reg);
4397
  __ mov(v0, zero_reg);
4398

    
4399
  __ bind(&result_not_equal);
4400
  // Conditionally update the result based either on length_delta or
4401
  // the last comparison performed in the loop above.
4402
  Label ret;
4403
  __ Branch(&ret, eq, scratch2, Operand(scratch4));
4404
  __ li(v0, Operand(Smi::FromInt(GREATER)));
4405
  __ Branch(&ret, gt, scratch2, Operand(scratch4));
4406
  __ li(v0, Operand(Smi::FromInt(LESS)));
4407
  __ bind(&ret);
4408
  __ Ret();
4409
}
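
// Illustrative sketch (not part of the stub): the comparison above in plain
// C++.  Characters decide the result up to the shorter length; after that the
// length difference decides.  The helper name is hypothetical.
static int CompareFlatAsciiStringsModel(const uint8_t* left, int left_length,
                                        const uint8_t* right,
                                        int right_length) {
  int min_length = (left_length < right_length) ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) {
      return (left[i] < right[i]) ? LESS : GREATER;  // result_not_equal.
    }
  }
  // Equal up to min_length: the length delta decides.
  if (left_length == right_length) return EQUAL;
  return (left_length < right_length) ? LESS : GREATER;
}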
4410

    
4411

    
4412
void StringCompareStub::GenerateAsciiCharsCompareLoop(
4413
    MacroAssembler* masm,
4414
    Register left,
4415
    Register right,
4416
    Register length,
4417
    Register scratch1,
4418
    Register scratch2,
4419
    Register scratch3,
4420
    Label* chars_not_equal) {
4421
  // Change index to run from -length to -1 by adding length to string
4422
  // start. This means that loop ends when index reaches zero, which
4423
  // doesn't need an additional compare.
4424
  __ SmiUntag(length);
4425
  __ Addu(scratch1, length,
4426
          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4427
  __ Addu(left, left, Operand(scratch1));
4428
  __ Addu(right, right, Operand(scratch1));
4429
  __ Subu(length, zero_reg, length);
4430
  Register index = length;  // index = -length;
4431

    
4432

    
4433
  // Compare loop.
4434
  Label loop;
4435
  __ bind(&loop);
4436
  __ Addu(scratch3, left, index);
4437
  __ lbu(scratch1, MemOperand(scratch3));
4438
  __ Addu(scratch3, right, index);
4439
  __ lbu(scratch2, MemOperand(scratch3));
4440
  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
4441
  __ Addu(index, index, 1);
4442
  __ Branch(&loop, ne, index, Operand(zero_reg));
4443
}
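
// Illustrative sketch (not part of the stub): the negative-index trick used
// above.  Both cursors are advanced one past the last character and the index
// runs from -length up to zero, so the loop needs no separate end-of-string
// compare.  The helper name is hypothetical.
static bool AsciiCharsEqualModel(const uint8_t* left, const uint8_t* right,
                                 int length) {
  const uint8_t* left_end = left + length;
  const uint8_t* right_end = right + length;
  for (int index = -length; index != 0; index++) {
    if (left_end[index] != right_end[index]) return false;  // chars_not_equal.
  }
  return true;
}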
4444

    
4445

    
4446
void StringCompareStub::Generate(MacroAssembler* masm) {
4447
  Label runtime;
4448

    
4449
  Counters* counters = masm->isolate()->counters();
4450

    
4451
  // Stack frame on entry.
4452
  //  sp[0]: right string
4453
  //  sp[4]: left string
4454
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
4455
  __ lw(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
4456

    
4457
  Label not_same;
4458
  __ Branch(&not_same, ne, a0, Operand(a1));
4459
  STATIC_ASSERT(EQUAL == 0);
4460
  STATIC_ASSERT(kSmiTag == 0);
4461
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4462
  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
4463
  __ DropAndRet(2);
4464

    
4465
  __ bind(&not_same);
4466

    
4467
  // Check that both objects are sequential ASCII strings.
4468
  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
4469

    
4470
  // Compare flat ASCII strings natively. Remove arguments from stack first.
4471
  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
4472
  __ Addu(sp, sp, Operand(2 * kPointerSize));
4473
  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
4474

    
4475
  __ bind(&runtime);
4476
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
4477
}
4478

    
4479

    
4480
void StringAddStub::Generate(MacroAssembler* masm) {
4481
  Label call_runtime, call_builtin;
4482
  Builtins::JavaScript builtin_id = Builtins::ADD;
4483

    
4484
  Counters* counters = masm->isolate()->counters();
4485

    
4486
  // Stack on entry:
4487
  // sp[0]: second argument (right).
4488
  // sp[4]: first argument (left).
4489

    
4490
  // Load the two arguments.
4491
  __ lw(a0, MemOperand(sp, 1 * kPointerSize));  // First argument.
4492
  __ lw(a1, MemOperand(sp, 0 * kPointerSize));  // Second argument.
4493

    
4494
  // Make sure that both arguments are strings if not known in advance.
4495
  // Otherwise, at least one of the arguments is definitely a string,
4496
  // and we convert the one that is not known to be a string.
4497
  if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
4498
    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
4499
    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
4500
    __ JumpIfEitherSmi(a0, a1, &call_runtime);
4501
    // Load instance types.
4502
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
4503
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
4504
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
4505
    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
4506
    STATIC_ASSERT(kStringTag == 0);
4507
    // If either is not a string, go to runtime.
4508
    __ Or(t4, t0, Operand(t1));
4509
    __ And(t4, t4, Operand(kIsNotStringMask));
4510
    __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
4511
  } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
4512
    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
4513
    GenerateConvertArgument(
4514
        masm, 1 * kPointerSize, a0, a2, a3, t0, t1, &call_builtin);
4515
    builtin_id = Builtins::STRING_ADD_RIGHT;
4516
  } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
4517
    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
4518
    GenerateConvertArgument(
4519
        masm, 0 * kPointerSize, a1, a2, a3, t0, t1, &call_builtin);
4520
    builtin_id = Builtins::STRING_ADD_LEFT;
4521
  }
4522

    
4523
  // Both arguments are strings.
4524
  // a0: first string
4525
  // a1: second string
4526
  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4527
  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4528
  {
4529
    Label strings_not_empty;
4530
    // Check if either of the strings is empty. In that case return the other.
4531
    // These tests use a zero-length check on the string length, which is a Smi.
4532
    // Assert that Smi::FromInt(0) is really 0.
4533
    STATIC_ASSERT(kSmiTag == 0);
4534
    ASSERT(Smi::FromInt(0) == 0);
4535
    __ lw(a2, FieldMemOperand(a0, String::kLengthOffset));
4536
    __ lw(a3, FieldMemOperand(a1, String::kLengthOffset));
4537
    __ mov(v0, a0);       // Assume we'll return first string (from a0).
4538
    __ Movz(v0, a1, a2);  // If first is empty, return second (from a1).
4539
    __ slt(t4, zero_reg, a2);   // if (a2 > 0) t4 = 1.
4540
    __ slt(t5, zero_reg, a3);   // if (a3 > 0) t5 = 1.
4541
    __ and_(t4, t4, t5);        // Branch if both strings were non-empty.
4542
    __ Branch(&strings_not_empty, ne, t4, Operand(zero_reg));
4543

    
4544
    __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4545
    __ DropAndRet(2);
4546

    
4547
    __ bind(&strings_not_empty);
4548
  }
4549

    
4550
  // Untag both string-lengths.
4551
  __ sra(a2, a2, kSmiTagSize);
4552
  __ sra(a3, a3, kSmiTagSize);
4553

    
4554
  // Both strings are non-empty.
4555
  // a0: first string
4556
  // a1: second string
4557
  // a2: length of first string
4558
  // a3: length of second string
4559
  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4560
  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4561
  // Look at the length of the result of adding the two strings.
4562
  Label string_add_flat_result, longer_than_two;
4563
  // Adding two lengths can't overflow.
4564
  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
4565
  __ Addu(t2, a2, Operand(a3));
4566
  // Use the string table when adding two one character strings, as it
4567
  // helps later optimizations to return a string here.
4568
  __ Branch(&longer_than_two, ne, t2, Operand(2));
4569

    
4570
  // Check that both strings are non-external ASCII strings.
4571
  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4572
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
4573
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
4574
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
4575
    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
4576
  }
4577
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(t0, t1, t2, t3,
4578
                                                 &call_runtime);
4579

    
4580
  // Get the two characters forming the new string.
4581
  __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
4582
  __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
4583

    
4584
  // Try to lookup two character string in string table. If it is not found
4585
  // just allocate a new one.
4586
  Label make_two_character_string;
4587
  StringHelper::GenerateTwoCharacterStringTableProbe(
4588
      masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string);
4589
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4590
  __ DropAndRet(2);
4591

    
4592
  __ bind(&make_two_character_string);
4593
  // The resulting string has length 2, and the first characters of the two
4594
  // strings are combined into a single halfword in the a2 register.
4595
  // So we can fill the resulting string without two loops, using a single
4596
  // halfword store instruction (which assumes that the processor is
4597
  // in little-endian mode).
4598
  __ li(t2, Operand(2));
4599
  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
4600
  __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
4601
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4602
  __ DropAndRet(2);
4603

    
4604
  __ bind(&longer_than_two);
4605
  // Check if resulting string will be flat.
4606
  __ Branch(&string_add_flat_result, lt, t2, Operand(ConsString::kMinLength));
4607
  // Handle exceptionally long strings in the runtime system.
4608
  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
4609
  ASSERT(IsPowerOf2(String::kMaxLength + 1));
4610
  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
4611
  __ Branch(&call_runtime, hs, t2, Operand(String::kMaxLength + 1));
4612

    
4613
  // If result is not supposed to be flat, allocate a cons string object.
4614
  // If both strings are ASCII the result is an ASCII cons string.
4615
  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4616
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
4617
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
4618
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
4619
    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
4620
  }
4621
  Label non_ascii, allocated, ascii_data;
4622
  STATIC_ASSERT(kTwoByteStringTag == 0);
4623
  // Branch to non_ascii if either string-encoding field is zero (non-ASCII).
4624
  __ And(t4, t0, Operand(t1));
4625
  __ And(t4, t4, Operand(kStringEncodingMask));
4626
  __ Branch(&non_ascii, eq, t4, Operand(zero_reg));
4627

    
4628
  // Allocate an ASCII cons string.
4629
  __ bind(&ascii_data);
4630
  __ AllocateAsciiConsString(v0, t2, t0, t1, &call_runtime);
4631
  __ bind(&allocated);
4632
  // Fill the fields of the cons string.
4633
  Label skip_write_barrier, after_writing;
4634
  ExternalReference high_promotion_mode = ExternalReference::
4635
      new_space_high_promotion_mode_active_address(masm->isolate());
4636
  __ li(t0, Operand(high_promotion_mode));
4637
  __ lw(t0, MemOperand(t0, 0));
4638
  __ Branch(&skip_write_barrier, eq, t0, Operand(zero_reg));
4639

    
4640
  __ mov(t3, v0);
4641
  __ sw(a0, FieldMemOperand(t3, ConsString::kFirstOffset));
4642
  __ RecordWriteField(t3,
4643
                      ConsString::kFirstOffset,
4644
                      a0,
4645
                      t0,
4646
                      kRAHasNotBeenSaved,
4647
                      kDontSaveFPRegs);
4648
  __ sw(a1, FieldMemOperand(t3, ConsString::kSecondOffset));
4649
  __ RecordWriteField(t3,
4650
                      ConsString::kSecondOffset,
4651
                      a1,
4652
                      t0,
4653
                      kRAHasNotBeenSaved,
4654
                      kDontSaveFPRegs);
4655
  __ jmp(&after_writing);
4656

    
4657
  __ bind(&skip_write_barrier);
4658
  __ sw(a0, FieldMemOperand(v0, ConsString::kFirstOffset));
4659
  __ sw(a1, FieldMemOperand(v0, ConsString::kSecondOffset));
4660

    
4661
  __ bind(&after_writing);
4662

    
4663
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4664
  __ DropAndRet(2);
4665

    
4666
  __ bind(&non_ascii);
4667
  // At least one of the strings is two-byte. Check whether it happens
4668
  // to contain only one-byte characters.
4669
  // t0: first instance type.
4670
  // t1: second instance type.
4671
  // Branch to ascii_data if _both_ instances have kOneByteDataHintMask set.
4672
  __ And(at, t0, Operand(kOneByteDataHintMask));
4673
  __ and_(at, at, t1);
4674
  __ Branch(&ascii_data, ne, at, Operand(zero_reg));
4675
  __ Xor(t0, t0, Operand(t1));
4676
  STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
4677
  __ And(t0, t0, Operand(kOneByteStringTag | kOneByteDataHintTag));
4678
  __ Branch(&ascii_data, eq, t0,
4679
      Operand(kOneByteStringTag | kOneByteDataHintTag));
4680

    
4681
  // Allocate a two byte cons string.
4682
  __ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
4683
  __ Branch(&allocated);
4684

    
4685
  // We cannot encounter sliced strings or cons strings here since:
4686
  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
4687
  // Handle creating a flat result from either external or sequential strings.
4688
  // Locate the first characters' locations.
4689
  // a0: first string
4690
  // a1: second string
4691
  // a2: length of first string
4692
  // a3: length of second string
4693
  // t0: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4694
  // t1: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
4695
  // t2: sum of lengths.
4696
  Label first_prepared, second_prepared;
4697
  __ bind(&string_add_flat_result);
4698
  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
4699
    __ lw(t0, FieldMemOperand(a0, HeapObject::kMapOffset));
4700
    __ lw(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
4701
    __ lbu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
4702
    __ lbu(t1, FieldMemOperand(t1, Map::kInstanceTypeOffset));
4703
  }
4704
  // Check whether both strings have the same encoding.
4705
  __ Xor(t3, t0, Operand(t1));
4706
  __ And(t3, t3, Operand(kStringEncodingMask));
4707
  __ Branch(&call_runtime, ne, t3, Operand(zero_reg));
4708

    
4709
  STATIC_ASSERT(kSeqStringTag == 0);
4710
  __ And(t4, t0, Operand(kStringRepresentationMask));
4711

    
4712
  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4713
  Label skip_first_add;
4714
  __ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
4715
  __ Branch(USE_DELAY_SLOT, &first_prepared);
4716
  __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
4717
  __ bind(&skip_first_add);
4718
  // External string: rule out short external string and load string resource.
4719
  STATIC_ASSERT(kShortExternalStringTag != 0);
4720
  __ And(t4, t0, Operand(kShortExternalStringMask));
4721
  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
4722
  __ lw(t3, FieldMemOperand(a0, ExternalString::kResourceDataOffset));
4723
  __ bind(&first_prepared);
4724

    
4725
  STATIC_ASSERT(kSeqStringTag == 0);
4726
  __ And(t4, t1, Operand(kStringRepresentationMask));
4727
  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4728
  Label skip_second_add;
4729
  __ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
4730
  __ Branch(USE_DELAY_SLOT, &second_prepared);
4731
  __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
4732
  __ bind(&skip_second_add);
4733
  // External string: rule out short external string and load string resource.
4734
  STATIC_ASSERT(kShortExternalStringTag != 0);
4735
  __ And(t4, t1, Operand(kShortExternalStringMask));
4736
  __ Branch(&call_runtime, ne, t4, Operand(zero_reg));
4737
  __ lw(a1, FieldMemOperand(a1, ExternalString::kResourceDataOffset));
4738
  __ bind(&second_prepared);
4739

    
4740
  Label non_ascii_string_add_flat_result;
4741
  // t3: first character of first string
4742
  // a1: first character of second string
4743
  // a2: length of first string
4744
  // a3: length of second string
4745
  // t2: sum of lengths.
4746
  // Both strings have the same encoding.
4747
  STATIC_ASSERT(kTwoByteStringTag == 0);
4748
  __ And(t4, t1, Operand(kStringEncodingMask));
4749
  __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
4750

    
4751
  __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
4752
  __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
4753
  // v0: result string.
4754
  // t3: first character of first string.
4755
  // a1: first character of second string
4756
  // a2: length of first string.
4757
  // a3: length of second string.
4758
  // t2: first character of result.
4759

    
4760
  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, true);
4761
  // t2: next character of result.
4762
  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, true);
4763
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4764
  __ DropAndRet(2);
4765

    
4766
  __ bind(&non_ascii_string_add_flat_result);
4767
  __ AllocateTwoByteString(v0, t2, t0, t1, t5, &call_runtime);
4768
  __ Addu(t2, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4769
  // v0: result string.
4770
  // t3: first character of first string.
4771
  // a1: first character of second string.
4772
  // a2: length of first string.
4773
  // a3: length of second string.
4774
  // t2: first character of result.
4775
  StringHelper::GenerateCopyCharacters(masm, t2, t3, a2, t0, false);
4776
  // t2: next character of result.
4777
  StringHelper::GenerateCopyCharacters(masm, t2, a1, a3, t0, false);
4778

    
4779
  __ IncrementCounter(counters->string_add_native(), 1, a2, a3);
4780
  __ DropAndRet(2);
4781

    
4782
  // Just jump to runtime to add the two strings.
4783
  __ bind(&call_runtime);
4784
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4785

    
4786
  if (call_builtin.is_linked()) {
4787
    __ bind(&call_builtin);
4788
    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
4789
  }
4790
}
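
// Illustrative sketch (not part of the stub): which fast path the code above
// picks, expressed purely in terms of the two untagged lengths.  The enum and
// function names are hypothetical; the thresholds are the
// ConsString::kMinLength and String::kMaxLength checks above.
enum StringAddFastPath {
  RETURN_OTHER_OPERAND,   // One side is the empty string.
  TWO_CHAR_TABLE_LOOKUP,  // Total length 2: probe the string table.
  FLAT_COPY,              // Short result: allocate and copy both parts.
  CONS_STRING,            // Long enough: allocate a cons string.
  ADD_IN_RUNTIME          // Too long: Runtime::kStringAdd.
};

static StringAddFastPath ChooseStringAddPath(int left_length,
                                             int right_length) {
  if (left_length == 0 || right_length == 0) return RETURN_OTHER_OPERAND;
  int length = left_length + right_length;  // Cannot overflow (see above).
  if (length == 2) return TWO_CHAR_TABLE_LOOKUP;
  if (length < ConsString::kMinLength) return FLAT_COPY;
  if (length > String::kMaxLength) return ADD_IN_RUNTIME;
  return CONS_STRING;
}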
4791

    
4792

    
4793
void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
4794
  __ push(a0);
4795
  __ push(a1);
4796
}
4797

    
4798

    
4799
void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
4800
  __ pop(a1);
4801
  __ pop(a0);
4802
}
4803

    
4804

    
4805
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
4806
                                            int stack_offset,
4807
                                            Register arg,
4808
                                            Register scratch1,
4809
                                            Register scratch2,
4810
                                            Register scratch3,
4811
                                            Register scratch4,
4812
                                            Label* slow) {
4813
  // First check if the argument is already a string.
4814
  Label not_string, done;
4815
  __ JumpIfSmi(arg, &not_string);
4816
  __ GetObjectType(arg, scratch1, scratch1);
4817
  __ Branch(&done, lt, scratch1, Operand(FIRST_NONSTRING_TYPE));
4818

    
4819
  // Check the number to string cache.
4820
  __ bind(&not_string);
4821
  // Puts the cached result into scratch1.
4822
  __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
4823
  __ mov(arg, scratch1);
4824
  __ sw(arg, MemOperand(sp, stack_offset));
4825
  __ bind(&done);
4826
}
4827

    
4828

    
4829
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4830
  ASSERT(state_ == CompareIC::SMI);
4831
  Label miss;
4832
  __ Or(a2, a1, a0);
4833
  __ JumpIfNotSmi(a2, &miss);
4834

    
4835
  if (GetCondition() == eq) {
4836
    // For equality we do not care about the sign of the result.
4837
    __ Ret(USE_DELAY_SLOT);
4838
    __ Subu(v0, a0, a1);
4839
  } else {
4840
    // Untag before subtracting to avoid handling overflow.
4841
    __ SmiUntag(a1);
4842
    __ SmiUntag(a0);
4843
    __ Ret(USE_DELAY_SLOT);
4844
    __ Subu(v0, a1, a0);
4845
  }
4846

    
4847
  __ bind(&miss);
4848
  GenerateMiss(masm);
4849
}
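
// Illustrative sketch (not part of the stub): why the two subtractions above
// are safe.  Smis carry their value in the upper 31 bits (kSmiTagSize == 1).
// The helper name is hypothetical.
static int32_t CompareSmisModel(int32_t left_tagged, int32_t right_tagged,
                                bool equality_only) {
  if (equality_only) {
    // Only zero vs. non-zero matters, so the tagged values are subtracted
    // directly; unsigned arithmetic keeps the wrap-around well defined in C++.
    return static_cast<int32_t>(static_cast<uint32_t>(left_tagged) -
                                static_cast<uint32_t>(right_tagged));
  }
  // For an ordered compare, untag first so the 31-bit payloads cannot
  // overflow a 32-bit subtraction.
  return (left_tagged >> 1) - (right_tagged >> 1);
}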
4850

    
4851

    
4852
void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
4853
  ASSERT(state_ == CompareIC::NUMBER);
4854

    
4855
  Label generic_stub;
4856
  Label unordered, maybe_undefined1, maybe_undefined2;
4857
  Label miss;
4858

    
4859
  if (left_ == CompareIC::SMI) {
4860
    __ JumpIfNotSmi(a1, &miss);
4861
  }
4862
  if (right_ == CompareIC::SMI) {
4863
    __ JumpIfNotSmi(a0, &miss);
4864
  }
4865

    
4866
  // Inlining the double comparison and falling back to the general compare
4867
  // stub if NaN is involved.
4868
  // Load left and right operand.
4869
  Label done, left, left_smi, right_smi;
4870
  __ JumpIfSmi(a0, &right_smi);
4871
  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
4872
              DONT_DO_SMI_CHECK);
4873
  __ Subu(a2, a0, Operand(kHeapObjectTag));
4874
  __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
4875
  __ Branch(&left);
4876
  __ bind(&right_smi);
4877
  __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
4878
  FPURegister single_scratch = f6;
4879
  __ mtc1(a2, single_scratch);
4880
  __ cvt_d_w(f2, single_scratch);
4881

    
4882
  __ bind(&left);
4883
  __ JumpIfSmi(a1, &left_smi);
4884
  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
4885
              DONT_DO_SMI_CHECK);
4886
  __ Subu(a2, a1, Operand(kHeapObjectTag));
4887
  __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
4888
  __ Branch(&done);
4889
  __ bind(&left_smi);
4890
  __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
4891
  single_scratch = f8;
4892
  __ mtc1(a2, single_scratch);
4893
  __ cvt_d_w(f0, single_scratch);
4894

    
4895
  __ bind(&done);
4896

    
4897
  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
4898
  Label fpu_eq, fpu_lt;
4899
  // Test if equal, and also handle the unordered/NaN case.
4900
  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
4901

    
4902
  // Test if less (unordered case is already handled).
4903
  __ BranchF(&fpu_lt, NULL, lt, f0, f2);
4904

    
4905
  // Otherwise it's greater, so just fall thru, and return.
4906
  ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
4907
  __ Ret(USE_DELAY_SLOT);
4908
  __ li(v0, Operand(GREATER));
4909

    
4910
  __ bind(&fpu_eq);
4911
  __ Ret(USE_DELAY_SLOT);
4912
  __ li(v0, Operand(EQUAL));
4913

    
4914
  __ bind(&fpu_lt);
4915
  __ Ret(USE_DELAY_SLOT);
4916
  __ li(v0, Operand(LESS));
4917

    
4918
  __ bind(&unordered);
4919
  __ bind(&generic_stub);
4920
  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
4921
                     CompareIC::GENERIC);
4922
  __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
4923

    
4924
  __ bind(&maybe_undefined1);
4925
  if (Token::IsOrderedRelationalCompareOp(op_)) {
4926
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4927
    __ Branch(&miss, ne, a0, Operand(at));
4928
    __ JumpIfSmi(a1, &unordered);
4929
    __ GetObjectType(a1, a2, a2);
4930
    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
4931
    __ jmp(&unordered);
4932
  }
4933

    
4934
  __ bind(&maybe_undefined2);
4935
  if (Token::IsOrderedRelationalCompareOp(op_)) {
4936
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4937
    __ Branch(&unordered, eq, a1, Operand(at));
4938
  }
4939

    
4940
  __ bind(&miss);
4941
  GenerateMiss(masm);
4942
}
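
// Illustrative sketch (not part of the stub): the result mapping above for
// two heap-number (double) operands.  NaN operands take the unordered path
// and fall back to the generic stub.  The helper name is hypothetical.
static int CompareHeapNumbersModel(double left, double right,
                                   bool* unordered) {
  *unordered = false;
  if (left == right) return EQUAL;   // fpu_eq.
  if (left < right) return LESS;     // fpu_lt.
  if (left > right) return GREATER;  // Fall-through case above.
  *unordered = true;                 // At least one operand is NaN.
  return EQUAL;                      // Ignored; the generic stub decides.
}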
4943

    
4944

    
4945
void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
4946
  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
4947
  Label miss;
4948

    
4949
  // Registers containing left and right operands respectively.
4950
  Register left = a1;
4951
  Register right = a0;
4952
  Register tmp1 = a2;
4953
  Register tmp2 = a3;
4954

    
4955
  // Check that both operands are heap objects.
4956
  __ JumpIfEitherSmi(left, right, &miss);
4957

    
4958
  // Check that both operands are internalized strings.
4959
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
4960
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
4961
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
4962
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
4963
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
4964
  __ Or(tmp1, tmp1, Operand(tmp2));
4965
  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
4966
  __ Branch(&miss, ne, at, Operand(zero_reg));
4967

    
4968
  // Make sure a0 is non-zero. At this point input operands are
4969
  // guaranteed to be non-zero.
4970
  ASSERT(right.is(a0));
4971
  STATIC_ASSERT(EQUAL == 0);
4972
  STATIC_ASSERT(kSmiTag == 0);
4973
  __ mov(v0, right);
4974
  // Internalized strings are compared by identity.
4975
  __ Ret(ne, left, Operand(right));
4976
  ASSERT(is_int16(EQUAL));
4977
  __ Ret(USE_DELAY_SLOT);
4978
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
4979

    
4980
  __ bind(&miss);
4981
  GenerateMiss(masm);
4982
}
4983

    
4984

    
4985
void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
4986
  ASSERT(state_ == CompareIC::UNIQUE_NAME);
4987
  ASSERT(GetCondition() == eq);
4988
  Label miss;
4989

    
4990
  // Registers containing left and right operands respectively.
4991
  Register left = a1;
4992
  Register right = a0;
4993
  Register tmp1 = a2;
4994
  Register tmp2 = a3;
4995

    
4996
  // Check that both operands are heap objects.
4997
  __ JumpIfEitherSmi(left, right, &miss);
4998

    
4999
  // Check that both operands are unique names. This leaves the instance
5000
  // types loaded in tmp1 and tmp2.
5001
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
5002
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
5003
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
5004
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
5005

    
5006
  __ JumpIfNotUniqueName(tmp1, &miss);
5007
  __ JumpIfNotUniqueName(tmp2, &miss);
5008

    
5009
  // Use a0 as result
5010
  __ mov(v0, a0);
5011

    
5012
  // Unique names are compared by identity.
5013
  Label done;
5014
  __ Branch(&done, ne, left, Operand(right));
5015
  // Make sure a0 is non-zero. At this point input operands are
5016
  // guaranteed to be non-zero.
5017
  ASSERT(right.is(a0));
5018
  STATIC_ASSERT(EQUAL == 0);
5019
  STATIC_ASSERT(kSmiTag == 0);
5020
  __ li(v0, Operand(Smi::FromInt(EQUAL)));
5021
  __ bind(&done);
5022
  __ Ret();
5023

    
5024
  __ bind(&miss);
5025
  GenerateMiss(masm);
5026
}
5027

    
5028

    
5029
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
5030
  ASSERT(state_ == CompareIC::STRING);
5031
  Label miss;
5032

    
5033
  bool equality = Token::IsEqualityOp(op_);
5034

    
5035
  // Registers containing left and right operands respectively.
5036
  Register left = a1;
5037
  Register right = a0;
5038
  Register tmp1 = a2;
5039
  Register tmp2 = a3;
5040
  Register tmp3 = t0;
5041
  Register tmp4 = t1;
5042
  Register tmp5 = t2;
5043

    
5044
  // Check that both operands are heap objects.
5045
  __ JumpIfEitherSmi(left, right, &miss);
5046

    
5047
  // Check that both operands are strings. This leaves the instance
5048
  // types loaded in tmp1 and tmp2.
5049
  __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
5050
  __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
5051
  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
5052
  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
5053
  STATIC_ASSERT(kNotStringTag != 0);
5054
  __ Or(tmp3, tmp1, tmp2);
5055
  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
5056
  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
5057

    
5058
  // Fast check for identical strings.
5059
  Label left_ne_right;
5060
  STATIC_ASSERT(EQUAL == 0);
5061
  STATIC_ASSERT(kSmiTag == 0);
5062
  __ Branch(&left_ne_right, ne, left, Operand(right));
5063
  __ Ret(USE_DELAY_SLOT);
5064
  __ mov(v0, zero_reg);  // In the delay slot.
5065
  __ bind(&left_ne_right);
5066

    
5067
  // Handle not identical strings.
5068

    
5069
  // Check that both strings are internalized strings. If they are, we're done
5070
  // because we already know they are not identical. We know they are both
5071
  // strings.
5072
  if (equality) {
5073
    ASSERT(GetCondition() == eq);
5074
    STATIC_ASSERT(kInternalizedTag == 0);
5075
    __ Or(tmp3, tmp1, Operand(tmp2));
5076
    __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
5077
    Label is_symbol;
5078
    __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
5079
    // Make sure a0 is non-zero. At this point input operands are
5080
    // guaranteed to be non-zero.
5081
    ASSERT(right.is(a0));
5082
    __ Ret(USE_DELAY_SLOT);
5083
    __ mov(v0, a0);  // In the delay slot.
5084
    __ bind(&is_symbol);
5085
  }
5086

    
5087
  // Check that both strings are sequential ASCII.
5088
  Label runtime;
5089
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
5090
      tmp1, tmp2, tmp3, tmp4, &runtime);
5091

    
5092
  // Compare flat ASCII strings. Returns when done.
5093
  if (equality) {
5094
    StringCompareStub::GenerateFlatAsciiStringEquals(
5095
        masm, left, right, tmp1, tmp2, tmp3);
5096
  } else {
5097
    StringCompareStub::GenerateCompareFlatAsciiStrings(
5098
        masm, left, right, tmp1, tmp2, tmp3, tmp4);
5099
  }
5100

    
5101
  // Handle more complex cases in runtime.
5102
  __ bind(&runtime);
5103
  __ Push(left, right);
5104
  if (equality) {
5105
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
5106
  } else {
5107
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
5108
  }
5109

    
5110
  __ bind(&miss);
5111
  GenerateMiss(masm);
5112
}
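
// Illustrative sketch (not part of the stub): the order of the checks above,
// expressed as a decision over precomputed facts about the two strings.  The
// enum and function names are hypothetical.
enum StringCompareICPath {
  IDENTICAL_EQUAL,         // Same object: EQUAL without touching characters.
  INTERNALIZED_NOT_EQUAL,  // Both internalized, not identical: not equal.
  FLAT_ASCII_COMPARE,      // Both sequential ASCII: compare inline.
  STRING_COMPARE_RUNTIME   // Everything else: StringEquals / StringCompare.
};

static StringCompareICPath ChooseStringICPath(bool identical,
                                              bool both_internalized,
                                              bool both_sequential_ascii,
                                              bool equality) {
  if (identical) return IDENTICAL_EQUAL;
  if (equality && both_internalized) return INTERNALIZED_NOT_EQUAL;
  if (both_sequential_ascii) return FLAT_ASCII_COMPARE;
  return STRING_COMPARE_RUNTIME;
}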
5113

    
5114

    
5115
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
5116
  ASSERT(state_ == CompareIC::OBJECT);
5117
  Label miss;
5118
  __ And(a2, a1, Operand(a0));
5119
  __ JumpIfSmi(a2, &miss);
5120

    
5121
  __ GetObjectType(a0, a2, a2);
5122
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
5123
  __ GetObjectType(a1, a2, a2);
5124
  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
5125

    
5126
  ASSERT(GetCondition() == eq);
5127
  __ Ret(USE_DELAY_SLOT);
5128
  __ subu(v0, a0, a1);
5129

    
5130
  __ bind(&miss);
5131
  GenerateMiss(masm);
5132
}
5133

    
5134

    
5135
void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
5136
  Label miss;
5137
  __ And(a2, a1, a0);
5138
  __ JumpIfSmi(a2, &miss);
5139
  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
5140
  __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
5141
  __ Branch(&miss, ne, a2, Operand(known_map_));
5142
  __ Branch(&miss, ne, a3, Operand(known_map_));
5143

    
5144
  __ Ret(USE_DELAY_SLOT);
5145
  __ subu(v0, a0, a1);
5146

    
5147
  __ bind(&miss);
5148
  GenerateMiss(masm);
5149
}
5150

    
5151

    
5152
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
5153
  {
5154
    // Call the runtime system in a fresh internal frame.
5155
    ExternalReference miss =
5156
        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
5157
    FrameScope scope(masm, StackFrame::INTERNAL);
5158
    __ Push(a1, a0);
5159
    __ push(ra);
5160
    __ Push(a1, a0);
5161
    __ li(t0, Operand(Smi::FromInt(op_)));
5162
    __ addiu(sp, sp, -kPointerSize);
5163
    __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
5164
    __ sw(t0, MemOperand(sp));  // In the delay slot.
5165
    // Compute the entry point of the rewritten stub.
5166
    __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
5167
    // Restore registers.
5168
    __ Pop(a1, a0, ra);
5169
  }
5170
  __ Jump(a2);
5171
}
5172

    
5173

    
5174
void DirectCEntryStub::Generate(MacroAssembler* masm) {
5175
  // Make room for the arguments to fit the C calling convention. Most of the callers
5176
  // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
5177
  // so they handle stack restoring and we don't have to do that here.
5178
  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
5179
  // kCArgsSlotsSize stack space after the call.
5180
  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
5181
  // Place the return address on the stack, making the call
5182
  // GC safe. The RegExp backend also relies on this.
5183
  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
5184
  __ Call(t9);  // Call the C++ function.
5185
  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
5186

    
5187
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
5188
    // In case of an error the return address may point to a memory area
5189
    // filled with kZapValue by the GC.
5190
    // Dereference the address and check for this.
5191
    __ lw(t0, MemOperand(t9));
5192
    __ Assert(ne, kReceivedInvalidReturnAddress, t0,
5193
        Operand(reinterpret_cast<uint32_t>(kZapValue)));
5194
  }
5195
  __ Jump(t9);
5196
}
5197

    
5198

    
5199
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
5200
                                    Register target) {
5201
  intptr_t loc =
5202
      reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
5203
  __ Move(t9, target);
5204
  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
5205
  __ Call(ra);
5206
}
5207

    
5208

    
5209
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
5210
                                                      Label* miss,
5211
                                                      Label* done,
5212
                                                      Register receiver,
5213
                                                      Register properties,
5214
                                                      Handle<Name> name,
5215
                                                      Register scratch0) {
5216
  ASSERT(name->IsUniqueName());
5217
  // If the names in slots 1 to kProbes - 1 probed for this hash value are all
5218
  // different from the given name, and the kProbes-th slot is unused (its name
5219
  // is the undefined value), then the hash table is guaranteed not to contain
5220
  // the property. This holds even if some slots represent deleted properties
5221
  // (their names are the hole value).
5222
  for (int i = 0; i < kInlinedProbes; i++) {
5223
    // scratch0 points to properties hash.
5224
    // Compute the masked index: (hash + i + i * i) & mask.
5225
    Register index = scratch0;
5226
    // Capacity is smi 2^n.
5227
    __ lw(index, FieldMemOperand(properties, kCapacityOffset));
5228
    __ Subu(index, index, Operand(1));
5229
    __ And(index, index, Operand(
5230
        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
5231

    
5232
    // Scale the index by multiplying by the entry size.
5233
    ASSERT(NameDictionary::kEntrySize == 3);
5234
    __ sll(at, index, 1);
5235
    __ Addu(index, index, at);
5236

    
5237
    Register entity_name = scratch0;
5238
    // Having undefined at this place means the name is not contained.
5239
    ASSERT_EQ(kSmiTagSize, 1);
5240
    Register tmp = properties;
5241
    __ sll(scratch0, index, 1);
5242
    __ Addu(tmp, properties, scratch0);
5243
    __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
5244

    
5245
    ASSERT(!tmp.is(entity_name));
5246
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
5247
    __ Branch(done, eq, entity_name, Operand(tmp));
5248

    
5249
    // Load the hole ready for use below:
5250
    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
5251

    
5252
    // Stop if found the property.
5253
    __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
5254

    
5255
    Label good;
5256
    __ Branch(&good, eq, entity_name, Operand(tmp));
5257

    
5258
    // Check if the entry name is not a unique name.
5259
    __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
5260
    __ lbu(entity_name,
5261
           FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
5262
    __ JumpIfNotUniqueName(entity_name, miss);
5263
    __ bind(&good);
5264

    
5265
    // Restore the properties.
5266
    __ lw(properties,
5267
          FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5268
  }
5269

    
5270
  const int spill_mask =
5271
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() | a3.bit() |
5272
       a2.bit() | a1.bit() | a0.bit() | v0.bit());
5273

    
5274
  __ MultiPush(spill_mask);
5275
  __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
5276
  __ li(a1, Operand(Handle<Name>(name)));
5277
  NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
5278
  __ CallStub(&stub);
5279
  __ mov(at, v0);
5280
  __ MultiPop(spill_mask);
5281

    
5282
  __ Branch(done, eq, at, Operand(zero_reg));
5283
  __ Branch(miss, ne, at, Operand(zero_reg));
5284
}
5285

    
5286

    
5287
// Probe the name dictionary in the |elements| register. Jump to the
5288
// |done| label if a property with the given name is found. Jump to
5289
// the |miss| label otherwise.
5290
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
5291
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
5292
                                                      Label* miss,
5293
                                                      Label* done,
5294
                                                      Register elements,
5295
                                                      Register name,
5296
                                                      Register scratch1,
5297
                                                      Register scratch2) {
5298
  ASSERT(!elements.is(scratch1));
5299
  ASSERT(!elements.is(scratch2));
5300
  ASSERT(!name.is(scratch1));
5301
  ASSERT(!name.is(scratch2));
5302

    
5303
  __ AssertName(name);
5304

    
5305
  // Compute the capacity mask.
5306
  __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset));
5307
  __ sra(scratch1, scratch1, kSmiTagSize);  // convert smi to int
5308
  __ Subu(scratch1, scratch1, Operand(1));
5309

    
5310
  // Generate an unrolled loop that performs a few probes before
5311
  // giving up. Measurements done on Gmail indicate that 2 probes
5312
  // cover ~93% of loads from dictionaries.
5313
  for (int i = 0; i < kInlinedProbes; i++) {
5314
    // Compute the masked index: (hash + i + i * i) & mask.
5315
    __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
5316
    if (i > 0) {
5317
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
5318
      // the hash in a separate instruction. The value hash + i + i * i is right
5319
      // shifted in the following And instruction.
5320
      ASSERT(NameDictionary::GetProbeOffset(i) <
5321
             1 << (32 - Name::kHashFieldOffset));
5322
      __ Addu(scratch2, scratch2, Operand(
5323
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
5324
    }
5325
    __ srl(scratch2, scratch2, Name::kHashShift);
5326
    __ And(scratch2, scratch1, scratch2);
5327

    
5328
    // Scale the index by multiplying by the element size.
5329
    ASSERT(NameDictionary::kEntrySize == 3);
5330
    // scratch2 = scratch2 * 3.
5331

    
5332
    __ sll(at, scratch2, 1);
5333
    __ Addu(scratch2, scratch2, at);
5334

    
5335
    // Check if the key is identical to the name.
5336
    __ sll(at, scratch2, 2);
5337
    __ Addu(scratch2, elements, at);
5338
    __ lw(at, FieldMemOperand(scratch2, kElementsStartOffset));
5339
    __ Branch(done, eq, name, Operand(at));
5340
  }
5341

    
5342
  const int spill_mask =
5343
      (ra.bit() | t2.bit() | t1.bit() | t0.bit() |
5344
       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
5345
      ~(scratch1.bit() | scratch2.bit());
5346

    
5347
  __ MultiPush(spill_mask);
5348
  if (name.is(a0)) {
5349
    ASSERT(!elements.is(a1));
5350
    __ Move(a1, name);
5351
    __ Move(a0, elements);
5352
  } else {
5353
    __ Move(a0, elements);
5354
    __ Move(a1, name);
5355
  }
5356
  NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
5357
  __ CallStub(&stub);
5358
  __ mov(scratch2, a2);
  __ mov(at, v0);
  __ MultiPop(spill_mask);

  __ Branch(done, ne, at, Operand(zero_reg));
  __ Branch(miss, eq, at, Operand(zero_reg));
}


void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  result (v0): zero if the lookup failed, non-zero otherwise.
  //  dictionary (a0): NameDictionary to probe.
  //  key (a1): the name to look up.
  //  index (a2): will hold the index of the entry if the lookup is
  //              successful; might alias with result_.
  // Returns:
  //  result_ is zero if the lookup failed, non-zero otherwise.

  Register result = v0;
  Register dictionary = a0;
  Register key = a1;
  Register index = a2;
  Register mask = a3;
  Register hash = t0;
  Register undefined = t1;
  Register entry_key = t2;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

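  // Compute the capacity mask from the dictionary's capacity field (a smi
  // power of two); the probing sequence wraps around via this mask.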
  __ lw(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ sra(mask, mask, kSmiTagSize);
  __ Subu(mask, mask, Operand(1));

  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the And instruction that follows.
      ASSERT(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Addu(index, hash, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ mov(index, hash);
    }
    __ srl(index, index, Name::kHashShift);
    __ And(index, mask, index);

    // Scale the index by multiplying by the entry size.
    ASSERT(NameDictionary::kEntrySize == 3);
    // index *= 3.
    __ mov(at, index);
    __ sll(index, index, 1);
    __ Addu(index, index, at);

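    // Convert the entry index into a byte offset from the start of the
    // dictionary (each element is one pointer, i.e. 4 bytes on MIPS32).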
    ASSERT_EQ(kSmiTagSize, 1);
    __ sll(index, index, 2);
    __ Addu(index, index, dictionary);
    __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));

    // Stop if found the property.
    __ Branch(&in_dictionary, eq, entry_key, Operand(key));

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ lbu(entry_key,
             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ Ret(USE_DELAY_SLOT);
    __ mov(result, zero_reg);
  }

  __ bind(&in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ li(result, 1);

  __ bind(&not_in_dictionary);
  __ Ret(USE_DELAY_SLOT);
  __ mov(result, zero_reg);
}


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};


#define REG(Name) { kRegister_ ## Name ## _Code }

static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
  // Also used in KeyedStoreIC::GenerateGeneric.
  { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
  { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
  { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
  // KeyedStoreStubCompiler::GenerateStoreFastElement.
  { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
  { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
  // and ElementsTransitionGenerator::GenerateSmiToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
  { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
  { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate
  { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
  // FastNewClosureStub::Generate
  { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
  // StringAddStub::Generate
  { REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
  { REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
};

#undef REG


bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    if (object_.is(entry->object) &&
        value_.is(entry->value) &&
        address_.is(entry->address) &&
        remembered_set_action_ == entry->action &&
        save_fp_regs_mode_ == kDontSaveFPRegs) {
      return true;
    }
  }
  return false;
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode(isolate)->set_is_pregenerated(true);
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(kSaveFPRegs);
  stub2.GetCode(isolate)->set_is_pregenerated(true);
}


void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    RecordWriteStub stub(entry->object,
                         entry->value,
                         entry->address,
                         entry->action,
                         kDontSaveFPRegs);
    stub.GetCode(isolate)->set_is_pregenerated(true);
  }
}


bool CodeStub::CanUseFPRegisters() {
  return true;  // FPU is a base requirement for V8.
}


// Takes the input in 3 registers: address_, value_ and object_.  A pointer to
// the value has just been written into the object, and now this stub makes
// sure we keep the GC informed.  The word in the object where the value has
// been written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two branch+nop instructions are generated with labels so as to
  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
  // incremental heap marking.
  // See RecordWriteStub::Patch for details.
  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
  __ nop();
  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
  __ nop();

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.

  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     ne,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm, mode);
    regs_.Restore(masm);
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm, mode);
  regs_.Restore(masm);
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  ASSERT(!address.is(regs_.object()));
  ASSERT(!address.is(a0));
  __ Move(address, regs_.address());
  __ Move(a0, regs_.object());
  __ Move(a1, address);
  __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  if (mode == INCREMENTAL_COMPACTION) {
    __ CallCFunction(
        ExternalReference::incremental_evacuation_record_write_function(
            masm->isolate()),
        argument_count);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ CallCFunction(
        ExternalReference::incremental_marking_record_write_function(
            masm->isolate()),
        argument_count);
  }
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

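  // Decrement the write barrier counter stored in the header of the page
  // containing the object; once it drops below zero, jump to
  // need_incremental, which falls through to informing the incremental
  // marker.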
  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
  __ lw(regs_.scratch1(),
        MemOperand(regs_.scratch0(),
                   MemoryChunk::kWriteBarrierCounterOffset));
  __ Subu(regs_.scratch1(), regs_.scratch1(), Operand(1));
  __ sw(regs_.scratch1(),
         MemOperand(regs_.scratch0(),
                    MemoryChunk::kWriteBarrierCounterOffset));
  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),  // Scratch.
                    regs_.address(),  // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : element value to store
  //  -- a3    : element index as smi
  //  -- sp[0] : array literal index in function as smi
  //  -- sp[4] : array literal
  // clobbers a1, a2, t0
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  // Get array literal index, array literal and its map.
  __ lw(t0, MemOperand(sp, 0 * kPointerSize));
  __ lw(a1, MemOperand(sp, 1 * kPointerSize));
  __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));

  __ CheckFastElements(a2, t1, &double_elements);
  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
  __ JumpIfSmi(a0, &smi_element);
  __ CheckFastSmiElements(a2, t1, &fast_elements);

  // Storing into the array literal requires an elements transition. Call into
  // the runtime.
  __ bind(&slow_elements);
  __ Push(a1, a3, a0);
  __ lw(t1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ lw(t1, FieldMemOperand(t1, JSFunction::kLiteralsOffset));
  __ Push(t1, t0);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
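  // a3 holds the element index as a smi; the combined shift by
  // kPointerSizeLog2 - kSmiTagSize converts it directly into a byte offset
  // into the elements array.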
  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t2, t1, t2);
  __ Addu(t2, t2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sw(a0, MemOperand(t2, 0));
  // Update the write barrier for the array store.
  __ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
  // and value is Smi.
  __ bind(&smi_element);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(t2, t1, t2);
  __ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);

  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(a0, a3, t1, t3, t5, a2, &slow_elements);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ lw(a1, MemOperand(fp, parameter_count_offset));
  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
    __ Addu(a1, a1, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
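  // a1 holds the caller's stack parameter count; convert it to bytes and
  // drop those parameters before returning.  The Addu executes in the branch
  // delay slot of the Ret.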
  __ sll(a1, a1, kPointerSizeLog2);
  __ Ret(USE_DELAY_SLOT);
  __ Addu(sp, sp, a1);
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    AllowStubCallsScope allow_stub_calls(masm, true);
    ProfileEntryHookStub stub;
    __ push(ra);
    __ CallStub(&stub);
    __ pop(ra);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push ra" instruction, followed by a call.
  // Note: on MIPS "push" is 2 instructions.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);

  // This should contain all kJSCallerSaved registers.
  const RegList kSavedRegs =
     kJSCallerSaved |  // Caller saved registers.
     s5.bit();         // Saved stack pointer.

  // We also save ra, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;

  // Save all caller-save registers as this may be called from anywhere.
  __ MultiPush(kSavedRegs | ra.bit());

  // Compute the function's address for the first argument.
  __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ Addu(a1, sp, Operand(kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mov(s5, sp);
    ASSERT(IsPowerOf2(frame_alignment));
    __ And(sp, sp, Operand(-frame_alignment));
  }

#if defined(V8_HOST_ARCH_MIPS)
  int32_t entry_hook =
      reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
  __ li(at, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ li(a2, Operand(ExternalReference::isolate_address(masm->isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ li(at, Operand(ExternalReference(&dispatcher,
                                      ExternalReference::BUILTIN_CALL,
                                      masm->isolate())));
#endif
  __ Call(at);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, s5);
  }

  // Also pop ra to get Ret(0).
  __ MultiPop(kSavedRegs | ra.bit());
  __ Ret();
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(GetInitialFastElementsKind(),
           CONTEXT_CHECK_REQUIRED,
           mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
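    // a3 holds the elements kind; walk the fast-elements-kind sequence and
    // tail call the constructor stub specialized for the matching kind.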
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      Label next;
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ Branch(&next, ne, a3, Operand(kind));
      T stub(kind);
      __ TailCallStub(&stub);
      __ bind(&next);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // a2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // a0 - number of arguments
  // a1 - constructor
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    ASSERT(FAST_SMI_ELEMENTS == 0);
    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    ASSERT(FAST_ELEMENTS == 2);
    ASSERT(FAST_HOLEY_ELEMENTS == 3);
    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // Is the low bit set? If so, we are holey and that is good.
    __ And(at, a3, Operand(1));
    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
  }

  // Look at the first argument.
  __ lw(t1, MemOperand(sp, 0));
  __ Branch(&normal_sequence, eq, t1, Operand(zero_reg));

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(holey_initial,
                                                  CONTEXT_CHECK_REQUIRED,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(initial,
                                            CONTEXT_CHECK_REQUIRED,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the cell).
    __ Addu(a3, a3, Operand(1));
    __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));

    if (FLAG_debug_code) {
      __ lw(t1, FieldMemOperand(t1, 0));
      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSiteInCell, t1, Operand(at));
      __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
    }

    // Save the resulting elements kind in type info.
    __ SmiTag(a3);
    __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
    __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
    __ SmiUntag(a3);

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      Label next;
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ Branch(&next, ne, a3, Operand(kind));
      ArraySingleArgumentConstructorStub stub(kind);
      __ TailCallStub(&stub);
      __ bind(&next);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  ElementsKind initial_kind = GetInitialFastElementsKind();
  ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);

  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(kind);
    stub.GetCode(isolate)->set_is_pregenerated(true);
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
        (!FLAG_track_allocation_sites &&
         (kind == initial_kind || kind == initial_holey_kind))) {
      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
      stub1.GetCode(isolate)->set_is_pregenerated(true);
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
    stubh1.GetCode(isolate)->set_is_pregenerated(true);
    InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
    stubh2.GetCode(isolate)->set_is_pregenerated(true);
    InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
    stubh3.GetCode(isolate)->set_is_pregenerated(true);
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  if (argument_count_ == ANY) {
    Label not_zero_case, not_one_case;
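    // a0 holds argc; the And of a0 with itself is just a test so the branch
    // below can dispatch on zero vs. non-zero argument counts.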
    __ And(at, a0, a0);
    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

    __ bind(&not_zero_case);
    __ Branch(&not_one_case, gt, a0, Operand(1));
    CreateArrayDispatchOneArgument(masm, mode);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else if (argument_count_ == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
  } else if (argument_count_ == ONE) {
    CreateArrayDispatchOneArgument(masm, mode);
  } else if (argument_count_ == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else {
    UNREACHABLE();
  }
}


void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc (only if argument_count_ == ANY)
  //  -- a1 : constructor
  //  -- a2 : type info cell
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------
  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // This check catches both a NULL and a Smi.
    __ And(at, a3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a3, a3, t0);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        t0, Operand(MAP_TYPE));

    // We should either have undefined in a2 or a valid cell.
    Label okay_here;
    Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
    __ Branch(&okay_here, eq, a2, Operand(at));
    __ lw(a3, FieldMemOperand(a2, 0));
    __ Assert(eq, kExpectedPropertyCellInRegisterA2,
        a3, Operand(cell_map));
    __ bind(&okay_here);
  }

  Label no_info;
  // Get the elements kind and case on that.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  __ Branch(&no_info, eq, a2, Operand(at));
  __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset));

  // If the type cell is undefined, or contains anything other than an
  // AllocationSite, call an array constructor that doesn't use AllocationSites.
  __ lw(t0, FieldMemOperand(a3, 0));
  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
  __ Branch(&no_info, ne, t0, Operand(at));

  __ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(a3);
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {
  Label not_zero_case, not_one_case;
  Label normal_sequence;

  __ Branch(&not_zero_case, ne, a0, Operand(zero_reg));
  InternalArrayNoArgumentConstructorStub stub0(kind);
  __ TailCallStub(&stub0);

  __ bind(&not_zero_case);
  __ Branch(&not_one_case, gt, a0, Operand(1));

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array;
    // look at the first argument.
    __ lw(at, MemOperand(sp, 0));
    __ Branch(&normal_sequence, eq, at, Operand(zero_reg));

    InternalArraySingleArgumentConstructorStub
        stub1_holey(GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey);
  }

  __ bind(&normal_sequence);
  InternalArraySingleArgumentConstructorStub stub1(kind);
  __ TailCallStub(&stub1);

  __ bind(&not_one_case);
  InternalArrayNArgumentsConstructorStub stubN(kind);
  __ TailCallStub(&stubN);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0 : argc
  //  -- a1 : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
    // This check catches both a NULL and a Smi.
    __ And(at, a3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
        at, Operand(zero_reg));
    __ GetObjectType(a3, a3, t0);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
        t0, Operand(MAP_TYPE));
  }

  // Figure out the right elements kind.
  __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));

  // Load the map's "bit field 2" into a3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ext(a3, a3, Map::kElementsKindShift, Map::kElementsKindBitCount);

  if (FLAG_debug_code) {
    Label done;
    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
    __ Assert(
        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
        a3, Operand(FAST_HOLEY_ELEMENTS));
    __ bind(&done);
  }

  Label fast_elements_case;
  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS