deps/v8/src/mips/ic-mips.cc @ f230a1cf

// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "codegen.h"
#include "code-stubs.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
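// The __ shorthand above expands "__ op(...)" to "masm->op(...)".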


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register scratch0,
                                                Register scratch1,
                                                Label* miss) {
  // Register usage:
  //   receiver: holds the receiver on entry and is unchanged.
  //   elements: holds the property dictionary on fall through.
  // Scratch registers:
  //   scratch0: used to hold the receiver map.
  //   scratch1: used to hold the receiver instance type, receiver bit mask
  //     and elements map.

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the receiver is a valid JS object.
  __ GetObjectType(receiver, scratch0, scratch1);
  __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If this assert fails, we have to check the upper bound too.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);

  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);

  // Check that the global object does not require access checks.
  __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
  __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                           (1 << Map::kHasNamedInterceptor)));
  __ Branch(miss, ne, scratch1, Operand(zero_reg));

  __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
  __ Branch(miss, ne, scratch1, Operand(scratch0));
}


// Helper function used from LoadIC/CallIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name, clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry, check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
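  // The details word is stored as a smi, so PropertyDetails field masks must
  // be shifted left by the smi tag size before testing. A zero type field
  // means the property is NORMAL.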
  __ And(at,
         scratch1,
         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ lw(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sw(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch, Operand(KeyedLoadIC::kSlowCaseBitFieldMask));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch1,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold the elements map and the elements length.
  //            Holds the elements map if the not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode (not dictionary).
    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(not_fast_array, ne, scratch1, Operand(at));
  } else {
    __ AssertFastElements(elements);
  }

  // Check that the key (index) is within bounds.
  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(out_of_range, hs, key, Operand(scratch1));

  // Fast case: Do the load.
  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(at, at, scratch1);
  __ lw(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ Branch(out_of_range, eq, scratch2, Operand(at));
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}


// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);

// The generated code does not accept smi keys.
// The generated code falls through if both probes miss.
void CallICBase::GenerateMonomorphicCacheProbe(MacroAssembler* masm,
                                               int argc,
                                               Code::Kind kind,
                                               Code::ExtraICState extra_state) {
  // ----------- S t a t e -------------
  //  -- a1    : receiver
  //  -- a2    : name
  // -----------------------------------
  Label number, non_number, non_string, boolean, probe, miss;

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(kind,
                                         MONOMORPHIC,
                                         extra_state,
                                         Code::NORMAL,
                                         argc);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a1, a2, a3, t0, t1, t2);

  // If the stub cache probing failed, the receiver might be a value.
  // For value objects, we use the map of the prototype objects for
  // the corresponding JSValue for the cache and that is what we need
  // to probe.
  //
  // Check for number.
  __ JumpIfSmi(a1, &number, t1);
  __ GetObjectType(a1, a3, a3);
  __ Branch(&non_number, ne, a3, Operand(HEAP_NUMBER_TYPE));
  __ bind(&number);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::NUMBER_FUNCTION_INDEX, a1);
  __ Branch(&probe);

  // Check for string.
  __ bind(&non_number);
  __ Branch(&non_string, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE));
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::STRING_FUNCTION_INDEX, a1);
  __ Branch(&probe);

  // Check for boolean.
  __ bind(&non_string);
  __ LoadRoot(t0, Heap::kTrueValueRootIndex);
  __ Branch(&boolean, eq, a1, Operand(t0));
  __ LoadRoot(t1, Heap::kFalseValueRootIndex);
  __ Branch(&miss, ne, a1, Operand(t1));
  __ bind(&boolean);
  StubCompiler::GenerateLoadGlobalFunctionPrototype(
      masm, Context::BOOLEAN_FUNCTION_INDEX, a1);

  // Probe the stub cache for the value object.
  __ bind(&probe);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a1, a2, a3, t0, t1, t2);

  __ bind(&miss);
}


static void GenerateFunctionTailCall(MacroAssembler* masm,
                                     int argc,
                                     Label* miss,
                                     Register scratch) {
  // a1: function

  // Check that the value isn't a smi.
  __ JumpIfSmi(a1, miss);

  // Check that the value is a JSFunction.
  __ GetObjectType(a1, scratch, scratch);
  __ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));

  // Invoke the function.
  ParameterCount actual(argc);
  __ InvokeFunction(a1, actual, JUMP_FUNCTION,
                    NullCallWrapper(), CALL_AS_METHOD);
}


void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  // Get the receiver of the function from the stack into a1.
  __ lw(a1, MemOperand(sp, argc * kPointerSize));

  GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);

  // a0: elements
  // Search the dictionary - put result in register a1.
  GenerateDictionaryLoad(masm, &miss, a0, a2, a1, a3, t0);

  GenerateFunctionTailCall(masm, argc, &miss, t0);

  // Cache miss: Jump to runtime.
  __ bind(&miss);
}


void CallICBase::GenerateMiss(MacroAssembler* masm,
                              int argc,
                              IC::UtilityId id,
                              Code::ExtraICState extra_state) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  if (id == IC::kCallIC_Miss) {
    __ IncrementCounter(isolate->counters()->call_miss(), 1, a3, t0);
  } else {
    __ IncrementCounter(isolate->counters()->keyed_call_miss(), 1, a3, t0);
  }

  // Get the receiver of the function from the stack.
  __ lw(a3, MemOperand(sp, argc * kPointerSize));

  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Push the receiver and the name of the function.
    __ Push(a3, a2);

    // Call the entry.
    __ PrepareCEntryArgs(2);
    __ PrepareCEntryFunction(ExternalReference(IC_Utility(id), isolate));

    CEntryStub stub(1);
    __ CallStub(&stub);

    // Move result to a1 and leave the internal frame.
    __ mov(a1, v0);
  }

  // Check if the receiver is a global object of some sort.
  // This can happen only for regular CallIC but not KeyedCallIC.
  if (id == IC::kCallIC_Miss) {
    Label invoke, global;
    __ lw(a2, MemOperand(sp, argc * kPointerSize));
    __ JumpIfSmi(a2, &invoke);
    __ GetObjectType(a2, a3, a3);
    __ Branch(&global, eq, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
    __ Branch(&invoke, ne, a3, Operand(JS_BUILTINS_OBJECT_TYPE));

    // Patch the receiver on the stack.
    __ bind(&global);
    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
    __ sw(a2, MemOperand(sp, argc * kPointerSize));
    __ bind(&invoke);
  }
  // Invoke the function.
  CallKind call_kind = CallICBase::Contextual::decode(extra_state)
      ? CALL_AS_FUNCTION
      : CALL_AS_METHOD;
  ParameterCount actual(argc);
  __ InvokeFunction(a1,
                    actual,
                    JUMP_FUNCTION,
                    NullCallWrapper(),
                    call_kind);
}


void CallIC::GenerateMegamorphic(MacroAssembler* masm,
                                 int argc,
                                 Code::ExtraICState extra_ic_state) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // Get the receiver of the function from the stack into a1.
  __ lw(a1, MemOperand(sp, argc * kPointerSize));
  GenerateMonomorphicCacheProbe(masm, argc, Code::CALL_IC, extra_ic_state);
  GenerateMiss(masm, argc, extra_ic_state);
}


void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // Get the receiver of the function from the stack into a1.
  __ lw(a1, MemOperand(sp, argc * kPointerSize));

  Label do_call, slow_call, slow_load, slow_reload_receiver;
  Label check_number_dictionary, check_name, lookup_monomorphic_cache;
  Label index_smi, index_name;

  // Check that the key is a smi.
  __ JumpIfNotSmi(a2, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, a1, a0, a3, Map::kHasIndexedInterceptor, &slow_call);

  GenerateFastArrayLoad(
      masm, a1, a2, t0, a3, a0, a1, &check_number_dictionary, &slow_load);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->keyed_call_generic_smi_fast(), 1, a0, a3);

  __ bind(&do_call);
  // The receiver in a1 is not used after this point.
  // a2: key
  // a1: function

  GenerateFunctionTailCall(masm, argc, &slow_call, a0);

  __ bind(&check_number_dictionary);
  // a2: key
  // a3: elements map
  // t0: elements pointer
  // Check whether the elements object is a number dictionary.
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow_load, ne, a3, Operand(at));
  __ sra(a0, a2, kSmiTagSize);
  // a0: untagged index
  __ LoadFromNumberDictionary(&slow_load, t0, a2, a1, a0, a3, t1);
  __ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, a0, a3);
  __ jmp(&do_call);

  __ bind(&slow_load);
  // This branch is taken when calling KeyedCallIC_Miss is neither required
  // nor beneficial.
  __ IncrementCounter(counters->keyed_call_generic_slow_load(), 1, a0, a3);
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(a2);  // Save the key.
    __ Push(a1, a2);  // Pass the receiver and the key.
    __ CallRuntime(Runtime::kKeyedGetProperty, 2);
    __ pop(a2);  // Restore the key.
  }
  __ mov(a1, v0);
  __ jmp(&do_call);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, a2, a0, a3, &index_name, &slow_call);

  // The key is known to be a unique name.
  // If the receiver is a regular JS object with slow properties then do
  // a quick inline probe of the receiver's dictionary.
  // Otherwise do the monomorphic cache probe.
  GenerateKeyedLoadReceiverCheck(
      masm, a1, a0, a3, Map::kHasNamedInterceptor, &lookup_monomorphic_cache);

  __ lw(a0, FieldMemOperand(a1, JSObject::kPropertiesOffset));
  __ lw(a3, FieldMemOperand(a0, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&lookup_monomorphic_cache, ne, a3, Operand(at));

  GenerateDictionaryLoad(masm, &slow_load, a0, a2, a1, a3, t0);
  __ IncrementCounter(counters->keyed_call_generic_lookup_dict(), 1, a0, a3);
  __ jmp(&do_call);

  __ bind(&lookup_monomorphic_cache);
  __ IncrementCounter(counters->keyed_call_generic_lookup_cache(), 1, a0, a3);
  GenerateMonomorphicCacheProbe(masm,
                                argc,
                                Code::KEYED_CALL_IC,
                                Code::kNoExtraICState);
  // Fall through on miss.

  __ bind(&slow_call);
  // This branch is taken if:
  // - the receiver requires boxing or access check,
  // - the key is neither a smi nor a unique name,
  // - the value loaded is not a function,
  // - there is hope that the runtime will create a monomorphic call stub,
  //   that will get fetched next time.
  __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3);
  GenerateMiss(masm, argc);

  __ bind(&index_name);
  __ IndexFromHash(a3, a2);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}


void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // Check if the name is really a name.
  Label miss;
  __ JumpIfSmi(a2, &miss);
  __ IsObjectNameType(a2, a0, &miss);

  CallICBase::GenerateNormal(masm, argc);
  __ bind(&miss);
  GenerateMiss(masm, argc);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(
      Code::HANDLER, MONOMORPHIC, Code::kNoExtraICState,
      Code::NORMAL, Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a0, a2, a3, t0, t1, t2);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);

  // a1: elements
  GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);

  __ mov(a3, a0);
  __ Push(a3, a2);

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------

  __ mov(a3, a0);
  __ Push(a3, a2);

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ GetObjectType(object, scratch1, scratch2);
  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check that the key is a positive smi.
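  // A negative smi or a non-smi sets either the sign bit or the tag bit of
  // 0x80000001, so a single mask test covers both conditions.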
  __ And(scratch1, key, Operand(0x80000001));
  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));

  // Load the elements into scratch1 and check its map.
  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1,
              scratch2,
              Heap::kNonStrictArgumentsElementsMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

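  // The key is a smi, i.e. the index shifted left by the smi tag size, so
  // multiplying by kPointerSize >> 1 scales it to a byte offset of
  // index * kPointerSize without untagging it first.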
  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, key, scratch3);
  __ Addu(scratch3, scratch3, Operand(kOffset));

  __ Addu(scratch2, scratch1, scratch3);
  __ lw(scratch2, MemOperand(scratch2));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, scratch2, scratch3);
  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch2, scratch1, scratch3);
  return MemOperand(scratch2);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // The element is in the arguments backing store, which is referenced by
  // the second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  __ CheckMap(backing_store,
              scratch,
              Heap::kFixedArrayMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
  __ li(scratch, Operand(kPointerSize >> 1));
  __ Mul(scratch, key, scratch);
  __ Addu(scratch,
          scratch,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch, backing_store, scratch);
  return MemOperand(scratch);
}


void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow, notin;
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
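  // The load below executes in the branch delay slot of Ret, so v0 is
  // populated before control actually returns.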
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, mapped_location);
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a2.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
  __ lw(a2, unmapped_location);
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow, eq, a2, Operand(a3));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a2);
  __ bind(&slow);
  GenerateMiss(masm, MISS);
}


void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, notin;
  // The store address is returned in the register of MemOperand
  // mapped_location.
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
  __ sw(a0, mapped_location);
  __ mov(t5, a0);
  ASSERT_EQ(mapped_location.offset(), 0);
  __ RecordWrite(a3, mapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  // The store address is returned in the register of MemOperand
  // unmapped_location.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
  __ sw(a0, unmapped_location);
  __ mov(t5, a0);
  ASSERT_EQ(unmapped_location.offset(), 0);
  __ RecordWrite(a3, unmapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&slow);
  GenerateMiss(masm, MISS);
}


void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
                                             int argc) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label slow, notin;
  // Load the receiver.
  __ lw(a1, MemOperand(sp, argc * kPointerSize));
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a1, a2, a3, t0, t1, &notin, &slow);
  __ lw(a1, mapped_location);
  GenerateFunctionTailCall(masm, argc, &slow, a3);
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a2, a3, t0, &slow);
  __ lw(a1, unmapped_location);
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow, eq, a1, Operand(a3));
  GenerateFunctionTailCall(masm, argc, &slow, a3);
  __ bind(&slow);
  GenerateMiss(masm, argc);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);

  __ Push(a1, a0);

  // Perform tail call to the entry.
  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
      ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate)
      : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------

  __ Push(a1, a0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = a0;
  Register receiver = a1;

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a2, a3, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, t0, a3, a2, v0, NULL, &slow);

  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // a0: key
  // a3: elements map
  // t0: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ sra(a2, a0, kSmiTagSize);
  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
  __ Ret();

  // Slow case, key and receiver still in a0 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
                      1,
                      a2,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);


  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, t0, Operand(at));

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
  __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset));
  __ sra(at, t0, Name::kHashShift);
  __ xor_(a3, a3, at);
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(a3, a3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ li(t0, Operand(cache_keys));
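  // Each cache entry is a (map, name) pair, i.e. two pointers wide, so the
  // bucket index is scaled by kPointerSizeLog2 + 1.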
  __ sll(at, a3, kPointerSizeLog2 + 1);
  __ addu(t0, t0, at);

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
    __ Branch(&try_next_entry, ne, a2, Operand(t1));
    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
    __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
    __ bind(&try_next_entry);
  }

  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ Branch(&slow, ne, a2, Operand(t1));
  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
  __ Branch(&slow, ne, a0, Operand(t1));

  // Get field offset.
  // a0     : key
  // a1     : receiver
  // a2     : receiver's map
  // a3     : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
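  // The cached offset counts in-object fields first: after subtracting the
  // number of in-object properties, a non-negative result indexes the
  // property array, while a negative result addresses an in-object field
  // relative to the end of the object.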

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ li(t0, Operand(cache_field_offsets));
    __ sll(at, a3, kPointerSizeLog2);
    __ addu(at, t0, at);
    __ lw(t1, MemOperand(at, kPointerSize * i));
    __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
    __ Subu(t1, t1, t2);
    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
    if (i != 0) {
      __ Branch(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
  __ addu(t2, t2, t1);  // Index from start of object.
  __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ sll(at, t2, kPointerSizeLog2);
  __ addu(at, a1, at);
  __ lw(v0, MemOperand(at));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      a2,
                      a3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
  __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ sll(t0, t1, kPointerSizeLog2);
  __ Addu(t0, t0, a1);
  __ lw(v0, MemOperand(t0));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      a2,
                      a3);
  __ Ret();


  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a1: receiver
  // a0: key
  // a3: elements
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1,
                      a2,
                      a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key (index)
  //  -- a1     : receiver
  // -----------------------------------
  Label miss;

  Register receiver = a1;
  Register index = a0;
  Register scratch = a3;
  Register result = v0;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm, MISS);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(a2, a1, a0);
  __ li(a1, Operand(Smi::FromInt(NONE)));          // PropertyAttributes.
  __ li(a0, Operand(Smi::FromInt(strict_mode)));   // Strict mode.
  __ Push(a1, a0);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


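// This helper is emitted twice from KeyedStoreIC::GenerateGeneric below:
// once for in-bounds stores (with a map check, without growing) and once
// for stores one element past the end that grow the backing store.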
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = t0;
  Register address = t1;
  if (check_map == kCheckMap) {
    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }
  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether the array is smi-only or not when writing a smi.
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch_value);
  __ sw(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to the elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch_value);
  __ sw(value, MemOperand(address));
  // Update the write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 scratch_value,
                 kRAHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }
  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,  // Overwritten.
                                 a3,        // Scratch regs...
                                 t0,
                                 t1,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, t0, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects the map in a3.
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects the map in a3.
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
                                                                   slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that is not
  // a HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects the map in a3.
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
1298

    
1299

    
1300
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = a0;
  Register key = a1;
  Register receiver = a2;
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;  // Elements array of the receiver.
  // t0 and t1 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.  We need
  // to do this because this generic stub does not perform map checks.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of the FixedArray are
  // smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(t0));

  // Slow case: jump to the runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // The array length (a smi) is still in t0 from the comparison above.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(t0));
  // Check for room in the elements backing store.
  // Both the key and the length of the FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(t0));
  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(
      &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(t0));

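  // Emit both fast-path bodies: the first variant re-checks the elements map
  // and stores in place (kCheckMap, kDontIncrementLength); the second handles
  // the grow case reached via &extra, where the elements map was already
  // checked, and bumps the array length (kDontCheckMap, kIncrementLength).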
  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(a1, &slow);

  // Check that the key is an array index, that is, a Uint32.
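  // A valid array index is a non-negative smi, so both the smi tag bit and
  // the smi sign bit must be clear. For example, key = 2 is the smi
  // 0x00000004: bit 0 (tag) and bit 31 (sign) are both zero.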
  __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
  __ Branch(&slow, ne, t0, Operand(zero_reg));

  // Get the map of the receiver.
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));

  // Check that it has an indexed interceptor and that access checks
  // are not enabled for this object.
  __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
  __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
  // Everything is fine, call runtime.
  __ Push(a1, a0);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(ExternalReference(
       IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);

  __ bind(&slow);
  GenerateMiss(masm, MISS);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(a2, a1, a0);

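  // MISS_FORCE_GENERIC selects the runtime entry that transitions this IC
  // straight to the generic stub rather than attempting another specialized
  // handler.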
  ExternalReference ref = miss_mode == MISS_FORCE_GENERIC
      ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric),
                          masm->isolate())
      : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a2     : key
  //  -- a1     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(a1, a2, a0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  // We can't use MultiPush as the order of the registers is important.
  __ Push(a2, a1, a0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());

  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                  StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // The receiver is in a1 and the name in a2; probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(
      Code::HANDLER, MONOMORPHIC, strict_mode,
      Code::NORMAL, Code::STORE_IC);
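  // a3, t0, t1 and t2 are passed to the probe as scratch registers and may
  // be clobbered.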
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a1, a2, a3, t0, t1, t2);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  __ Push(a1, a2, a0);
  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
                                            masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);

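  // If the check succeeded, a3 now holds the receiver's property dictionary.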
  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  __ Push(a1, a2, a0);

  __ li(a1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
  __ li(a0, Operand(Smi::FromInt(strict_mode)));
  __ Push(a1, a0);

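  // The stack now holds the five arguments Runtime::kSetProperty expects:
  // receiver, name, value, the PropertyAttributes and the strict mode flag.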
  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
      Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code to use at the patched jump.
1618
  delta += Assembler::GetRs(instr) * kImm16Mask;
1619
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
1620
  // signals that nothing was inlined.
1621
  if (delta == 0) {
1622
    return;
1623
  }
1624

    
1625
#ifdef DEBUG
1626
  if (FLAG_trace_ic) {
1627
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
1628
           address, andi_instruction_address, delta);
1629
  }
1630
#endif
1631

    
1632
  Address patch_address =
1633
      andi_instruction_address - delta * Instruction::kInstrSize;
1634
  Instr instr_at_patch = Assembler::instr_at(patch_address);
1635
  Instr branch_instr =
1636
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
1637
  // This is patching a conditional "jump if not smi/jump if smi" site.
1638
  // Enabling by changing from
1639
  //   andi at, rx, 0
1640
  //   Branch <target>, eq, at, Operand(zero_reg)
1641
  // to:
1642
  //   andi at, rx, #kSmiTagMask
1643
  //   Branch <target>, ne, at, Operand(zero_reg)
1644
  // and vice-versa to be disabled again.
1645
  CodePatcher patcher(patch_address, 2);
1646
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
1647
  if (check == ENABLE_INLINED_SMI_CHECK) {
1648
    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
1649
    ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
1650
    patcher.masm()->andi(at, reg, kSmiTagMask);
1651
  } else {
1652
    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
1653
    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
1654
    patcher.masm()->andi(at, reg, 0);
1655
  }
1656
  ASSERT(Assembler::IsBranch(branch_instr));
1657
  if (Assembler::IsBeq(branch_instr)) {
1658
    patcher.ChangeBranchCondition(ne);
1659
  } else {
1660
    ASSERT(Assembler::IsBne(branch_instr));
1661
    patcher.ChangeBranchCondition(eq);
1662
  }
1663
}
1664

    
1665

    
1666
} }  // namespace v8::internal
1667

    
1668
#endif  // V8_TARGET_ARCH_MIPS