The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.

Please select the desired protocol below to get the URL.

This URL has Read-Only access.

Statistics
| Branch: | Revision:

main_repo / deps / v8 / src / x64 / macro-assembler-x64.cc @ f230a1cf

History | View | Annotate | Download (156 KB)

1
// Copyright 2012 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
4
// met:
5
//
6
//     * Redistributions of source code must retain the above copyright
7
//       notice, this list of conditions and the following disclaimer.
8
//     * Redistributions in binary form must reproduce the above
9
//       copyright notice, this list of conditions and the following
10
//       disclaimer in the documentation and/or other materials provided
11
//       with the distribution.
12
//     * Neither the name of Google Inc. nor the names of its
13
//       contributors may be used to endorse or promote products derived
14
//       from this software without specific prior written permission.
15
//
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27

    
28
#include "v8.h"
29

    
30
#if V8_TARGET_ARCH_X64
31

    
32
#include "bootstrapper.h"
33
#include "codegen.h"
34
#include "cpu-profiler.h"
35
#include "assembler-x64.h"
36
#include "macro-assembler-x64.h"
37
#include "serialize.h"
38
#include "debug.h"
39
#include "heap.h"
40
#include "isolate-inl.h"
41

    
42
namespace v8 {
43
namespace internal {
44

    
45
MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
46
    : Assembler(arg_isolate, buffer, size),
47
      generating_stub_(false),
48
      allow_stub_calls_(true),
49
      has_frame_(false),
50
      root_array_available_(true) {
51
  if (isolate() != NULL) {
52
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
53
                                  isolate());
54
  }
55
}
56

    
57

    
58
static const int kInvalidRootRegisterDelta = -1;
59

    
60

    
61
intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
62
  if (predictable_code_size() &&
63
      (other.address() < reinterpret_cast<Address>(isolate()) ||
64
       other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
65
    return kInvalidRootRegisterDelta;
66
  }
67
  Address roots_register_value = kRootRegisterBias +
68
      reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
69
  intptr_t delta = other.address() - roots_register_value;
70
  return delta;
71
}
72

    
73

    
74
Operand MacroAssembler::ExternalOperand(ExternalReference target,
75
                                        Register scratch) {
76
  if (root_array_available_ && !Serializer::enabled()) {
77
    intptr_t delta = RootRegisterDelta(target);
78
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
79
      Serializer::TooLateToEnableNow();
80
      return Operand(kRootRegister, static_cast<int32_t>(delta));
81
    }
82
  }
83
  movq(scratch, target);
84
  return Operand(scratch, 0);
85
}
86

    
87

    
88
void MacroAssembler::Load(Register destination, ExternalReference source) {
89
  if (root_array_available_ && !Serializer::enabled()) {
90
    intptr_t delta = RootRegisterDelta(source);
91
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
92
      Serializer::TooLateToEnableNow();
93
      movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
94
      return;
95
    }
96
  }
97
  // Safe code.
98
  if (destination.is(rax)) {
99
    load_rax(source);
100
  } else {
101
    movq(kScratchRegister, source);
102
    movq(destination, Operand(kScratchRegister, 0));
103
  }
104
}
105

    
106

    
107
void MacroAssembler::Store(ExternalReference destination, Register source) {
108
  if (root_array_available_ && !Serializer::enabled()) {
109
    intptr_t delta = RootRegisterDelta(destination);
110
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
111
      Serializer::TooLateToEnableNow();
112
      movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
113
      return;
114
    }
115
  }
116
  // Safe code.
117
  if (source.is(rax)) {
118
    store_rax(destination);
119
  } else {
120
    movq(kScratchRegister, destination);
121
    movq(Operand(kScratchRegister, 0), source);
122
  }
123
}
124

    
125

    
126
void MacroAssembler::LoadAddress(Register destination,
127
                                 ExternalReference source) {
128
  if (root_array_available_ && !Serializer::enabled()) {
129
    intptr_t delta = RootRegisterDelta(source);
130
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
131
      Serializer::TooLateToEnableNow();
132
      lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
133
      return;
134
    }
135
  }
136
  // Safe code.
137
  movq(destination, source);
138
}
139

    
140

    
141
int MacroAssembler::LoadAddressSize(ExternalReference source) {
142
  if (root_array_available_ && !Serializer::enabled()) {
143
    // This calculation depends on the internals of LoadAddress.
144
    // It's correctness is ensured by the asserts in the Call
145
    // instruction below.
146
    intptr_t delta = RootRegisterDelta(source);
147
    if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
148
      Serializer::TooLateToEnableNow();
149
      // Operand is lea(scratch, Operand(kRootRegister, delta));
150
      // Opcodes : REX.W 8D ModRM Disp8/Disp32  - 4 or 7.
151
      int size = 4;
152
      if (!is_int8(static_cast<int32_t>(delta))) {
153
        size += 3;  // Need full four-byte displacement in lea.
154
      }
155
      return size;
156
    }
157
  }
158
  // Size of movq(destination, src);
159
  return Assembler::kMoveAddressIntoScratchRegisterInstructionLength;
160
}
161

    
162

    
163
void MacroAssembler::PushAddress(ExternalReference source) {
164
  int64_t address = reinterpret_cast<int64_t>(source.address());
165
  if (is_int32(address) && !Serializer::enabled()) {
166
    if (emit_debug_code()) {
167
      movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
168
    }
169
    push(Immediate(static_cast<int32_t>(address)));
170
    return;
171
  }
172
  LoadAddress(kScratchRegister, source);
173
  push(kScratchRegister);
174
}
175

    
176

    
177
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
178
  ASSERT(root_array_available_);
179
  movq(destination, Operand(kRootRegister,
180
                            (index << kPointerSizeLog2) - kRootRegisterBias));
181
}
182

    
183

    
184
void MacroAssembler::LoadRootIndexed(Register destination,
185
                                     Register variable_offset,
186
                                     int fixed_offset) {
187
  ASSERT(root_array_available_);
188
  movq(destination,
189
       Operand(kRootRegister,
190
               variable_offset, times_pointer_size,
191
               (fixed_offset << kPointerSizeLog2) - kRootRegisterBias));
192
}
193

    
194

    
195
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
196
  ASSERT(root_array_available_);
197
  movq(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
198
       source);
199
}
200

    
201

    
202
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
203
  ASSERT(root_array_available_);
204
  push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
205
}
206

    
207

    
208
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
209
  ASSERT(root_array_available_);
210
  cmpq(with, Operand(kRootRegister,
211
                     (index << kPointerSizeLog2) - kRootRegisterBias));
212
}
213

    
214

    
215
void MacroAssembler::CompareRoot(const Operand& with,
216
                                 Heap::RootListIndex index) {
217
  ASSERT(root_array_available_);
218
  ASSERT(!with.AddressUsesRegister(kScratchRegister));
219
  LoadRoot(kScratchRegister, index);
220
  cmpq(with, kScratchRegister);
221
}
222

    
223

    
224
void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
225
                                         Register addr,
226
                                         Register scratch,
227
                                         SaveFPRegsMode save_fp,
228
                                         RememberedSetFinalAction and_then) {
229
  if (emit_debug_code()) {
230
    Label ok;
231
    JumpIfNotInNewSpace(object, scratch, &ok, Label::kNear);
232
    int3();
233
    bind(&ok);
234
  }
235
  // Load store buffer top.
236
  LoadRoot(scratch, Heap::kStoreBufferTopRootIndex);
237
  // Store pointer to buffer.
238
  movq(Operand(scratch, 0), addr);
239
  // Increment buffer top.
240
  addq(scratch, Immediate(kPointerSize));
241
  // Write back new top of buffer.
242
  StoreRoot(scratch, Heap::kStoreBufferTopRootIndex);
243
  // Call stub on end of buffer.
244
  Label done;
245
  // Check for end of buffer.
246
  testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit));
247
  if (and_then == kReturnAtEnd) {
248
    Label buffer_overflowed;
249
    j(not_equal, &buffer_overflowed, Label::kNear);
250
    ret(0);
251
    bind(&buffer_overflowed);
252
  } else {
253
    ASSERT(and_then == kFallThroughAtEnd);
254
    j(equal, &done, Label::kNear);
255
  }
256
  StoreBufferOverflowStub store_buffer_overflow =
257
      StoreBufferOverflowStub(save_fp);
258
  CallStub(&store_buffer_overflow);
259
  if (and_then == kReturnAtEnd) {
260
    ret(0);
261
  } else {
262
    ASSERT(and_then == kFallThroughAtEnd);
263
    bind(&done);
264
  }
265
}
266

    
267

    
268
void MacroAssembler::InNewSpace(Register object,
269
                                Register scratch,
270
                                Condition cc,
271
                                Label* branch,
272
                                Label::Distance distance) {
273
  if (Serializer::enabled()) {
274
    // Can't do arithmetic on external references if it might get serialized.
275
    // The mask isn't really an address.  We load it as an external reference in
276
    // case the size of the new space is different between the snapshot maker
277
    // and the running system.
278
    if (scratch.is(object)) {
279
      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
280
      and_(scratch, kScratchRegister);
281
    } else {
282
      movq(scratch, ExternalReference::new_space_mask(isolate()));
283
      and_(scratch, object);
284
    }
285
    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
286
    cmpq(scratch, kScratchRegister);
287
    j(cc, branch, distance);
288
  } else {
289
    ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
290
    intptr_t new_space_start =
291
        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
292
    movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
293
    if (scratch.is(object)) {
294
      addq(scratch, kScratchRegister);
295
    } else {
296
      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
297
    }
298
    and_(scratch,
299
         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
300
    j(cc, branch, distance);
301
  }
302
}
303

    
304

    
305
void MacroAssembler::RecordWriteField(
306
    Register object,
307
    int offset,
308
    Register value,
309
    Register dst,
310
    SaveFPRegsMode save_fp,
311
    RememberedSetAction remembered_set_action,
312
    SmiCheck smi_check) {
313
  // The compiled code assumes that record write doesn't change the
314
  // context register, so we check that none of the clobbered
315
  // registers are rsi.
316
  ASSERT(!value.is(rsi) && !dst.is(rsi));
317

    
318
  // First, check if a write barrier is even needed. The tests below
319
  // catch stores of Smis.
320
  Label done;
321

    
322
  // Skip barrier if writing a smi.
323
  if (smi_check == INLINE_SMI_CHECK) {
324
    JumpIfSmi(value, &done);
325
  }
326

    
327
  // Although the object register is tagged, the offset is relative to the start
328
  // of the object, so so offset must be a multiple of kPointerSize.
329
  ASSERT(IsAligned(offset, kPointerSize));
330

    
331
  lea(dst, FieldOperand(object, offset));
332
  if (emit_debug_code()) {
333
    Label ok;
334
    testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
335
    j(zero, &ok, Label::kNear);
336
    int3();
337
    bind(&ok);
338
  }
339

    
340
  RecordWrite(
341
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
342

    
343
  bind(&done);
344

    
345
  // Clobber clobbered input registers when running with the debug-code flag
346
  // turned on to provoke errors.
347
  if (emit_debug_code()) {
348
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
349
    movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
350
  }
351
}
352

    
353

    
354
void MacroAssembler::RecordWriteArray(Register object,
355
                                      Register value,
356
                                      Register index,
357
                                      SaveFPRegsMode save_fp,
358
                                      RememberedSetAction remembered_set_action,
359
                                      SmiCheck smi_check) {
360
  // First, check if a write barrier is even needed. The tests below
361
  // catch stores of Smis.
362
  Label done;
363

    
364
  // Skip barrier if writing a smi.
365
  if (smi_check == INLINE_SMI_CHECK) {
366
    JumpIfSmi(value, &done);
367
  }
368

    
369
  // Array access: calculate the destination address. Index is not a smi.
370
  Register dst = index;
371
  lea(dst, Operand(object, index, times_pointer_size,
372
                   FixedArray::kHeaderSize - kHeapObjectTag));
373

    
374
  RecordWrite(
375
      object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
376

    
377
  bind(&done);
378

    
379
  // Clobber clobbered input registers when running with the debug-code flag
380
  // turned on to provoke errors.
381
  if (emit_debug_code()) {
382
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
383
    movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
384
  }
385
}
386

    
387

    
388
void MacroAssembler::RecordWrite(Register object,
389
                                 Register address,
390
                                 Register value,
391
                                 SaveFPRegsMode fp_mode,
392
                                 RememberedSetAction remembered_set_action,
393
                                 SmiCheck smi_check) {
394
  // The compiled code assumes that record write doesn't change the
395
  // context register, so we check that none of the clobbered
396
  // registers are rsi.
397
  ASSERT(!value.is(rsi) && !address.is(rsi));
398

    
399
  ASSERT(!object.is(value));
400
  ASSERT(!object.is(address));
401
  ASSERT(!value.is(address));
402
  AssertNotSmi(object);
403

    
404
  if (remembered_set_action == OMIT_REMEMBERED_SET &&
405
      !FLAG_incremental_marking) {
406
    return;
407
  }
408

    
409
  if (emit_debug_code()) {
410
    Label ok;
411
    cmpq(value, Operand(address, 0));
412
    j(equal, &ok, Label::kNear);
413
    int3();
414
    bind(&ok);
415
  }
416

    
417
  // First, check if a write barrier is even needed. The tests below
418
  // catch stores of smis and stores into the young generation.
419
  Label done;
420

    
421
  if (smi_check == INLINE_SMI_CHECK) {
422
    // Skip barrier if writing a smi.
423
    JumpIfSmi(value, &done);
424
  }
425

    
426
  CheckPageFlag(value,
427
                value,  // Used as scratch.
428
                MemoryChunk::kPointersToHereAreInterestingMask,
429
                zero,
430
                &done,
431
                Label::kNear);
432

    
433
  CheckPageFlag(object,
434
                value,  // Used as scratch.
435
                MemoryChunk::kPointersFromHereAreInterestingMask,
436
                zero,
437
                &done,
438
                Label::kNear);
439

    
440
  RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
441
  CallStub(&stub);
442

    
443
  bind(&done);
444

    
445
  // Clobber clobbered registers when running with the debug-code flag
446
  // turned on to provoke errors.
447
  if (emit_debug_code()) {
448
    movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
449
    movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64);
450
  }
451
}
452

    
453

    
454
void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
455
  if (emit_debug_code()) Check(cc, reason);
456
}
457

    
458

    
459
void MacroAssembler::AssertFastElements(Register elements) {
460
  if (emit_debug_code()) {
461
    Label ok;
462
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
463
                Heap::kFixedArrayMapRootIndex);
464
    j(equal, &ok, Label::kNear);
465
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
466
                Heap::kFixedDoubleArrayMapRootIndex);
467
    j(equal, &ok, Label::kNear);
468
    CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
469
                Heap::kFixedCOWArrayMapRootIndex);
470
    j(equal, &ok, Label::kNear);
471
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
472
    bind(&ok);
473
  }
474
}
475

    
476

    
477
void MacroAssembler::Check(Condition cc, BailoutReason reason) {
478
  Label L;
479
  j(cc, &L, Label::kNear);
480
  Abort(reason);
481
  // Control will not return here.
482
  bind(&L);
483
}
484

    
485

    
486
void MacroAssembler::CheckStackAlignment() {
487
  int frame_alignment = OS::ActivationFrameAlignment();
488
  int frame_alignment_mask = frame_alignment - 1;
489
  if (frame_alignment > kPointerSize) {
490
    ASSERT(IsPowerOf2(frame_alignment));
491
    Label alignment_as_expected;
492
    testq(rsp, Immediate(frame_alignment_mask));
493
    j(zero, &alignment_as_expected, Label::kNear);
494
    // Abort if stack is not aligned.
495
    int3();
496
    bind(&alignment_as_expected);
497
  }
498
}
499

    
500

    
501
void MacroAssembler::NegativeZeroTest(Register result,
502
                                      Register op,
503
                                      Label* then_label) {
504
  Label ok;
505
  testl(result, result);
506
  j(not_zero, &ok, Label::kNear);
507
  testl(op, op);
508
  j(sign, then_label);
509
  bind(&ok);
510
}
511

    
512

    
513
void MacroAssembler::Abort(BailoutReason reason) {
514
  // We want to pass the msg string like a smi to avoid GC
515
  // problems, however msg is not guaranteed to be aligned
516
  // properly. Instead, we pass an aligned pointer that is
517
  // a proper v8 smi, but also pass the alignment difference
518
  // from the real pointer as a smi.
519
  const char* msg = GetBailoutReason(reason);
520
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
521
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
522
  // Note: p0 might not be a valid Smi _value_, but it has a valid Smi tag.
523
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
524
#ifdef DEBUG
525
  if (msg != NULL) {
526
    RecordComment("Abort message: ");
527
    RecordComment(msg);
528
  }
529

    
530
  if (FLAG_trap_on_abort) {
531
    int3();
532
    return;
533
  }
534
#endif
535

    
536
  push(rax);
537
  movq(kScratchRegister, p0, RelocInfo::NONE64);
538
  push(kScratchRegister);
539
  movq(kScratchRegister,
540
       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
541
       RelocInfo::NONE64);
542
  push(kScratchRegister);
543

    
544
  if (!has_frame_) {
545
    // We don't actually want to generate a pile of code for this, so just
546
    // claim there is a stack frame, without generating one.
547
    FrameScope scope(this, StackFrame::NONE);
548
    CallRuntime(Runtime::kAbort, 2);
549
  } else {
550
    CallRuntime(Runtime::kAbort, 2);
551
  }
552
  // Control will not return here.
553
  int3();
554
}
555

    
556

    
557
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
558
  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
559
  Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
560
}
561

    
562

    
563
void MacroAssembler::TailCallStub(CodeStub* stub) {
564
  ASSERT(allow_stub_calls_ ||
565
         stub->CompilingCallsToThisStubIsGCSafe(isolate()));
566
  Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
567
}
568

    
569

    
570
void MacroAssembler::StubReturn(int argc) {
571
  ASSERT(argc >= 1 && generating_stub());
572
  ret((argc - 1) * kPointerSize);
573
}
574

    
575

    
576
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
577
  if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false;
578
  return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate());
579
}
580

    
581

    
582
void MacroAssembler::IllegalOperation(int num_arguments) {
583
  if (num_arguments > 0) {
584
    addq(rsp, Immediate(num_arguments * kPointerSize));
585
  }
586
  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
587
}
588

    
589

    
590
void MacroAssembler::IndexFromHash(Register hash, Register index) {
591
  // The assert checks that the constants for the maximum number of digits
592
  // for an array index cached in the hash field and the number of bits
593
  // reserved for it does not conflict.
594
  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
595
         (1 << String::kArrayIndexValueBits));
596
  // We want the smi-tagged index in key. Even if we subsequently go to
597
  // the slow case, converting the key to a smi is always valid.
598
  // key: string key
599
  // hash: key's hash field, including its array index value.
600
  and_(hash, Immediate(String::kArrayIndexValueMask));
601
  shr(hash, Immediate(String::kHashShift));
602
  // Here we actually clobber the key which will be used if calling into
603
  // runtime later. However as the new key is the numeric value of a string key
604
  // there is no difference in using either key.
605
  Integer32ToSmi(index, hash);
606
}
607

    
608

    
609
void MacroAssembler::CallRuntime(const Runtime::Function* f,
610
                                 int num_arguments,
611
                                 SaveFPRegsMode save_doubles) {
612
  // If the expected number of arguments of the runtime function is
613
  // constant, we check that the actual number of arguments match the
614
  // expectation.
615
  if (f->nargs >= 0 && f->nargs != num_arguments) {
616
    IllegalOperation(num_arguments);
617
    return;
618
  }
619

    
620
  // TODO(1236192): Most runtime routines don't need the number of
621
  // arguments passed in because it is constant. At some point we
622
  // should remove this need and make the runtime routine entry code
623
  // smarter.
624
  Set(rax, num_arguments);
625
  LoadAddress(rbx, ExternalReference(f, isolate()));
626
  CEntryStub ces(f->result_size, save_doubles);
627
  CallStub(&ces);
628
}
629

    
630

    
631
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
632
                                           int num_arguments) {
633
  Set(rax, num_arguments);
634
  LoadAddress(rbx, ext);
635

    
636
  CEntryStub stub(1);
637
  CallStub(&stub);
638
}
639

    
640

    
641
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
642
                                               int num_arguments,
643
                                               int result_size) {
644
  // ----------- S t a t e -------------
645
  //  -- rsp[0]                 : return address
646
  //  -- rsp[8]                 : argument num_arguments - 1
647
  //  ...
648
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
649
  // -----------------------------------
650

    
651
  // TODO(1236192): Most runtime routines don't need the number of
652
  // arguments passed in because it is constant. At some point we
653
  // should remove this need and make the runtime routine entry code
654
  // smarter.
655
  Set(rax, num_arguments);
656
  JumpToExternalReference(ext, result_size);
657
}
658

    
659

    
660
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
661
                                     int num_arguments,
662
                                     int result_size) {
663
  TailCallExternalReference(ExternalReference(fid, isolate()),
664
                            num_arguments,
665
                            result_size);
666
}
667

    
668

    
669
static int Offset(ExternalReference ref0, ExternalReference ref1) {
670
  int64_t offset = (ref0.address() - ref1.address());
671
  // Check that fits into int.
672
  ASSERT(static_cast<int>(offset) == offset);
673
  return static_cast<int>(offset);
674
}
675

    
676

    
677
void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) {
678
  EnterApiExitFrame(arg_stack_space);
679
}
680

    
681

    
682
void MacroAssembler::CallApiFunctionAndReturn(
683
    Address function_address,
684
    Address thunk_address,
685
    Register thunk_last_arg,
686
    int stack_space,
687
    Operand return_value_operand,
688
    Operand* context_restore_operand) {
689
  Label prologue;
690
  Label promote_scheduled_exception;
691
  Label exception_handled;
692
  Label delete_allocated_handles;
693
  Label leave_exit_frame;
694
  Label write_back;
695

    
696
  Factory* factory = isolate()->factory();
697
  ExternalReference next_address =
698
      ExternalReference::handle_scope_next_address(isolate());
699
  const int kNextOffset = 0;
700
  const int kLimitOffset = Offset(
701
      ExternalReference::handle_scope_limit_address(isolate()),
702
      next_address);
703
  const int kLevelOffset = Offset(
704
      ExternalReference::handle_scope_level_address(isolate()),
705
      next_address);
706
  ExternalReference scheduled_exception_address =
707
      ExternalReference::scheduled_exception_address(isolate());
708

    
709
  // Allocate HandleScope in callee-save registers.
710
  Register prev_next_address_reg = r14;
711
  Register prev_limit_reg = rbx;
712
  Register base_reg = r15;
713
  movq(base_reg, next_address);
714
  movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
715
  movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
716
  addl(Operand(base_reg, kLevelOffset), Immediate(1));
717

    
718
  if (FLAG_log_timer_events) {
719
    FrameScope frame(this, StackFrame::MANUAL);
720
    PushSafepointRegisters();
721
    PrepareCallCFunction(1);
722
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
723
    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
724
    PopSafepointRegisters();
725
  }
726

    
727

    
728
  Label profiler_disabled;
729
  Label end_profiler_check;
730
  bool* is_profiling_flag =
731
      isolate()->cpu_profiler()->is_profiling_address();
732
  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
733
  movq(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
734
  cmpb(Operand(rax, 0), Immediate(0));
735
  j(zero, &profiler_disabled);
736

    
737
  // Third parameter is the address of the actual getter function.
738
  movq(thunk_last_arg, function_address, RelocInfo::EXTERNAL_REFERENCE);
739
  movq(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
740
  jmp(&end_profiler_check);
741

    
742
  bind(&profiler_disabled);
743
  // Call the api function!
744
  movq(rax, reinterpret_cast<Address>(function_address),
745
       RelocInfo::EXTERNAL_REFERENCE);
746

    
747
  bind(&end_profiler_check);
748

    
749
  // Call the api function!
750
  call(rax);
751

    
752
  if (FLAG_log_timer_events) {
753
    FrameScope frame(this, StackFrame::MANUAL);
754
    PushSafepointRegisters();
755
    PrepareCallCFunction(1);
756
    LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
757
    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
758
    PopSafepointRegisters();
759
  }
760

    
761
  // Load the value from ReturnValue
762
  movq(rax, return_value_operand);
763
  bind(&prologue);
764

    
765
  // No more valid handles (the result handle was the last one). Restore
766
  // previous handle scope.
767
  subl(Operand(base_reg, kLevelOffset), Immediate(1));
768
  movq(Operand(base_reg, kNextOffset), prev_next_address_reg);
769
  cmpq(prev_limit_reg, Operand(base_reg, kLimitOffset));
770
  j(not_equal, &delete_allocated_handles);
771
  bind(&leave_exit_frame);
772

    
773
  // Check if the function scheduled an exception.
774
  movq(rsi, scheduled_exception_address);
775
  Cmp(Operand(rsi, 0), factory->the_hole_value());
776
  j(not_equal, &promote_scheduled_exception);
777
  bind(&exception_handled);
778

    
779
#if ENABLE_EXTRA_CHECKS
780
  // Check if the function returned a valid JavaScript value.
781
  Label ok;
782
  Register return_value = rax;
783
  Register map = rcx;
784

    
785
  JumpIfSmi(return_value, &ok, Label::kNear);
786
  movq(map, FieldOperand(return_value, HeapObject::kMapOffset));
787

    
788
  CmpInstanceType(map, FIRST_NONSTRING_TYPE);
789
  j(below, &ok, Label::kNear);
790

    
791
  CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
792
  j(above_equal, &ok, Label::kNear);
793

    
794
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
795
  j(equal, &ok, Label::kNear);
796

    
797
  CompareRoot(return_value, Heap::kUndefinedValueRootIndex);
798
  j(equal, &ok, Label::kNear);
799

    
800
  CompareRoot(return_value, Heap::kTrueValueRootIndex);
801
  j(equal, &ok, Label::kNear);
802

    
803
  CompareRoot(return_value, Heap::kFalseValueRootIndex);
804
  j(equal, &ok, Label::kNear);
805

    
806
  CompareRoot(return_value, Heap::kNullValueRootIndex);
807
  j(equal, &ok, Label::kNear);
808

    
809
  Abort(kAPICallReturnedInvalidObject);
810

    
811
  bind(&ok);
812
#endif
813

    
814
  bool restore_context = context_restore_operand != NULL;
815
  if (restore_context) {
816
    movq(rsi, *context_restore_operand);
817
  }
818
  LeaveApiExitFrame(!restore_context);
819
  ret(stack_space * kPointerSize);
820

    
821
  bind(&promote_scheduled_exception);
822
  {
823
    FrameScope frame(this, StackFrame::INTERNAL);
824
    CallRuntime(Runtime::kPromoteScheduledException, 0);
825
  }
826
  jmp(&exception_handled);
827

    
828
  // HandleScope limit has changed. Delete allocated extensions.
829
  bind(&delete_allocated_handles);
830
  movq(Operand(base_reg, kLimitOffset), prev_limit_reg);
831
  movq(prev_limit_reg, rax);
832
  LoadAddress(arg_reg_1, ExternalReference::isolate_address(isolate()));
833
  LoadAddress(rax,
834
              ExternalReference::delete_handle_scope_extensions(isolate()));
835
  call(rax);
836
  movq(rax, prev_limit_reg);
837
  jmp(&leave_exit_frame);
838
}
839

    
840

    
841
void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
842
                                             int result_size) {
843
  // Set the entry point and jump to the C entry runtime stub.
844
  LoadAddress(rbx, ext);
845
  CEntryStub ces(result_size);
846
  jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET);
847
}
848

    
849

    
850
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
851
                                   InvokeFlag flag,
852
                                   const CallWrapper& call_wrapper) {
853
  // You can't call a builtin without a valid frame.
854
  ASSERT(flag == JUMP_FUNCTION || has_frame());
855

    
856
  // Rely on the assertion to check that the number of provided
857
  // arguments match the expected number of arguments. Fake a
858
  // parameter count to avoid emitting code to do the check.
859
  ParameterCount expected(0);
860
  GetBuiltinEntry(rdx, id);
861
  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
862
}
863

    
864

    
865
void MacroAssembler::GetBuiltinFunction(Register target,
866
                                        Builtins::JavaScript id) {
867
  // Load the builtins object into target register.
868
  movq(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
869
  movq(target, FieldOperand(target, GlobalObject::kBuiltinsOffset));
870
  movq(target, FieldOperand(target,
871
                            JSBuiltinsObject::OffsetOfFunctionWithId(id)));
872
}
873

    
874

    
875
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
876
  ASSERT(!target.is(rdi));
877
  // Load the JavaScript builtin function from the builtins object.
878
  GetBuiltinFunction(rdi, id);
879
  movq(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
880
}
881

    
882

    
883
#define REG(Name) { kRegister_ ## Name ## _Code }
884

    
885
static const Register saved_regs[] = {
886
  REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8),
887
  REG(r9), REG(r10), REG(r11)
888
};
889

    
890
#undef REG
891

    
892
static const int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
893

    
894

    
895
void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
896
                                     Register exclusion1,
897
                                     Register exclusion2,
898
                                     Register exclusion3) {
899
  // We don't allow a GC during a store buffer overflow so there is no need to
900
  // store the registers in any particular way, but we do have to store and
901
  // restore them.
902
  for (int i = 0; i < kNumberOfSavedRegs; i++) {
903
    Register reg = saved_regs[i];
904
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
905
      push(reg);
906
    }
907
  }
908
  // R12 to r15 are callee save on all platforms.
909
  if (fp_mode == kSaveFPRegs) {
910
    subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
911
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
912
      XMMRegister reg = XMMRegister::from_code(i);
913
      movsd(Operand(rsp, i * kDoubleSize), reg);
914
    }
915
  }
916
}
917

    
918

    
919
void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
920
                                    Register exclusion1,
921
                                    Register exclusion2,
922
                                    Register exclusion3) {
923
  if (fp_mode == kSaveFPRegs) {
924
    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
925
      XMMRegister reg = XMMRegister::from_code(i);
926
      movsd(reg, Operand(rsp, i * kDoubleSize));
927
    }
928
    addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
929
  }
930
  for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
931
    Register reg = saved_regs[i];
932
    if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
933
      pop(reg);
934
    }
935
  }
936
}
937

    
938

    
939
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
940
  xorps(dst, dst);
941
  cvtlsi2sd(dst, src);
942
}
943

    
944

    
945
void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) {
946
  xorps(dst, dst);
947
  cvtlsi2sd(dst, src);
948
}
949

    
950

    
951
void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
952
  ASSERT(!r.IsDouble());
953
  if (r.IsByte()) {
954
    movzxbl(dst, src);
955
  } else if (r.IsInteger32()) {
956
    movl(dst, src);
957
  } else {
958
    movq(dst, src);
959
  }
960
}
961

    
962

    
963
void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
964
  ASSERT(!r.IsDouble());
965
  if (r.IsByte()) {
966
    movb(dst, src);
967
  } else if (r.IsInteger32()) {
968
    movl(dst, src);
969
  } else {
970
    movq(dst, src);
971
  }
972
}
973

    
974

    
975
void MacroAssembler::Set(Register dst, int64_t x) {
976
  if (x == 0) {
977
    xorl(dst, dst);
978
  } else if (is_uint32(x)) {
979
    movl(dst, Immediate(static_cast<uint32_t>(x)));
980
  } else if (is_int32(x)) {
981
    movq(dst, Immediate(static_cast<int32_t>(x)));
982
  } else {
983
    movq(dst, x, RelocInfo::NONE64);
984
  }
985
}
986

    
987

    
988
void MacroAssembler::Set(const Operand& dst, int64_t x) {
989
  if (is_int32(x)) {
990
    movq(dst, Immediate(static_cast<int32_t>(x)));
991
  } else {
992
    Set(kScratchRegister, x);
993
    movq(dst, kScratchRegister);
994
  }
995
}
996

    
997

    
998
// ----------------------------------------------------------------------------
999
// Smi tagging, untagging and tag detection.
1000

    
1001
bool MacroAssembler::IsUnsafeInt(const int32_t x) {
1002
  static const int kMaxBits = 17;
1003
  return !is_intn(x, kMaxBits);
1004
}
1005

    
1006

    
1007
void MacroAssembler::SafeMove(Register dst, Smi* src) {
1008
  ASSERT(!dst.is(kScratchRegister));
1009
  ASSERT(SmiValuesAre32Bits());  // JIT cookie can be converted to Smi.
1010
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1011
    Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
1012
    Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1013
    xor_(dst, kScratchRegister);
1014
  } else {
1015
    Move(dst, src);
1016
  }
1017
}
1018

    
1019

    
1020
void MacroAssembler::SafePush(Smi* src) {
1021
  ASSERT(SmiValuesAre32Bits());  // JIT cookie can be converted to Smi.
1022
  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
1023
    Push(Smi::FromInt(src->value() ^ jit_cookie()));
1024
    Move(kScratchRegister, Smi::FromInt(jit_cookie()));
1025
    xor_(Operand(rsp, 0), kScratchRegister);
1026
  } else {
1027
    Push(src);
1028
  }
1029
}
1030

    
1031

    
1032
Register MacroAssembler::GetSmiConstant(Smi* source) {
1033
  int value = source->value();
1034
  if (value == 0) {
1035
    xorl(kScratchRegister, kScratchRegister);
1036
    return kScratchRegister;
1037
  }
1038
  if (value == 1) {
1039
    return kSmiConstantRegister;
1040
  }
1041
  LoadSmiConstant(kScratchRegister, source);
1042
  return kScratchRegister;
1043
}
1044

    
1045

    
1046
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
1047
  if (emit_debug_code()) {
1048
    movq(dst,
1049
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
1050
         RelocInfo::NONE64);
1051
    cmpq(dst, kSmiConstantRegister);
1052
    if (allow_stub_calls()) {
1053
      Assert(equal, kUninitializedKSmiConstantRegister);
1054
    } else {
1055
      Label ok;
1056
      j(equal, &ok, Label::kNear);
1057
      int3();
1058
      bind(&ok);
1059
    }
1060
  }
1061
  int value = source->value();
1062
  if (value == 0) {
1063
    xorl(dst, dst);
1064
    return;
1065
  }
1066
  bool negative = value < 0;
1067
  unsigned int uvalue = negative ? -value : value;
1068

    
1069
  switch (uvalue) {
1070
    case 9:
1071
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
1072
      break;
1073
    case 8:
1074
      xorl(dst, dst);
1075
      lea(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
1076
      break;
1077
    case 4:
1078
      xorl(dst, dst);
1079
      lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
1080
      break;
1081
    case 5:
1082
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
1083
      break;
1084
    case 3:
1085
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
1086
      break;
1087
    case 2:
1088
      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
1089
      break;
1090
    case 1:
1091
      movq(dst, kSmiConstantRegister);
1092
      break;
1093
    case 0:
1094
      UNREACHABLE();
1095
      return;
1096
    default:
1097
      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
1098
      return;
1099
  }
1100
  if (negative) {
1101
    neg(dst);
1102
  }
1103
}
1104

    
1105

    
1106
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
1107
  STATIC_ASSERT(kSmiTag == 0);
1108
  if (!dst.is(src)) {
1109
    movl(dst, src);
1110
  }
1111
  shl(dst, Immediate(kSmiShift));
1112
}
1113

    
1114

    
1115
void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
1116
  if (emit_debug_code()) {
1117
    testb(dst, Immediate(0x01));
1118
    Label ok;
1119
    j(zero, &ok, Label::kNear);
1120
    if (allow_stub_calls()) {
1121
      Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
1122
    } else {
1123
      int3();
1124
    }
1125
    bind(&ok);
1126
  }
1127
  ASSERT(kSmiShift % kBitsPerByte == 0);
1128
  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
1129
}
1130

    
1131

    
1132
void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
1133
                                                Register src,
1134
                                                int constant) {
1135
  if (dst.is(src)) {
1136
    addl(dst, Immediate(constant));
1137
  } else {
1138
    leal(dst, Operand(src, constant));
1139
  }
1140
  shl(dst, Immediate(kSmiShift));
1141
}
1142

    
1143

    
1144
void MacroAssembler::SmiToInteger32(Register dst, Register src) {
1145
  STATIC_ASSERT(kSmiTag == 0);
1146
  if (!dst.is(src)) {
1147
    movq(dst, src);
1148
  }
1149
  shr(dst, Immediate(kSmiShift));
1150
}
1151

    
1152

    
1153
void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
1154
  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1155
}
1156

    
1157

    
1158
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
1159
  STATIC_ASSERT(kSmiTag == 0);
1160
  if (!dst.is(src)) {
1161
    movq(dst, src);
1162
  }
1163
  sar(dst, Immediate(kSmiShift));
1164
}
1165

    
1166

    
1167
void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
1168
  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
1169
}
1170

    
1171

    
1172
void MacroAssembler::SmiTest(Register src) {
1173
  AssertSmi(src);
1174
  testq(src, src);
1175
}
1176

    
1177

    
1178
void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
1179
  AssertSmi(smi1);
1180
  AssertSmi(smi2);
1181
  cmpq(smi1, smi2);
1182
}
1183

    
1184

    
1185
void MacroAssembler::SmiCompare(Register dst, Smi* src) {
1186
  AssertSmi(dst);
1187
  Cmp(dst, src);
1188
}
1189

    
1190

    
1191
void MacroAssembler::Cmp(Register dst, Smi* src) {
1192
  ASSERT(!dst.is(kScratchRegister));
1193
  if (src->value() == 0) {
1194
    testq(dst, dst);
1195
  } else {
1196
    Register constant_reg = GetSmiConstant(src);
1197
    cmpq(dst, constant_reg);
1198
  }
1199
}
1200

    
1201

    
1202
void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
1203
  AssertSmi(dst);
1204
  AssertSmi(src);
1205
  cmpq(dst, src);
1206
}
1207

    
1208

    
1209
void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
1210
  AssertSmi(dst);
1211
  AssertSmi(src);
1212
  cmpq(dst, src);
1213
}
1214

    
1215

    
1216
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
1217
  AssertSmi(dst);
1218
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
1219
}
1220

    
1221

    
1222
void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
1223
  // The Operand cannot use the smi register.
1224
  Register smi_reg = GetSmiConstant(src);
1225
  ASSERT(!dst.AddressUsesRegister(smi_reg));
1226
  cmpq(dst, smi_reg);
1227
}
1228

    
1229

    
1230
void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
1231
  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
1232
}
1233

    
1234

    
1235
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
1236
                                                           Register src,
1237
                                                           int power) {
1238
  ASSERT(power >= 0);
1239
  ASSERT(power < 64);
1240
  if (power == 0) {
1241
    SmiToInteger64(dst, src);
1242
    return;
1243
  }
1244
  if (!dst.is(src)) {
1245
    movq(dst, src);
1246
  }
1247
  if (power < kSmiShift) {
1248
    sar(dst, Immediate(kSmiShift - power));
1249
  } else if (power > kSmiShift) {
1250
    shl(dst, Immediate(power - kSmiShift));
1251
  }
1252
}
1253

    
1254

    
1255
void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
1256
                                                         Register src,
1257
                                                         int power) {
1258
  ASSERT((0 <= power) && (power < 32));
1259
  if (dst.is(src)) {
1260
    shr(dst, Immediate(power + kSmiShift));
1261
  } else {
1262
    UNIMPLEMENTED();  // Not used.
1263
  }
1264
}
1265

    
1266

    
1267
void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
1268
                                 Label* on_not_smis,
1269
                                 Label::Distance near_jump) {
1270
  if (dst.is(src1) || dst.is(src2)) {
1271
    ASSERT(!src1.is(kScratchRegister));
1272
    ASSERT(!src2.is(kScratchRegister));
1273
    movq(kScratchRegister, src1);
1274
    or_(kScratchRegister, src2);
1275
    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
1276
    movq(dst, kScratchRegister);
1277
  } else {
1278
    movq(dst, src1);
1279
    or_(dst, src2);
1280
    JumpIfNotSmi(dst, on_not_smis, near_jump);
1281
  }
1282
}
1283

    
1284

    
1285
Condition MacroAssembler::CheckSmi(Register src) {
1286
  STATIC_ASSERT(kSmiTag == 0);
1287
  testb(src, Immediate(kSmiTagMask));
1288
  return zero;
1289
}
1290

    
1291

    
1292
Condition MacroAssembler::CheckSmi(const Operand& src) {
1293
  STATIC_ASSERT(kSmiTag == 0);
1294
  testb(src, Immediate(kSmiTagMask));
1295
  return zero;
1296
}
1297

    
1298

    
1299
Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
1300
  STATIC_ASSERT(kSmiTag == 0);
1301
  // Test that both bits of the mask 0x8000000000000001 are zero.
1302
  movq(kScratchRegister, src);
1303
  rol(kScratchRegister, Immediate(1));
1304
  testb(kScratchRegister, Immediate(3));
1305
  return zero;
1306
}
1307

    
1308

    
1309
Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
1310
  if (first.is(second)) {
1311
    return CheckSmi(first);
1312
  }
1313
  STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
1314
  leal(kScratchRegister, Operand(first, second, times_1, 0));
1315
  testb(kScratchRegister, Immediate(0x03));
1316
  return zero;
1317
}
1318

    
1319

    
1320
Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
1321
                                                  Register second) {
1322
  if (first.is(second)) {
1323
    return CheckNonNegativeSmi(first);
1324
  }
1325
  movq(kScratchRegister, first);
1326
  or_(kScratchRegister, second);
1327
  rol(kScratchRegister, Immediate(1));
1328
  testl(kScratchRegister, Immediate(3));
1329
  return zero;
1330
}
1331

    
1332

    
1333
Condition MacroAssembler::CheckEitherSmi(Register first,
1334
                                         Register second,
1335
                                         Register scratch) {
1336
  if (first.is(second)) {
1337
    return CheckSmi(first);
1338
  }
1339
  if (scratch.is(second)) {
1340
    andl(scratch, first);
1341
  } else {
1342
    if (!scratch.is(first)) {
1343
      movl(scratch, first);
1344
    }
1345
    andl(scratch, second);
1346
  }
1347
  testb(scratch, Immediate(kSmiTagMask));
1348
  return zero;
1349
}
1350

    
1351

    
1352
Condition MacroAssembler::CheckIsMinSmi(Register src) {
1353
  ASSERT(!src.is(kScratchRegister));
1354
  // If we overflow by subtracting one, it's the minimal smi value.
1355
  cmpq(src, kSmiConstantRegister);
1356
  return overflow;
1357
}
1358

    
1359

    
1360
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
1361
  // A 32-bit integer value can always be converted to a smi.
1362
  return always;
1363
}
1364

    
1365

    
1366
Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
1367
  // An unsigned 32-bit integer value is valid as long as the high bit
1368
  // is not set.
1369
  testl(src, src);
1370
  return positive;
1371
}
1372

    
1373

    
1374
void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
1375
  if (dst.is(src)) {
1376
    andl(dst, Immediate(kSmiTagMask));
1377
  } else {
1378
    movl(dst, Immediate(kSmiTagMask));
1379
    andl(dst, src);
1380
  }
1381
}
1382

    
1383

    
1384
void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
1385
  if (!(src.AddressUsesRegister(dst))) {
1386
    movl(dst, Immediate(kSmiTagMask));
1387
    andl(dst, src);
1388
  } else {
1389
    movl(dst, src);
1390
    andl(dst, Immediate(kSmiTagMask));
1391
  }
1392
}
1393

    
1394

    
1395
void MacroAssembler::JumpIfNotValidSmiValue(Register src,
1396
                                            Label* on_invalid,
1397
                                            Label::Distance near_jump) {
1398
  Condition is_valid = CheckInteger32ValidSmiValue(src);
1399
  j(NegateCondition(is_valid), on_invalid, near_jump);
1400
}
1401

    
1402

    
1403
void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
1404
                                                Label* on_invalid,
1405
                                                Label::Distance near_jump) {
1406
  Condition is_valid = CheckUInteger32ValidSmiValue(src);
1407
  j(NegateCondition(is_valid), on_invalid, near_jump);
1408
}
1409

    
1410

    
1411
void MacroAssembler::JumpIfSmi(Register src,
1412
                               Label* on_smi,
1413
                               Label::Distance near_jump) {
1414
  Condition smi = CheckSmi(src);
1415
  j(smi, on_smi, near_jump);
1416
}
1417

    
1418

    
1419
void MacroAssembler::JumpIfNotSmi(Register src,
1420
                                  Label* on_not_smi,
1421
                                  Label::Distance near_jump) {
1422
  Condition smi = CheckSmi(src);
1423
  j(NegateCondition(smi), on_not_smi, near_jump);
1424
}
1425

    
1426

    
1427
void MacroAssembler::JumpUnlessNonNegativeSmi(
1428
    Register src, Label* on_not_smi_or_negative,
1429
    Label::Distance near_jump) {
1430
  Condition non_negative_smi = CheckNonNegativeSmi(src);
1431
  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
1432
}
1433

    
1434

    
1435
void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
1436
                                             Smi* constant,
1437
                                             Label* on_equals,
1438
                                             Label::Distance near_jump) {
1439
  SmiCompare(src, constant);
1440
  j(equal, on_equals, near_jump);
1441
}
1442

    
1443

    
1444
void MacroAssembler::JumpIfNotBothSmi(Register src1,
1445
                                      Register src2,
1446
                                      Label* on_not_both_smi,
1447
                                      Label::Distance near_jump) {
1448
  Condition both_smi = CheckBothSmi(src1, src2);
1449
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1450
}
1451

    
1452

    
1453
void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
1454
                                                  Register src2,
1455
                                                  Label* on_not_both_smi,
1456
                                                  Label::Distance near_jump) {
1457
  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
1458
  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
1459
}
1460

    
1461

    
1462
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
1463
  if (constant->value() == 0) {
1464
    if (!dst.is(src)) {
1465
      movq(dst, src);
1466
    }
1467
    return;
1468
  } else if (dst.is(src)) {
1469
    ASSERT(!dst.is(kScratchRegister));
1470
    switch (constant->value()) {
1471
      case 1:
1472
        addq(dst, kSmiConstantRegister);
1473
        return;
1474
      case 2:
1475
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1476
        return;
1477
      case 4:
1478
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1479
        return;
1480
      case 8:
1481
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1482
        return;
1483
      default:
1484
        Register constant_reg = GetSmiConstant(constant);
1485
        addq(dst, constant_reg);
1486
        return;
1487
    }
1488
  } else {
1489
    switch (constant->value()) {
1490
      case 1:
1491
        lea(dst, Operand(src, kSmiConstantRegister, times_1, 0));
1492
        return;
1493
      case 2:
1494
        lea(dst, Operand(src, kSmiConstantRegister, times_2, 0));
1495
        return;
1496
      case 4:
1497
        lea(dst, Operand(src, kSmiConstantRegister, times_4, 0));
1498
        return;
1499
      case 8:
1500
        lea(dst, Operand(src, kSmiConstantRegister, times_8, 0));
1501
        return;
1502
      default:
1503
        LoadSmiConstant(dst, constant);
1504
        addq(dst, src);
1505
        return;
1506
    }
1507
  }
1508
}
1509

    
1510

    
1511
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
1512
  if (constant->value() != 0) {
1513
    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
1514
  }
1515
}
1516

    
1517

    
1518
void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));

    Label done;
    LoadSmiConstant(kScratchRegister, constant);
    addq(dst, kScratchRegister);
    j(no_overflow, &done, Label::kNear);
    // Restore src.
    subq(dst, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&done);
  } else {
    LoadSmiConstant(dst, constant);
    addq(dst, src);
    j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    Register constant_reg = GetSmiConstant(constant);
    subq(dst, constant_reg);
  } else {
    if (constant->value() == Smi::kMinValue) {
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result; it only
      // differs on the overflow bit, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
      addq(dst, src);
    }
  }
}


void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    Smi* constant,
                                    Label* on_not_smi_result,
                                    Label::Distance near_jump) {
  if (constant->value() == 0) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
  } else if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result, near_jump);
      LoadSmiConstant(kScratchRegister, constant);
      subq(dst, kScratchRegister);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
      addq(kScratchRegister, dst);
      j(overflow, on_not_smi_result, near_jump);
      movq(dst, kScratchRegister);
    }
  } else {
    if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
      // We test the non-negativeness before doing the subtraction.
      testq(src, src);
      j(not_sign, on_not_smi_result, near_jump);
      LoadSmiConstant(dst, constant);
      // Adding and subtracting the min-value gives the same result; it only
      // differs on the overflow bit, which we don't check here.
      addq(dst, src);
    } else {
      // Subtract by adding the negation.
      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
      addq(dst, src);
      j(overflow, on_not_smi_result, near_jump);
    }
  }
}


void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_smi_result,
                            Label::Distance near_jump) {
  if (dst.is(src)) {
    ASSERT(!dst.is(kScratchRegister));
    movq(kScratchRegister, src);
    neg(dst);  // Low 32 bits are retained as zero by negation.
    // Test if result is zero or Smi::kMinValue.
    cmpq(dst, kScratchRegister);
    j(not_equal, on_smi_result, near_jump);
    movq(src, kScratchRegister);
  } else {
    movq(dst, src);
    neg(dst);
    cmpq(dst, src);
    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
    j(not_equal, on_smi_result, near_jump);
  }
}
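// Note on the compares above: in two's complement the only values unchanged
// by negation are 0 and Smi::kMinValue (as a tagged word,
// 0x8000000000000000).  Comparing the negated register with the original
// value therefore catches exactly the two failure cases, -0 and
// -Smi::kMinValue, which are not representable as smis.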
template<class T>
static void SmiAddHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->addq(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->subq(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movq(dst, src1);
    masm->addq(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!src2.AddressUsesRegister(dst));
  SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible.
  if (!dst.is(src1)) {
    if (emit_debug_code()) {
      movq(kScratchRegister, src1);
      addq(kScratchRegister, src2);
      Check(no_overflow, kSmiAdditionOverflow);
    }
    lea(dst, Operand(src1, src2, times_1, 0));
  } else {
    addq(dst, src2);
    Assert(no_overflow, kSmiAdditionOverflow);
  }
}


template<class T>
static void SmiSubHelper(MacroAssembler* masm,
                         Register dst,
                         Register src1,
                         T src2,
                         Label* on_not_smi_result,
                         Label::Distance near_jump) {
  if (dst.is(src1)) {
    Label done;
    masm->subq(dst, src2);
    masm->j(no_overflow, &done, Label::kNear);
    // Restore src1.
    masm->addq(dst, src2);
    masm->jmp(on_not_smi_result, near_jump);
    masm->bind(&done);
  } else {
    masm->movq(dst, src1);
    masm->subq(dst, src2);
    masm->j(overflow, on_not_smi_result, near_jump);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!dst.is(src2));
  SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT_NOT_NULL(on_not_smi_result);
  ASSERT(!src2.AddressUsesRegister(dst));
  SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}


template<class T>
static void SmiSubNoOverflowHelper(MacroAssembler* masm,
                                   Register dst,
                                   Register src1,
                                   T src2) {
  // No overflow checking. Use only when it's known that
  // overflowing is impossible (e.g., subtracting two positive smis).
  if (!dst.is(src1)) {
    masm->movq(dst, src1);
  }
  masm->subq(dst, src2);
  masm->Assert(no_overflow, kSmiSubtractionOverflow);
}


void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
  ASSERT(!dst.is(src2));
  SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            const Operand& src2) {
  SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2);
}


void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(src2));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));

  if (dst.is(src1)) {
    Label failure, zero_correct_result;
    movq(kScratchRegister, src1);  // Create backup for later testing.
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, &failure, Label::kNear);

    // Check for negative zero result.  If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result, Label::kNear);

    movq(dst, kScratchRegister);
    xor_(dst, src2);
    // Result was positive zero.
    j(positive, &zero_correct_result, Label::kNear);

    bind(&failure);  // Reused failure exit, restores src1.
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);

    bind(&zero_correct_result);
    Set(dst, 0);

    bind(&correct_result);
  } else {
    SmiToInteger64(dst, src1);
    imul(dst, src2);
    j(overflow, on_not_smi_result, near_jump);
    // Check for negative zero result.  If product is zero, and one
    // argument is negative, go to slow case.
    Label correct_result;
    testq(dst, dst);
    j(not_zero, &correct_result, Label::kNear);
    // One of src1 and src2 is zero; check whether the other one is
    // negative.
    movq(kScratchRegister, src1);
    xor_(kScratchRegister, src2);
    j(negative, on_not_smi_result, near_jump);
    bind(&correct_result);
  }
}
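// Note on the zero checks above: JavaScript distinguishes +0 from -0, so a
// smi product of zero is only valid if it really is +0.  For example
// (-4) * 0 must yield -0, which cannot be a smi; the xor of the operands has
// its sign bit set exactly when one factor was negative, and that is the
// case that falls through to on_not_smi_result.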
void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  testq(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  // We need to rule out dividing Smi::kMinValue by -1, since that would
  // overflow in idiv and raise an exception.
  // We combine this with the negative zero test (negative zero can only
  // happen when dividing zero by a negative number).

  // We overshoot a little and go to slow case if we divide min-value
  // by any negative value, not just -1.
  Label safe_div;
  testl(rax, Immediate(0x7fffffff));
  j(not_zero, &safe_div, Label::kNear);
  testq(src2, src2);
  if (src1.is(rax)) {
    j(positive, &safe_div, Label::kNear);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
  } else {
    j(negative, on_not_smi_result, near_jump);
  }
  bind(&safe_div);

  SmiToInteger32(src2, src2);
  // Sign extend src1 into edx:eax.
  cdq();
  idivl(src2);
  Integer32ToSmi(src2, src2);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  if (src1.is(rax)) {
    Label smi_result;
    j(zero, &smi_result, Label::kNear);
    movq(src1, kScratchRegister);
    jmp(on_not_smi_result, near_jump);
    bind(&smi_result);
  } else {
    j(not_zero, on_not_smi_result, near_jump);
  }
  if (!dst.is(src1) && src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  Integer32ToSmi(dst, rax);
}
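// Note on the "overshoot" above: Smi::kMinValue / -1 would be -kMinValue,
// which does not fit in 32 bits (idivl would fault), and 0 divided by a
// negative number must produce -0, which is not a smi.  Both cases are
// caught by bailing out whenever the dividend is kMinValue or 0 and the
// divisor is negative.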
void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result,
                            Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));
  ASSERT(!src1.is(src2));

  testq(src2, src2);
  j(zero, on_not_smi_result, near_jump);

  if (src1.is(rax)) {
    movq(kScratchRegister, src1);
  }
  SmiToInteger32(rax, src1);
  SmiToInteger32(src2, src2);

  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
  Label safe_div;
  cmpl(rax, Immediate(Smi::kMinValue));
  j(not_equal, &safe_div, Label::kNear);
  cmpl(src2, Immediate(-1));
  j(not_equal, &safe_div, Label::kNear);
  // Retag inputs and go to the slow case.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  jmp(on_not_smi_result, near_jump);
  bind(&safe_div);

  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Restore smi tags on inputs.
  Integer32ToSmi(src2, src2);
  if (src1.is(rax)) {
    movq(src1, kScratchRegister);
  }
  // Check for a negative zero result.  If the result is zero, and the
  // dividend is negative, go slow to return a floating point negative zero.
  Label smi_result;
  testl(rdx, rdx);
  j(not_zero, &smi_result, Label::kNear);
  testq(src1, src1);
  j(negative, on_not_smi_result, near_jump);
  bind(&smi_result);
  Integer32ToSmi(dst, rdx);
}
void MacroAssembler::SmiNot(Register dst, Register src) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src.is(kScratchRegister));
  // Set tag and padding bits before negating, so that they are zero afterwards.
  movl(kScratchRegister, Immediate(~0));
  if (dst.is(src)) {
    xor_(dst, kScratchRegister);
  } else {
    lea(dst, Operand(src, kScratchRegister, times_1, 0));
  }
  not_(dst);
}
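// Worked example for SmiNot, assuming kSmiShift == 32: for src == Smi(5),
// i.e. 5 << 32, adding (or xoring in) 0xFFFFFFFF only fills the low 32
// tag/padding bits, giving 0x00000005FFFFFFFF; not_() then produces
// 0xFFFFFFFA00000000, which is Smi(~5) == Smi(-6) with a correctly zeroed
// tag.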
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
1963
  ASSERT(!dst.is(src2));
1964
  if (!dst.is(src1)) {
1965
    movq(dst, src1);
1966
  }
1967
  and_(dst, src2);
1968
}
1969

    
1970

    
1971
void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
1972
  if (constant->value() == 0) {
1973
    Set(dst, 0);
1974
  } else if (dst.is(src)) {
1975
    ASSERT(!dst.is(kScratchRegister));
1976
    Register constant_reg = GetSmiConstant(constant);
1977
    and_(dst, constant_reg);
1978
  } else {
1979
    LoadSmiConstant(dst, constant);
1980
    and_(dst, src);
1981
  }
1982
}
1983

    
1984

    
1985
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
1986
  if (!dst.is(src1)) {
1987
    ASSERT(!src1.is(src2));
1988
    movq(dst, src1);
1989
  }
1990
  or_(dst, src2);
1991
}
1992

    
1993

    
1994
void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
1995
  if (dst.is(src)) {
1996
    ASSERT(!dst.is(kScratchRegister));
1997
    Register constant_reg = GetSmiConstant(constant);
1998
    or_(dst, constant_reg);
1999
  } else {
2000
    LoadSmiConstant(dst, constant);
2001
    or_(dst, src);
2002
  }
2003
}
2004

    
2005

    
2006
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
2007
  if (!dst.is(src1)) {
2008
    ASSERT(!src1.is(src2));
2009
    movq(dst, src1);
2010
  }
2011
  xor_(dst, src2);
2012
}
2013

    
2014

    
2015
void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
2016
  if (dst.is(src)) {
2017
    ASSERT(!dst.is(kScratchRegister));
2018
    Register constant_reg = GetSmiConstant(constant);
2019
    xor_(dst, constant_reg);
2020
  } else {
2021
    LoadSmiConstant(dst, constant);
2022
    xor_(dst, src);
2023
  }
2024
}
2025

    
2026

    
2027
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
2028
                                                     Register src,
2029
                                                     int shift_value) {
2030
  ASSERT(is_uint5(shift_value));
2031
  if (shift_value > 0) {
2032
    if (dst.is(src)) {
2033
      sar(dst, Immediate(shift_value + kSmiShift));
2034
      shl(dst, Immediate(kSmiShift));
2035
    } else {
2036
      UNIMPLEMENTED();  // Not used.
2037
    }
2038
  }
2039
}
2040

    
2041

    
2042
void MacroAssembler::SmiShiftLeftConstant(Register dst,
2043
                                          Register src,
2044
                                          int shift_value) {
2045
  if (!dst.is(src)) {
2046
    movq(dst, src);
2047
  }
2048
  if (shift_value > 0) {
2049
    shl(dst, Immediate(shift_value));
2050
  }
2051
}
2052

    
2053

    
2054
void MacroAssembler::SmiShiftLogicalRightConstant(
2055
    Register dst, Register src, int shift_value,
2056
    Label* on_not_smi_result, Label::Distance near_jump) {
2057
  // Logical right shift interprets its result as an *unsigned* number.
2058
  if (dst.is(src)) {
2059
    UNIMPLEMENTED();  // Not used.
2060
  } else {
2061
    movq(dst, src);
2062
    if (shift_value == 0) {
2063
      testq(dst, dst);
2064
      j(negative, on_not_smi_result, near_jump);
2065
    }
2066
    shr(dst, Immediate(shift_value + kSmiShift));
2067
    shl(dst, Immediate(kSmiShift));
2068
  }
2069
}
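// Note on the shift_value == 0 case above: JavaScript's >>> treats its
// result as unsigned, so e.g. (-1) >>> 0 is 4294967295, which is outside the
// 32-bit smi range even though the bit pattern of the input is unchanged.
// That is why a logical shift by zero of a negative smi has to bail out.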
2070

    
2071

    
2072
void MacroAssembler::SmiShiftLeft(Register dst,
2073
                                  Register src1,
2074
                                  Register src2) {
2075
  ASSERT(!dst.is(rcx));
2076
  // Untag shift amount.
2077
  if (!dst.is(src1)) {
2078
    movq(dst, src1);
2079
  }
2080
  SmiToInteger32(rcx, src2);
2081
  // The shift amount is the lower 5 bits, not six as for the shl opcode.
2082
  and_(rcx, Immediate(0x1f));
2083
  shl_cl(dst);
2084
}
2085

    
2086

    
2087
void MacroAssembler::SmiShiftLogicalRight(Register dst,
2088
                                          Register src1,
2089
                                          Register src2,
2090
                                          Label* on_not_smi_result,
2091
                                          Label::Distance near_jump) {
2092
  ASSERT(!dst.is(kScratchRegister));
2093
  ASSERT(!src1.is(kScratchRegister));
2094
  ASSERT(!src2.is(kScratchRegister));
2095
  ASSERT(!dst.is(rcx));
2096
  // dst and src1 can be the same, because the one case that bails out
2097
  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
2098
  if (src1.is(rcx) || src2.is(rcx)) {
2099
    movq(kScratchRegister, rcx);
2100
  }
2101
  if (!dst.is(src1)) {
2102
    movq(dst, src1);
2103
  }
2104
  SmiToInteger32(rcx, src2);
2105
  orl(rcx, Immediate(kSmiShift));
2106
  shr_cl(dst);  // Shift is (rcx & 0x1f) + 32.
2107
  shl(dst, Immediate(kSmiShift));
2108
  testq(dst, dst);
2109
  if (src1.is(rcx) || src2.is(rcx)) {
2110
    Label positive_result;
2111
    j(positive, &positive_result, Label::kNear);
2112
    if (src1.is(rcx)) {
2113
      movq(src1, kScratchRegister);
2114
    } else {
2115
      movq(src2, kScratchRegister);
2116
    }
2117
    jmp(on_not_smi_result, near_jump);
2118
    bind(&positive_result);
2119
  } else {
2120
    // src2 was zero and src1 negative.
2121
    j(negative, on_not_smi_result, near_jump);
2122
  }
2123
}
2124

    
2125

    
2126
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
2127
                                             Register src1,
2128
                                             Register src2) {
2129
  ASSERT(!dst.is(kScratchRegister));
2130
  ASSERT(!src1.is(kScratchRegister));
2131
  ASSERT(!src2.is(kScratchRegister));
2132
  ASSERT(!dst.is(rcx));
2133
  if (src1.is(rcx)) {
2134
    movq(kScratchRegister, src1);
2135
  } else if (src2.is(rcx)) {
2136
    movq(kScratchRegister, src2);
2137
  }
2138
  if (!dst.is(src1)) {
2139
    movq(dst, src1);
2140
  }
2141
  SmiToInteger32(rcx, src2);
2142
  orl(rcx, Immediate(kSmiShift));
2143
  sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
2144
  shl(dst, Immediate(kSmiShift));
2145
  if (src1.is(rcx)) {
2146
    movq(src1, kScratchRegister);
2147
  } else if (src2.is(rcx)) {
2148
    movq(src2, kScratchRegister);
2149
  }
2150
}
2151

    
2152

    
2153
void MacroAssembler::SelectNonSmi(Register dst,
2154
                                  Register src1,
2155
                                  Register src2,
2156
                                  Label* on_not_smis,
2157
                                  Label::Distance near_jump) {
2158
  ASSERT(!dst.is(kScratchRegister));
2159
  ASSERT(!src1.is(kScratchRegister));
2160
  ASSERT(!src2.is(kScratchRegister));
2161
  ASSERT(!dst.is(src1));
2162
  ASSERT(!dst.is(src2));
2163
  // Both operands must not be smis.
2164
#ifdef DEBUG
2165
  if (allow_stub_calls()) {  // Check contains a stub call.
2166
    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
2167
    Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
2168
  }
2169
#endif
2170
  STATIC_ASSERT(kSmiTag == 0);
2171
  ASSERT_EQ(0, Smi::FromInt(0));
2172
  movl(kScratchRegister, Immediate(kSmiTagMask));
2173
  and_(kScratchRegister, src1);
2174
  testl(kScratchRegister, src2);
2175
  // If non-zero, then neither operand is a smi.
2176
  j(not_zero, on_not_smis, near_jump);
2177

    
2178
  // Exactly one operand is a smi.
2179
  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
2180
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
2181
  subq(kScratchRegister, Immediate(1));
2182
  // If src1 is a smi, then the scratch register is all 1s, else all 0s.
2183
  movq(dst, src1);
2184
  xor_(dst, src2);
2185
  and_(dst, kScratchRegister);
2186
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
2187
  xor_(dst, src1);
2188
  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
2189
}
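// Worked example for the selection above (kSmiTagMask == 1, smis have a zero
// tag bit): kScratchRegister ends up as (src1 & 1) - 1, i.e. all ones when
// src1 is a smi and all zeros when it is a heap object.  Then
//   dst = ((src1 ^ src2) & kScratchRegister) ^ src1
// equals src2 when the mask is all ones (src1 was the smi) and src1
// otherwise, so dst always receives the non-smi operand.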
2190

    
2191

    
2192
SmiIndex MacroAssembler::SmiToIndex(Register dst,
2193
                                    Register src,
2194
                                    int shift) {
2195
  ASSERT(is_uint6(shift));
2196
  // There is a possible optimization if shift is in the range 60-63, but that
2197
  // will (and must) never happen.
2198
  if (!dst.is(src)) {
2199
    movq(dst, src);
2200
  }
2201
  if (shift < kSmiShift) {
2202
    sar(dst, Immediate(kSmiShift - shift));
2203
  } else {
2204
    shl(dst, Immediate(shift - kSmiShift));
2205
  }
2206
  return SmiIndex(dst, times_1);
2207
}
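// Example, assuming kSmiShift == 32: the tagged value is value << 32, so a
// single arithmetic shift right by (32 - shift) yields value << shift.  For
// shift == kPointerSizeLog2 (3), sar by 29 turns Smi(7), i.e. 7 << 32, into
// 56 == 7 * kPointerSize, ready for use with a times_1 operand.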
2208

    
2209
SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
2210
                                            Register src,
2211
                                            int shift) {
2212
  // Register src holds a positive smi.
2213
  ASSERT(is_uint6(shift));
2214
  if (!dst.is(src)) {
2215
    movq(dst, src);
2216
  }
2217
  neg(dst);
2218
  if (shift < kSmiShift) {
2219
    sar(dst, Immediate(kSmiShift - shift));
2220
  } else {
2221
    shl(dst, Immediate(shift - kSmiShift));
2222
  }
2223
  return SmiIndex(dst, times_1);
2224
}
2225

    
2226

    
2227
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
2228
  ASSERT_EQ(0, kSmiShift % kBitsPerByte);
2229
  addl(dst, Operand(src, kSmiShift / kBitsPerByte));
2230
}
2231

    
2232

    
2233
void MacroAssembler::Push(Smi* source) {
2234
  intptr_t smi = reinterpret_cast<intptr_t>(source);
2235
  if (is_int32(smi)) {
2236
    push(Immediate(static_cast<int32_t>(smi)));
2237
  } else {
2238
    Register constant = GetSmiConstant(source);
2239
    push(constant);
2240
  }
2241
}
2242

    
2243

    
2244
void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
2245
  movq(scratch, src);
2246
  // High bits.
2247
  shr(src, Immediate(64 - kSmiShift));
2248
  shl(src, Immediate(kSmiShift));
2249
  push(src);
2250
  // Low bits.
2251
  shl(scratch, Immediate(kSmiShift));
2252
  push(scratch);
2253
}
2254

    
2255

    
2256
void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
2257
  pop(scratch);
2258
  // Low bits.
2259
  shr(scratch, Immediate(kSmiShift));
2260
  pop(dst);
2261
  shr(dst, Immediate(kSmiShift));
2262
  // High bits.
2263
  shl(dst, Immediate(64 - kSmiShift));
2264
  or_(dst, scratch);
2265
}
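// Worked example for the two routines above, assuming kSmiShift == 32: the
// value 0xAABBCCDD11223344 is pushed as two smi-tagged words,
// 0xAABBCCDD00000000 (high half, pushed first) and 0x1122334400000000 (low
// half); PopInt64AsTwoSmis pops them in reverse order, shifts each half back
// down and reassembles (high << 32) | low.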
2266

    
2267

    
2268
void MacroAssembler::Test(const Operand& src, Smi* source) {
2269
  testl(Operand(src, kIntSize), Immediate(source->value()));
2270
}
2271

    
2272

    
2273
// ----------------------------------------------------------------------------
2274

    
2275

    
2276
void MacroAssembler::LookupNumberStringCache(Register object,
2277
                                             Register result,
2278
                                             Register scratch1,
2279
                                             Register scratch2,
2280
                                             Label* not_found) {
2281
  // Use of registers. Register result is used as a temporary.
2282
  Register number_string_cache = result;
2283
  Register mask = scratch1;
2284
  Register scratch = scratch2;
2285

    
2286
  // Load the number string cache.
2287
  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2288

    
2289
  // Make the hash mask from the length of the number string cache. It
2290
  // contains two elements (number and string) for each cache entry.
2291
  SmiToInteger32(
2292
      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2293
  shrl(mask, Immediate(1));
2294
  subq(mask, Immediate(1));  // Make mask.
2295

    
2296
  // Calculate the entry in the number string cache. The hash value in the
2297
  // number string cache for smis is just the smi value, and the hash for
2298
  // doubles is the xor of the upper and lower words. See
2299
  // Heap::GetNumberStringCache.
2300
  Label is_smi;
2301
  Label load_result_from_cache;
2302
  JumpIfSmi(object, &is_smi);
2303
  CheckMap(object,
2304
           isolate()->factory()->heap_number_map(),
2305
           not_found,
2306
           DONT_DO_SMI_CHECK);
2307

    
2308
  STATIC_ASSERT(8 == kDoubleSize);
2309
  movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2310
  xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2311
  and_(scratch, mask);
2312
  // Each entry in string cache consists of two pointer sized fields,
2313
  // but times_twice_pointer_size (multiplication by 16) scale factor
2314
  // is not supported by addrmode on x64 platform.
2315
  // So we have to premultiply entry index before lookup.
2316
  shl(scratch, Immediate(kPointerSizeLog2 + 1));
2317

    
2318
  Register index = scratch;
2319
  Register probe = mask;
2320
  movq(probe,
2321
       FieldOperand(number_string_cache,
2322
                    index,
2323
                    times_1,
2324
                    FixedArray::kHeaderSize));
2325
  JumpIfSmi(probe, not_found);
2326
  movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2327
  ucomisd(xmm0, FieldOperand(probe, HeapNumber::kValueOffset));
2328
  j(parity_even, not_found);  // Bail out if NaN is involved.
2329
  j(not_equal, not_found);  // The cache did not contain this value.
2330
  jmp(&load_result_from_cache);
2331

    
2332
  bind(&is_smi);
2333
  SmiToInteger32(scratch, object);
2334
  and_(scratch, mask);
2335
  // Each entry in string cache consists of two pointer sized fields,
2336
  // but times_twice_pointer_size (multiplication by 16) scale factor
2337
  // is not supported by addrmode on x64 platform.
2338
  // So we have to premultiply entry index before lookup.
2339
  shl(scratch, Immediate(kPointerSizeLog2 + 1));
2340

    
2341
  // Check if the entry is the smi we are looking for.
2342
  cmpq(object,
2343
       FieldOperand(number_string_cache,
2344
                    index,
2345
                    times_1,
2346
                    FixedArray::kHeaderSize));
2347
  j(not_equal, not_found);
2348

    
2349
  // Get the result from the cache.
2350
  bind(&load_result_from_cache);
2351
  movq(result,
2352
       FieldOperand(number_string_cache,
2353
                    index,
2354
                    times_1,
2355
                    FixedArray::kHeaderSize + kPointerSize));
2356
  IncrementCounter(isolate()->counters()->number_to_string_native(), 1);
2357
}
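// Note on the premultiplied index above: each cache entry is a (number,
// string) pair, i.e. two pointers == 16 bytes, and x64 addressing offers no
// times_16 scale factor.  Shifting the masked hash left by
// kPointerSizeLog2 + 1 (== 4) turns the entry index into a byte offset that
// can then be used with a plain times_1 operand, as done in both lookups.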
2358

    
2359

    
2360
void MacroAssembler::JumpIfNotString(Register object,
2361
                                     Register object_map,
2362
                                     Label* not_string,
2363
                                     Label::Distance near_jump) {
2364
  Condition is_smi = CheckSmi(object);
2365
  j(is_smi, not_string, near_jump);
2366
  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
2367
  j(above_equal, not_string, near_jump);
2368
}
2369

    
2370

    
2371
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
2372
    Register first_object,
2373
    Register second_object,
2374
    Register scratch1,
2375
    Register scratch2,
2376
    Label* on_fail,
2377
    Label::Distance near_jump) {
2378
  // Check that both objects are not smis.
2379
  Condition either_smi = CheckEitherSmi(first_object, second_object);
2380
  j(either_smi, on_fail, near_jump);
2381

    
2382
  // Load instance type for both strings.
2383
  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
2384
  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
2385
  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
2386
  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
2387

    
2388
  // Check that both are flat ASCII strings.
2389
  ASSERT(kNotStringTag != 0);
2390
  const int kFlatAsciiStringMask =
2391
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2392
  const int kFlatAsciiStringTag =
2393
      kStringTag | kOneByteStringTag | kSeqStringTag;
2394

    
2395
  andl(scratch1, Immediate(kFlatAsciiStringMask));
2396
  andl(scratch2, Immediate(kFlatAsciiStringMask));
2397
  // Interleave the bits to check both scratch1 and scratch2 in one test.
2398
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2399
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2400
  cmpl(scratch1,
2401
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2402
  j(not_equal, on_fail, near_jump);
2403
}
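// Note on the interleaving above: kFlatAsciiStringMask has no bits in common
// with itself shifted left by three (that is what the ASSERT_EQ checks), so
// scratch1 + scratch2 * 8 combines both masked instance types into one word
// without any carries, and a single cmpl against
// kFlatAsciiStringTag + (kFlatAsciiStringTag << 3) tests both strings at
// once.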
2404

    
2405

    
2406
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
2407
    Register instance_type,
2408
    Register scratch,
2409
    Label* failure,
2410
    Label::Distance near_jump) {
2411
  if (!scratch.is(instance_type)) {
2412
    movl(scratch, instance_type);
2413
  }
2414

    
2415
  const int kFlatAsciiStringMask =
2416
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2417

    
2418
  andl(scratch, Immediate(kFlatAsciiStringMask));
2419
  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
2420
  j(not_equal, failure, near_jump);
2421
}
2422

    
2423

    
2424
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2425
    Register first_object_instance_type,
2426
    Register second_object_instance_type,
2427
    Register scratch1,
2428
    Register scratch2,
2429
    Label* on_fail,
2430
    Label::Distance near_jump) {
2431
  // Load instance type for both strings.
2432
  movq(scratch1, first_object_instance_type);
2433
  movq(scratch2, second_object_instance_type);
2434

    
2435
  // Check that both are flat ASCII strings.
2436
  ASSERT(kNotStringTag != 0);
2437
  const int kFlatAsciiStringMask =
2438
      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
2439
  const int kFlatAsciiStringTag =
2440
      kStringTag | kOneByteStringTag | kSeqStringTag;
2441

    
2442
  andl(scratch1, Immediate(kFlatAsciiStringMask));
2443
  andl(scratch2, Immediate(kFlatAsciiStringMask));
2444
  // Interleave the bits to check both scratch1 and scratch2 in one test.
2445
  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
2446
  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
2447
  cmpl(scratch1,
2448
       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
2449
  j(not_equal, on_fail, near_jump);
2450
}
2451

    
2452

    
2453
template<class T>
2454
static void JumpIfNotUniqueNameHelper(MacroAssembler* masm,
2455
                                      T operand_or_register,
2456
                                      Label* not_unique_name,
2457
                                      Label::Distance distance) {
2458
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2459
  Label succeed;
2460
  masm->testb(operand_or_register,
2461
              Immediate(kIsNotStringMask | kIsNotInternalizedMask));
2462
  masm->j(zero, &succeed, Label::kNear);
2463
  masm->cmpb(operand_or_register, Immediate(static_cast<uint8_t>(SYMBOL_TYPE)));
2464
  masm->j(not_equal, not_unique_name, distance);
2465

    
2466
  masm->bind(&succeed);
2467
}
2468

    
2469

    
2470
void MacroAssembler::JumpIfNotUniqueName(Operand operand,
2471
                                         Label* not_unique_name,
2472
                                         Label::Distance distance) {
2473
  JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
2474
}
2475

    
2476

    
2477
void MacroAssembler::JumpIfNotUniqueName(Register reg,
2478
                                         Label* not_unique_name,
2479
                                         Label::Distance distance) {
2480
  JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
2481
}
2482

    
2483

    
2484
void MacroAssembler::Move(Register dst, Register src) {
2485
  if (!dst.is(src)) {
2486
    movq(dst, src);
2487
  }
2488
}
2489

    
2490

    
2491
void MacroAssembler::Move(Register dst, Handle<Object> source) {
2492
  AllowDeferredHandleDereference smi_check;
2493
  if (source->IsSmi()) {
2494
    Move(dst, Smi::cast(*source));
2495
  } else {
2496
    MoveHeapObject(dst, source);
2497
  }
2498
}
2499

    
2500

    
2501
void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
2502
  AllowDeferredHandleDereference smi_check;
2503
  if (source->IsSmi()) {
2504
    Move(dst, Smi::cast(*source));
2505
  } else {
2506
    MoveHeapObject(kScratchRegister, source);
2507
    movq(dst, kScratchRegister);
2508
  }
2509
}
2510

    
2511

    
2512
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
2513
  AllowDeferredHandleDereference smi_check;
2514
  if (source->IsSmi()) {
2515
    Cmp(dst, Smi::cast(*source));
2516
  } else {
2517
    MoveHeapObject(kScratchRegister, source);
2518
    cmpq(dst, kScratchRegister);
2519
  }
2520
}
2521

    
2522

    
2523
void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
2524
  AllowDeferredHandleDereference smi_check;
2525
  if (source->IsSmi()) {
2526
    Cmp(dst, Smi::cast(*source));
2527
  } else {
2528
    MoveHeapObject(kScratchRegister, source);
2529
    cmpq(dst, kScratchRegister);
2530
  }
2531
}
2532

    
2533

    
2534
void MacroAssembler::Push(Handle<Object> source) {
2535
  AllowDeferredHandleDereference smi_check;
2536
  if (source->IsSmi()) {
2537
    Push(Smi::cast(*source));
2538
  } else {
2539
    MoveHeapObject(kScratchRegister, source);
2540
    push(kScratchRegister);
2541
  }
2542
}
2543

    
2544

    
2545
void MacroAssembler::MoveHeapObject(Register result,
2546
                                    Handle<Object> object) {
2547
  AllowDeferredHandleDereference using_raw_address;
2548
  ASSERT(object->IsHeapObject());
2549
  if (isolate()->heap()->InNewSpace(*object)) {
2550
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
2551
    movq(result, cell, RelocInfo::CELL);
2552
    movq(result, Operand(result, 0));
2553
  } else {
2554
    movq(result, object, RelocInfo::EMBEDDED_OBJECT);
2555
  }
2556
}
2557

    
2558

    
2559
void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
2560
  if (dst.is(rax)) {
2561
    AllowDeferredHandleDereference embedding_raw_address;
2562
    load_rax(cell.location(), RelocInfo::CELL);
2563
  } else {
2564
    movq(dst, cell, RelocInfo::CELL);
2565
    movq(dst, Operand(dst, 0));
2566
  }
2567
}
2568

    
2569

    
2570
void MacroAssembler::Drop(int stack_elements) {
2571
  if (stack_elements > 0) {
2572
    addq(rsp, Immediate(stack_elements * kPointerSize));
2573
  }
2574
}
2575

    
2576

    
2577
void MacroAssembler::TestBit(const Operand& src, int bits) {
2578
  int byte_offset = bits / kBitsPerByte;
2579
  int bit_in_byte = bits & (kBitsPerByte - 1);
2580
  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
2581
}
2582

    
2583

    
2584
void MacroAssembler::Jump(ExternalReference ext) {
2585
  LoadAddress(kScratchRegister, ext);
2586
  jmp(kScratchRegister);
2587
}
2588

    
2589

    
2590
void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
2591
  movq(kScratchRegister, destination, rmode);
2592
  jmp(kScratchRegister);
2593
}
2594

    
2595

    
2596
void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
2597
  // TODO(X64): Inline this
2598
  jmp(code_object, rmode);
2599
}
2600

    
2601

    
2602
int MacroAssembler::CallSize(ExternalReference ext) {
2603
  // Opcode for call kScratchRegister is: Rex.B FF D4 (three bytes).
2604
  return LoadAddressSize(ext) +
2605
         Assembler::kCallScratchRegisterInstructionLength;
2606
}
2607

    
2608

    
2609
void MacroAssembler::Call(ExternalReference ext) {
2610
#ifdef DEBUG
2611
  int end_position = pc_offset() + CallSize(ext);
2612
#endif
2613
  LoadAddress(kScratchRegister, ext);
2614
  call(kScratchRegister);
2615
#ifdef DEBUG
2616
  CHECK_EQ(end_position, pc_offset());
2617
#endif
2618
}
2619

    
2620

    
2621
void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
2622
#ifdef DEBUG
2623
  int end_position = pc_offset() + CallSize(destination, rmode);
2624
#endif
2625
  movq(kScratchRegister, destination, rmode);
2626
  call(kScratchRegister);
2627
#ifdef DEBUG
2628
  CHECK_EQ(pc_offset(), end_position);
2629
#endif
2630
}
2631

    
2632

    
2633
void MacroAssembler::Call(Handle<Code> code_object,
2634
                          RelocInfo::Mode rmode,
2635
                          TypeFeedbackId ast_id) {
2636
#ifdef DEBUG
2637
  int end_position = pc_offset() + CallSize(code_object);
2638
#endif
2639
  ASSERT(RelocInfo::IsCodeTarget(rmode) ||
2640
      rmode == RelocInfo::CODE_AGE_SEQUENCE);
2641
  call(code_object, rmode, ast_id);
2642
#ifdef DEBUG
2643
  CHECK_EQ(end_position, pc_offset());
2644
#endif
2645
}
2646

    
2647

    
2648
void MacroAssembler::Pushad() {
2649
  push(rax);
2650
  push(rcx);
2651
  push(rdx);
2652
  push(rbx);
2653
  // Not pushing rsp or rbp.
2654
  push(rsi);
2655
  push(rdi);
2656
  push(r8);
2657
  push(r9);
2658
  // r10 is kScratchRegister.
2659
  push(r11);
2660
  // r12 is kSmiConstantRegister.
2661
  // r13 is kRootRegister.
2662
  push(r14);
2663
  push(r15);
2664
  STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
2665
  // Use lea for symmetry with Popad.
2666
  int sp_delta =
2667
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2668
  lea(rsp, Operand(rsp, -sp_delta));
2669
}
2670

    
2671

    
2672
void MacroAssembler::Popad() {
2673
  // Popad must not change the flags, so use lea instead of addq.
2674
  int sp_delta =
2675
      (kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
2676
  lea(rsp, Operand(rsp, sp_delta));
2677
  pop(r15);
2678
  pop(r14);
2679
  pop(r11);
2680
  pop(r9);
2681
  pop(r8);
2682
  pop(rdi);
2683
  pop(rsi);
2684
  pop(rbx);
2685
  pop(rdx);
2686
  pop(rcx);
2687
  pop(rax);
2688
}
2689

    
2690

    
2691
void MacroAssembler::Dropad() {
2692
  addq(rsp, Immediate(kNumSafepointRegisters * kPointerSize));
2693
}
2694

    
2695

    
2696
// Order general registers are pushed by Pushad:
2697
// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
2698
const int
2699
MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
2700
    0,
2701
    1,
2702
    2,
2703
    3,
2704
    -1,
2705
    -1,
2706
    4,
2707
    5,
2708
    6,
2709
    7,
2710
    -1,
2711
    8,
2712
    -1,
2713
    -1,
2714
    9,
2715
    10
2716
};
2717

    
2718

    
2719
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst,
2720
                                                  const Immediate& imm) {
2721
  movq(SafepointRegisterSlot(dst), imm);
2722
}
2723

    
2724

    
2725
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
2726
  movq(SafepointRegisterSlot(dst), src);
2727
}
2728

    
2729

    
2730
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
2731
  movq(dst, SafepointRegisterSlot(src));
2732
}
2733

    
2734

    
2735
Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
2736
  return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
2737
}
2738

    
2739

    
2740
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2741
                                    int handler_index) {
2742
  // Adjust this code if not the case.
2743
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2744
                                                kFPOnStackSize);
2745
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2746
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2747
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2748
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2749
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2750

    
2751
  // We will build up the handler from the bottom by pushing on the stack.
2752
  // First push the frame pointer and context.
2753
  if (kind == StackHandler::JS_ENTRY) {
2754
    // The frame pointer does not point to a JS frame so we save NULL for
2755
    // rbp. We expect the code throwing an exception to check rbp before
2756
    // dereferencing it to restore the context.
2757
    push(Immediate(0));  // NULL frame pointer.
2758
    Push(Smi::FromInt(0));  // No context.
2759
  } else {
2760
    push(rbp);
2761
    push(rsi);
2762
  }
2763

    
2764
  // Push the state and the code object.
2765
  unsigned state =
2766
      StackHandler::IndexField::encode(handler_index) |
2767
      StackHandler::KindField::encode(kind);
2768
  push(Immediate(state));
2769
  Push(CodeObject());
2770

    
2771
  // Link the current handler as the next handler.
2772
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2773
  push(ExternalOperand(handler_address));
2774
  // Set this new handler as the current one.
2775
  movq(ExternalOperand(handler_address), rsp);
2776
}
2777

    
2778

    
2779
void MacroAssembler::PopTryHandler() {
2780
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2781
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2782
  pop(ExternalOperand(handler_address));
2783
  addq(rsp, Immediate(StackHandlerConstants::kSize - kPointerSize));
2784
}
2785

    
2786

    
2787
void MacroAssembler::JumpToHandlerEntry() {
2788
  // Compute the handler entry address and jump to it.  The handler table is
2789
  // a fixed array of (smi-tagged) code offsets.
2790
  // rax = exception, rdi = code object, rdx = state.
2791
  movq(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
2792
  shr(rdx, Immediate(StackHandler::kKindWidth));
2793
  movq(rdx,
2794
       FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
2795
  SmiToInteger64(rdx, rdx);
2796
  lea(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
2797
  jmp(rdi);
2798
}
2799

    
2800

    
2801
void MacroAssembler::Throw(Register value) {
2802
  // Adjust this code if not the case.
2803
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2804
                                                kFPOnStackSize);
2805
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2806
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2807
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2808
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2809
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2810

    
2811
  // The exception is expected in rax.
2812
  if (!value.is(rax)) {
2813
    movq(rax, value);
2814
  }
2815
  // Drop the stack pointer to the top of the top handler.
2816
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2817
  movq(rsp, ExternalOperand(handler_address));
2818
  // Restore the next handler.
2819
  pop(ExternalOperand(handler_address));
2820

    
2821
  // Remove the code object and state, compute the handler address in rdi.
2822
  pop(rdi);  // Code object.
2823
  pop(rdx);  // Offset and state.
2824

    
2825
  // Restore the context and frame pointer.
2826
  pop(rsi);  // Context.
2827
  pop(rbp);  // Frame pointer.
2828

    
2829
  // If the handler is a JS frame, restore the context to the frame.
2830
  // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
2831
  // rbp or rsi.
2832
  Label skip;
2833
  testq(rsi, rsi);
2834
  j(zero, &skip, Label::kNear);
2835
  movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
2836
  bind(&skip);
2837

    
2838
  JumpToHandlerEntry();
2839
}
2840

    
2841

    
2842
void MacroAssembler::ThrowUncatchable(Register value) {
2843
  // Adjust this code if not the case.
2844
  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
2845
                                                kFPOnStackSize);
2846
  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2847
  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2848
  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2849
  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2850
  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2851

    
2852
  // The exception is expected in rax.
2853
  if (!value.is(rax)) {
2854
    movq(rax, value);
2855
  }
2856
  // Drop the stack pointer to the top of the top stack handler.
2857
  ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
2858
  Load(rsp, handler_address);
2859

    
2860
  // Unwind the handlers until the top ENTRY handler is found.
2861
  Label fetch_next, check_kind;
2862
  jmp(&check_kind, Label::kNear);
2863
  bind(&fetch_next);
2864
  movq(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
2865

    
2866
  bind(&check_kind);
2867
  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2868
  testl(Operand(rsp, StackHandlerConstants::kStateOffset),
2869
        Immediate(StackHandler::KindField::kMask));
2870
  j(not_zero, &fetch_next);
2871

    
2872
  // Set the top handler address to next handler past the top ENTRY handler.
2873
  pop(ExternalOperand(handler_address));
2874

    
2875
  // Remove the code object and state, compute the handler address in rdi.
2876
  pop(rdi);  // Code object.
2877
  pop(rdx);  // Offset and state.
2878

    
2879
  // Clear the context pointer and frame pointer (0 was saved in the handler).
2880
  pop(rsi);
2881
  pop(rbp);
2882

    
2883
  JumpToHandlerEntry();
2884
}
2885

    
2886

    
2887
void MacroAssembler::Ret() {
2888
  ret(0);
2889
}
2890

    
2891

    
2892
void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
2893
  if (is_uint16(bytes_dropped)) {
2894
    ret(bytes_dropped);
2895
  } else {
2896
    PopReturnAddressTo(scratch);
2897
    addq(rsp, Immediate(bytes_dropped));
2898
    PushReturnAddressFrom(scratch);
2899
    ret(0);
2900
  }
2901
}
2902

    
2903

    
2904
void MacroAssembler::FCmp() {
2905
  fucomip();
2906
  fstp(0);
2907
}
2908

    
2909

    
2910
void MacroAssembler::CmpObjectType(Register heap_object,
2911
                                   InstanceType type,
2912
                                   Register map) {
2913
  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
2914
  CmpInstanceType(map, type);
2915
}
2916

    
2917

    
2918
void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2919
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
2920
       Immediate(static_cast<int8_t>(type)));
2921
}
2922

    
2923

    
2924
void MacroAssembler::CheckFastElements(Register map,
2925
                                       Label* fail,
2926
                                       Label::Distance distance) {
2927
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2928
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2929
  STATIC_ASSERT(FAST_ELEMENTS == 2);
2930
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2931
  cmpb(FieldOperand(map, Map::kBitField2Offset),
2932
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
2933
  j(above, fail, distance);
2934
}
2935

    
2936

    
2937
void MacroAssembler::CheckFastObjectElements(Register map,
2938
                                             Label* fail,
2939
                                             Label::Distance distance) {
2940
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2941
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2942
  STATIC_ASSERT(FAST_ELEMENTS == 2);
2943
  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
2944
  cmpb(FieldOperand(map, Map::kBitField2Offset),
2945
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
2946
  j(below_equal, fail, distance);
2947
  cmpb(FieldOperand(map, Map::kBitField2Offset),
2948
       Immediate(Map::kMaximumBitField2FastHoleyElementValue));
2949
  j(above, fail, distance);
2950
}
2951

    
2952

    
2953
void MacroAssembler::CheckFastSmiElements(Register map,
2954
                                          Label* fail,
2955
                                          Label::Distance distance) {
2956
  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
2957
  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
2958
  cmpb(FieldOperand(map, Map::kBitField2Offset),
2959
       Immediate(Map::kMaximumBitField2FastHoleySmiElementValue));
2960
  j(above, fail, distance);
2961
}
2962

    
2963

    
2964
void MacroAssembler::StoreNumberToDoubleElements(
2965
    Register maybe_number,
2966
    Register elements,
2967
    Register index,
2968
    XMMRegister xmm_scratch,
2969
    Label* fail,
2970
    int elements_offset) {
2971
  Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
2972

    
2973
  JumpIfSmi(maybe_number, &smi_value, Label::kNear);
2974

    
2975
  CheckMap(maybe_number,
2976
           isolate()->factory()->heap_number_map(),
2977
           fail,
2978
           DONT_DO_SMI_CHECK);
2979

    
2980
  // Double value, canonicalize NaN.
2981
  uint32_t offset = HeapNumber::kValueOffset + sizeof(kHoleNanLower32);
2982
  cmpl(FieldOperand(maybe_number, offset),
2983
       Immediate(kNaNOrInfinityLowerBoundUpper32));
2984
  j(greater_equal, &maybe_nan, Label::kNear);
2985

    
2986
  bind(&not_nan);
2987
  movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
2988
  bind(&have_double_value);
2989
  movsd(FieldOperand(elements, index, times_8,
2990
                     FixedDoubleArray::kHeaderSize - elements_offset),
2991
        xmm_scratch);
2992
  jmp(&done);
2993

    
2994
  bind(&maybe_nan);
2995
  // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
2996
  // it's an Infinity, and the non-NaN code path applies.
2997
  j(greater, &is_nan, Label::kNear);
2998
  cmpl(FieldOperand(maybe_number, HeapNumber::kValueOffset), Immediate(0));
2999
  j(zero, &not_nan);
3000
  bind(&is_nan);
3001
  // Convert all NaNs to the same canonical NaN value when they are stored in
3002
  // the double array.
3003
  Set(kScratchRegister, BitCast<uint64_t>(
3004
      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
3005
  movq(xmm_scratch, kScratchRegister);
3006
  jmp(&have_double_value, Label::kNear);
3007

    
3008
  bind(&smi_value);
3009
  // Value is a smi. Convert to a double and store.
3010
  // Preserve original value.
3011
  SmiToInteger32(kScratchRegister, maybe_number);
3012
  Cvtlsi2sd(xmm_scratch, kScratchRegister);
3013
  movsd(FieldOperand(elements, index, times_8,
3014
                     FixedDoubleArray::kHeaderSize - elements_offset),
3015
        xmm_scratch);
3016
  bind(&done);
3017
}
3018

    
3019

    
3020
void MacroAssembler::CompareMap(Register obj,
3021
                                Handle<Map> map,
3022
                                Label* early_success) {
3023
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3024
}
3025

    
3026

    
3027
void MacroAssembler::CheckMap(Register obj,
3028
                              Handle<Map> map,
3029
                              Label* fail,
3030
                              SmiCheckType smi_check_type) {
3031
  if (smi_check_type == DO_SMI_CHECK) {
3032
    JumpIfSmi(obj, fail);
3033
  }
3034

    
3035
  Label success;
3036
  CompareMap(obj, map, &success);
3037
  j(not_equal, fail);
3038
  bind(&success);
3039
}
3040

    
3041

    
3042
void MacroAssembler::ClampUint8(Register reg) {
3043
  Label done;
3044
  testl(reg, Immediate(0xFFFFFF00));
3045
  j(zero, &done, Label::kNear);
3046
  setcc(negative, reg);  // 1 if negative, 0 if positive.
3047
  decb(reg);  // 0 if negative, 255 if positive.
3048
  bind(&done);
3049
}
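// Worked example for ClampUint8: inputs 0..255 leave the masked test zero
// and are returned unchanged.  Otherwise setcc(negative) writes 1 to the low
// byte for negative inputs and 0 for inputs above 255, and decb turns that
// into 0x00 (clamp to 0) or 0xFF (clamp to 255) respectively.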
3050

    
3051

    
3052
void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
3053
                                        XMMRegister temp_xmm_reg,
3054
                                        Register result_reg) {
3055
  Label done;
3056
  Label conv_failure;
3057
  xorps(temp_xmm_reg, temp_xmm_reg);
3058
  cvtsd2si(result_reg, input_reg);
3059
  testl(result_reg, Immediate(0xFFFFFF00));
3060
  j(zero, &done, Label::kNear);
3061
  cmpl(result_reg, Immediate(0x80000000));
3062
  j(equal, &conv_failure, Label::kNear);
3063
  movl(result_reg, Immediate(0));
3064
  setcc(above, result_reg);
3065
  subl(result_reg, Immediate(1));
3066
  andl(result_reg, Immediate(255));
3067
  jmp(&done, Label::kNear);
3068
  bind(&conv_failure);
3069
  Set(result_reg, 0);
3070
  ucomisd(input_reg, temp_xmm_reg);
3071
  j(below, &done, Label::kNear);
3072
  Set(result_reg, 255);
3073
  bind(&done);
3074
}
3075

    
3076

    
3077
void MacroAssembler::LoadUint32(XMMRegister dst,
3078
                                Register src,
3079
                                XMMRegister scratch) {
3080
  if (FLAG_debug_code) {
3081
    cmpq(src, Immediate(0xffffffff));
3082
    Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
3083
  }
3084
  cvtqsi2sd(dst, src);
3085
}
3086

    
3087

    
3088
void MacroAssembler::SlowTruncateToI(Register result_reg,
3089
                                     Register input_reg,
3090
                                     int offset) {
3091
  DoubleToIStub stub(input_reg, result_reg, offset, true);
3092
  call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
3093
}
3094

    
3095

    
3096
void MacroAssembler::TruncateHeapNumberToI(Register result_reg,
3097
                                           Register input_reg) {
3098
  Label done;
3099
  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3100
  cvttsd2siq(result_reg, xmm0);
3101
  Set(kScratchRegister, V8_UINT64_C(0x8000000000000000));
3102
  cmpq(result_reg, kScratchRegister);
3103
  j(not_equal, &done, Label::kNear);
3104

    
3105
  // Slow case.
3106
  if (input_reg.is(result_reg)) {
3107
    subq(rsp, Immediate(kDoubleSize));
3108
    movsd(MemOperand(rsp, 0), xmm0);
3109
    SlowTruncateToI(result_reg, rsp, 0);
3110
    addq(rsp, Immediate(kDoubleSize));
3111
  } else {
3112
    SlowTruncateToI(result_reg, input_reg);
3113
  }
3114

    
3115
  bind(&done);
3116
}
3117

    
3118

    
3119
void MacroAssembler::TruncateDoubleToI(Register result_reg,
3120
                                       XMMRegister input_reg) {
3121
  Label done;
3122
  cvttsd2siq(result_reg, input_reg);
3123
  movq(kScratchRegister,
3124
      V8_INT64_C(0x8000000000000000),
3125
      RelocInfo::NONE64);
3126
  cmpq(result_reg, kScratchRegister);
3127
  j(not_equal, &done, Label::kNear);
3128

    
3129
  subq(rsp, Immediate(kDoubleSize));
3130
  movsd(MemOperand(rsp, 0), input_reg);
3131
  SlowTruncateToI(result_reg, rsp, 0);
3132
  addq(rsp, Immediate(kDoubleSize));
3133

    
3134
  bind(&done);
3135
}
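// Note on the 0x8000000000000000 comparison above: cvttsd2siq produces that
// "integer indefinite" value whenever the double cannot be represented as a
// signed 64-bit integer (NaN, infinities, out-of-range values), so matching
// it is what sends those inputs to the DoubleToIStub slow path.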
3136

    
3137

    
3138
void MacroAssembler::DoubleToI(Register result_reg,
3139
                               XMMRegister input_reg,
3140
                               XMMRegister scratch,
3141
                               MinusZeroMode minus_zero_mode,
3142
                               Label* conversion_failed,
3143
                               Label::Distance dst) {
3144
  cvttsd2si(result_reg, input_reg);
3145
  Cvtlsi2sd(xmm0, result_reg);
3146
  ucomisd(xmm0, input_reg);
3147
  j(not_equal, conversion_failed, dst);
3148
  j(parity_even, conversion_failed, dst);  // NaN.
3149
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3150
    Label done;
3151
    // The integer converted back is equal to the original. We
3152
    // only have to test if we got -0 as an input.
3153
    testl(result_reg, result_reg);
3154
    j(not_zero, &done, Label::kNear);
3155
    movmskpd(result_reg, input_reg);
3156
    // Bit 0 contains the sign of the double in input_reg.
3157
    // If input was positive, we are ok and return 0, otherwise
3158
    // jump to conversion_failed.
3159
    andl(result_reg, Immediate(1));
3160
    j(not_zero, conversion_failed, dst);
3161
    bind(&done);
3162
  }
3163
}
3164

    
3165

    
3166
void MacroAssembler::TaggedToI(Register result_reg,
3167
                               Register input_reg,
3168
                               XMMRegister temp,
3169
                               MinusZeroMode minus_zero_mode,
3170
                               Label* lost_precision,
3171
                               Label::Distance dst) {
3172
  Label done;
3173
  ASSERT(!temp.is(xmm0));
3174

    
3175
  // Heap number map check.
3176
  CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
3177
              Heap::kHeapNumberMapRootIndex);
3178
  j(not_equal, lost_precision, dst);
3179

    
3180
  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3181
  cvttsd2si(result_reg, xmm0);
3182
  Cvtlsi2sd(temp, result_reg);
3183
  ucomisd(xmm0, temp);
3184
  RecordComment("Deferred TaggedToI: lost precision");
3185
  j(not_equal, lost_precision, dst);
3186
  RecordComment("Deferred TaggedToI: NaN");
3187
  j(parity_even, lost_precision, dst);  // NaN.
3188
  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
3189
    testl(result_reg, result_reg);
3190
    j(not_zero, &done, Label::kNear);
3191
    movmskpd(result_reg, xmm0);
3192
    andl(result_reg, Immediate(1));
3193
    j(not_zero, lost_precision, dst);
3194
  }
3195
  bind(&done);
3196
}
3197

    
3198

    
3199
void MacroAssembler::LoadInstanceDescriptors(Register map,
3200
                                             Register descriptors) {
3201
  movq(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
3202
}
3203

    
3204

    
3205
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3206
  movq(dst, FieldOperand(map, Map::kBitField3Offset));
3207
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3208
}
3209

    
3210

    
3211
void MacroAssembler::EnumLength(Register dst, Register map) {
3212
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
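  // Bit field 3 holds a smi; since the enum-length bits start at bit 0,
  // masking with the smi-encoded mask leaves the enum length as a smi in dst.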
3213
  movq(dst, FieldOperand(map, Map::kBitField3Offset));
3214
  Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
3215
  and_(dst, kScratchRegister);
3216
}
3217

    
3218

    
3219
void MacroAssembler::DispatchMap(Register obj,
3220
                                 Register unused,
3221
                                 Handle<Map> map,
3222
                                 Handle<Code> success,
3223
                                 SmiCheckType smi_check_type) {
3224
  Label fail;
3225
  if (smi_check_type == DO_SMI_CHECK) {
3226
    JumpIfSmi(obj, &fail);
3227
  }
3228
  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
3229
  j(equal, success, RelocInfo::CODE_TARGET);
3230

    
3231
  bind(&fail);
3232
}
3233

    
3234

    
3235
void MacroAssembler::AssertNumber(Register object) {
3236
  if (emit_debug_code()) {
3237
    Label ok;
3238
    Condition is_smi = CheckSmi(object);
3239
    j(is_smi, &ok, Label::kNear);
3240
    Cmp(FieldOperand(object, HeapObject::kMapOffset),
3241
        isolate()->factory()->heap_number_map());
3242
    Check(equal, kOperandIsNotANumber);
3243
    bind(&ok);
3244
  }
3245
}
3246

    
3247

    
3248
void MacroAssembler::AssertNotSmi(Register object) {
3249
  if (emit_debug_code()) {
3250
    Condition is_smi = CheckSmi(object);
3251
    Check(NegateCondition(is_smi), kOperandIsASmi);
3252
  }
3253
}
3254

    
3255

    
3256
void MacroAssembler::AssertSmi(Register object) {
3257
  if (emit_debug_code()) {
3258
    Condition is_smi = CheckSmi(object);
3259
    Check(is_smi, kOperandIsNotASmi);
3260
  }
3261
}
3262

    
3263

    
3264
void MacroAssembler::AssertSmi(const Operand& object) {
3265
  if (emit_debug_code()) {
3266
    Condition is_smi = CheckSmi(object);
3267
    Check(is_smi, kOperandIsNotASmi);
3268
  }
3269
}
3270

    
3271

    
3272
void MacroAssembler::AssertZeroExtended(Register int32_register) {
3273
  if (emit_debug_code()) {
3274
    ASSERT(!int32_register.is(kScratchRegister));
3275
    movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
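    // A properly zero-extended 32-bit value is strictly below 2^32.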
3276
    cmpq(kScratchRegister, int32_register);
3277
    Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
3278
  }
3279
}
3280

    
3281

    
3282
void MacroAssembler::AssertString(Register object) {
3283
  if (emit_debug_code()) {
3284
    testb(object, Immediate(kSmiTagMask));
3285
    Check(not_equal, kOperandIsASmiAndNotAString);
3286
    push(object);
3287
    movq(object, FieldOperand(object, HeapObject::kMapOffset));
3288
    CmpInstanceType(object, FIRST_NONSTRING_TYPE);
3289
    pop(object);
3290
    Check(below, kOperandIsNotAString);
3291
  }
3292
}
3293

    
3294

    
3295
void MacroAssembler::AssertName(Register object) {
3296
  if (emit_debug_code()) {
3297
    testb(object, Immediate(kSmiTagMask));
3298
    Check(not_equal, kOperandIsASmiAndNotAName);
3299
    push(object);
3300
    movq(object, FieldOperand(object, HeapObject::kMapOffset));
3301
    CmpInstanceType(object, LAST_NAME_TYPE);
3302
    pop(object);
3303
    Check(below_equal, kOperandIsNotAName);
3304
  }
3305
}
3306

    
3307

    
3308
void MacroAssembler::AssertRootValue(Register src,
3309
                                     Heap::RootListIndex root_value_index,
3310
                                     BailoutReason reason) {
3311
  if (emit_debug_code()) {
3312
    ASSERT(!src.is(kScratchRegister));
3313
    LoadRoot(kScratchRegister, root_value_index);
3314
    cmpq(src, kScratchRegister);
3315
    Check(equal, reason);
3316
  }
3317
}
3318

    
3319

    
3320

    
3321
Condition MacroAssembler::IsObjectStringType(Register heap_object,
3322
                                             Register map,
3323
                                             Register instance_type) {
3324
  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3325
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3326
  STATIC_ASSERT(kNotStringTag != 0);
3327
  testb(instance_type, Immediate(kIsNotStringMask));
3328
  return zero;
3329
}
3330

    
3331

    
3332
Condition MacroAssembler::IsObjectNameType(Register heap_object,
3333
                                           Register map,
3334
                                           Register instance_type) {
3335
  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
3336
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
3337
  cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE)));
3338
  return below_equal;
3339
}
3340

    
3341

    
3342
void MacroAssembler::TryGetFunctionPrototype(Register function,
3343
                                             Register result,
3344
                                             Label* miss,
3345
                                             bool miss_on_bound_function) {
3346
  // Check that the receiver isn't a smi.
3347
  testl(function, Immediate(kSmiTagMask));
3348
  j(zero, miss);
3349

    
3350
  // Check that the function really is a function.
3351
  CmpObjectType(function, JS_FUNCTION_TYPE, result);
3352
  j(not_equal, miss);
3353

    
3354
  if (miss_on_bound_function) {
3355
    movq(kScratchRegister,
3356
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3357
    // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
3358
    // field).
3359
    TestBit(FieldOperand(kScratchRegister,
3360
                         SharedFunctionInfo::kCompilerHintsOffset),
3361
            SharedFunctionInfo::kBoundFunction);
3362
    j(not_zero, miss);
3363
  }
3364

    
3365
  // Make sure that the function has an instance prototype.
3366
  Label non_instance;
3367
  testb(FieldOperand(result, Map::kBitFieldOffset),
3368
        Immediate(1 << Map::kHasNonInstancePrototype));
3369
  j(not_zero, &non_instance, Label::kNear);
3370

    
3371
  // Get the prototype or initial map from the function.
3372
  movq(result,
3373
       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3374

    
3375
  // If the prototype or initial map is the hole, don't return it and
3376
  // simply miss the cache instead. This will allow us to allocate a
3377
  // prototype object on-demand in the runtime system.
3378
  CompareRoot(result, Heap::kTheHoleValueRootIndex);
3379
  j(equal, miss);
3380

    
3381
  // If the function does not have an initial map, we're done.
3382
  Label done;
3383
  CmpObjectType(result, MAP_TYPE, kScratchRegister);
3384
  j(not_equal, &done, Label::kNear);
3385

    
3386
  // Get the prototype from the initial map.
3387
  movq(result, FieldOperand(result, Map::kPrototypeOffset));
3388
  jmp(&done, Label::kNear);
3389

    
3390
  // Non-instance prototype: Fetch prototype from constructor field
3391
  // in initial map.
3392
  bind(&non_instance);
3393
  movq(result, FieldOperand(result, Map::kConstructorOffset));
3394

    
3395
  // All done.
3396
  bind(&done);
3397
}
3398

    
3399

    
3400
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
3401
  if (FLAG_native_code_counters && counter->Enabled()) {
3402
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
3403
    movl(counter_operand, Immediate(value));
3404
  }
3405
}
3406

    
3407

    
3408
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
3409
  ASSERT(value > 0);
3410
  if (FLAG_native_code_counters && counter->Enabled()) {
3411
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
3412
    if (value == 1) {
3413
      incl(counter_operand);
3414
    } else {
3415
      addl(counter_operand, Immediate(value));
3416
    }
3417
  }
3418
}
3419

    
3420

    
3421
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
3422
  ASSERT(value > 0);
3423
  if (FLAG_native_code_counters && counter->Enabled()) {
3424
    Operand counter_operand = ExternalOperand(ExternalReference(counter));
3425
    if (value == 1) {
3426
      decl(counter_operand);
3427
    } else {
3428
      subl(counter_operand, Immediate(value));
3429
    }
3430
  }
3431
}
3432

    
3433

    
3434
#ifdef ENABLE_DEBUGGER_SUPPORT
3435
void MacroAssembler::DebugBreak() {
3436
  Set(rax, 0);  // No arguments.
3437
  LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
3438
  CEntryStub ces(1);
3439
  ASSERT(AllowThisStubCall(&ces));
3440
  Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
3441
}
3442
#endif  // ENABLE_DEBUGGER_SUPPORT
3443

    
3444

    
3445
void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
3446
  // This macro takes the dst register to make the code more readable
3447
  // at the call sites. However, the dst register has to be rcx to
3448
  // follow the calling convention which requires the call type to be
3449
  // in rcx.
3450
  ASSERT(dst.is(rcx));
3451
  if (call_kind == CALL_AS_FUNCTION) {
3452
    LoadSmiConstant(dst, Smi::FromInt(1));
3453
  } else {
3454
    LoadSmiConstant(dst, Smi::FromInt(0));
3455
  }
3456
}
3457

    
3458

    
3459
void MacroAssembler::InvokeCode(Register code,
3460
                                const ParameterCount& expected,
3461
                                const ParameterCount& actual,
3462
                                InvokeFlag flag,
3463
                                const CallWrapper& call_wrapper,
3464
                                CallKind call_kind) {
3465
  // You can't call a function without a valid frame.
3466
  ASSERT(flag == JUMP_FUNCTION || has_frame());
3467

    
3468
  Label done;
3469
  bool definitely_mismatches = false;
3470
  InvokePrologue(expected,
3471
                 actual,
3472
                 Handle<Code>::null(),
3473
                 code,
3474
                 &done,
3475
                 &definitely_mismatches,
3476
                 flag,
3477
                 Label::kNear,
3478
                 call_wrapper,
3479
                 call_kind);
3480
  if (!definitely_mismatches) {
3481
    if (flag == CALL_FUNCTION) {
3482
      call_wrapper.BeforeCall(CallSize(code));
3483
      SetCallKind(rcx, call_kind);
3484
      call(code);
3485
      call_wrapper.AfterCall();
3486
    } else {
3487
      ASSERT(flag == JUMP_FUNCTION);
3488
      SetCallKind(rcx, call_kind);
3489
      jmp(code);
3490
    }
3491
    bind(&done);
3492
  }
3493
}
3494

    
3495

    
3496
void MacroAssembler::InvokeCode(Handle<Code> code,
3497
                                const ParameterCount& expected,
3498
                                const ParameterCount& actual,
3499
                                RelocInfo::Mode rmode,
3500
                                InvokeFlag flag,
3501
                                const CallWrapper& call_wrapper,
3502
                                CallKind call_kind) {
3503
  // You can't call a function without a valid frame.
3504
  ASSERT(flag == JUMP_FUNCTION || has_frame());
3505

    
3506
  Label done;
3507
  bool definitely_mismatches = false;
3508
  Register dummy = rax;
3509
  InvokePrologue(expected,
3510
                 actual,
3511
                 code,
3512
                 dummy,
3513
                 &done,
3514
                 &definitely_mismatches,
3515
                 flag,
3516
                 Label::kNear,
3517
                 call_wrapper,
3518
                 call_kind);
3519
  if (!definitely_mismatches) {
3520
    if (flag == CALL_FUNCTION) {
3521
      call_wrapper.BeforeCall(CallSize(code));
3522
      SetCallKind(rcx, call_kind);
3523
      Call(code, rmode);
3524
      call_wrapper.AfterCall();
3525
    } else {
3526
      ASSERT(flag == JUMP_FUNCTION);
3527
      SetCallKind(rcx, call_kind);
3528
      Jump(code, rmode);
3529
    }
3530
    bind(&done);
3531
  }
3532
}
3533

    
3534

    
3535
void MacroAssembler::InvokeFunction(Register function,
3536
                                    const ParameterCount& actual,
3537
                                    InvokeFlag flag,
3538
                                    const CallWrapper& call_wrapper,
3539
                                    CallKind call_kind) {
3540
  // You can't call a function without a valid frame.
3541
  ASSERT(flag == JUMP_FUNCTION || has_frame());
3542

    
3543
  ASSERT(function.is(rdi));
3544
  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
3545
  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
3546
  movsxlq(rbx,
3547
          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
3548
  // Advances rdx to the end of the Code object header, to the start of
3549
  // the executable code.
3550
  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3551

    
3552
  ParameterCount expected(rbx);
3553
  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3554
}
3555

    
3556

    
3557
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3558
                                    const ParameterCount& expected,
3559
                                    const ParameterCount& actual,
3560
                                    InvokeFlag flag,
3561
                                    const CallWrapper& call_wrapper,
3562
                                    CallKind call_kind) {
3563
  // You can't call a function without a valid frame.
3564
  ASSERT(flag == JUMP_FUNCTION || has_frame());
3565

    
3566
  // Get the function and setup the context.
3567
  Move(rdi, function);
3568
  movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
3569

    
3570
  // We call indirectly through the code field in the function to
3571
  // allow recompilation to take effect without changing any of the
3572
  // call sites.
3573
  movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
3574
  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
3575
}
3576

    
3577

    
3578
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3579
                                    const ParameterCount& actual,
3580
                                    Handle<Code> code_constant,
3581
                                    Register code_register,
3582
                                    Label* done,
3583
                                    bool* definitely_mismatches,
3584
                                    InvokeFlag flag,
3585
                                    Label::Distance near_jump,
3586
                                    const CallWrapper& call_wrapper,
3587
                                    CallKind call_kind) {
3588
  bool definitely_matches = false;
3589
  *definitely_mismatches = false;
3590
  Label invoke;
3591
  if (expected.is_immediate()) {
3592
    ASSERT(actual.is_immediate());
3593
    if (expected.immediate() == actual.immediate()) {
3594
      definitely_matches = true;
3595
    } else {
3596
      Set(rax, actual.immediate());
3597
      if (expected.immediate() ==
3598
              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
3599
        // Don't worry about adapting arguments for built-ins that
3600
        // don't want that done. Skip adaptation code by making it look
3601
        // like we have a match between expected and actual number of
3602
        // arguments.
3603
        definitely_matches = true;
3604
      } else {
3605
        *definitely_mismatches = true;
3606
        Set(rbx, expected.immediate());
3607
      }
3608
    }
3609
  } else {
3610
    if (actual.is_immediate()) {
3611
      // Expected is in register, actual is immediate. This is the
3612
      // case when we invoke function values without going through the
3613
      // IC mechanism.
3614
      cmpq(expected.reg(), Immediate(actual.immediate()));
3615
      j(equal, &invoke, Label::kNear);
3616
      ASSERT(expected.reg().is(rbx));
3617
      Set(rax, actual.immediate());
3618
    } else if (!expected.reg().is(actual.reg())) {
3619
      // Both expected and actual are in (different) registers. This
3620
      // is the case when we invoke functions using call and apply.
3621
      cmpq(expected.reg(), actual.reg());
3622
      j(equal, &invoke, Label::kNear);
3623
      ASSERT(actual.reg().is(rax));
3624
      ASSERT(expected.reg().is(rbx));
3625
    }
3626
  }
3627

    
3628
  if (!definitely_matches) {
3629
    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
3630
    if (!code_constant.is_null()) {
3631
      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
3632
      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3633
    } else if (!code_register.is(rdx)) {
3634
      movq(rdx, code_register);
3635
    }
3636

    
3637
    if (flag == CALL_FUNCTION) {
3638
      call_wrapper.BeforeCall(CallSize(adaptor));
3639
      SetCallKind(rcx, call_kind);
3640
      Call(adaptor, RelocInfo::CODE_TARGET);
3641
      call_wrapper.AfterCall();
3642
      if (!*definitely_mismatches) {
3643
        jmp(done, near_jump);
3644
      }
3645
    } else {
3646
      SetCallKind(rcx, call_kind);
3647
      Jump(adaptor, RelocInfo::CODE_TARGET);
3648
    }
3649
    bind(&invoke);
3650
  }
3651
}
3652

    
3653

    
3654
void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
3655
  if (frame_mode == BUILD_STUB_FRAME) {
3656
    push(rbp);  // Caller's frame pointer.
3657
    movq(rbp, rsp);
3658
    push(rsi);  // Callee's context.
3659
    Push(Smi::FromInt(StackFrame::STUB));
3660
  } else {
3661
    PredictableCodeSizeScope predictable_code_size_scope(this,
3662
        kNoCodeAgeSequenceLength);
3663
    if (isolate()->IsCodePreAgingActive()) {
3664
        // Pre-age the code.
3665
      Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
3666
           RelocInfo::CODE_AGE_SEQUENCE);
3667
      Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
3668
    } else {
3669
      push(rbp);  // Caller's frame pointer.
3670
      movq(rbp, rsp);
3671
      push(rsi);  // Callee's context.
3672
      push(rdi);  // Callee's JS function.
3673
    }
3674
  }
3675
}
3676

    
3677

    
3678
void MacroAssembler::EnterFrame(StackFrame::Type type) {
3679
  push(rbp);
3680
  movq(rbp, rsp);
3681
  push(rsi);  // Context.
3682
  Push(Smi::FromInt(type));
3683
  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3684
  push(kScratchRegister);
3685
  if (emit_debug_code()) {
3686
    movq(kScratchRegister,
3687
         isolate()->factory()->undefined_value(),
3688
         RelocInfo::EMBEDDED_OBJECT);
3689
    cmpq(Operand(rsp, 0), kScratchRegister);
3690
    Check(not_equal, kCodeObjectNotProperlyPatched);
3691
  }
3692
}
3693

    
3694

    
3695
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3696
  if (emit_debug_code()) {
3697
    Move(kScratchRegister, Smi::FromInt(type));
3698
    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
3699
    Check(equal, kStackFrameTypesMustMatch);
3700
  }
3701
  movq(rsp, rbp);
3702
  pop(rbp);
3703
}
3704

    
3705

    
3706
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
3707
  // Set up the frame structure on the stack.
3708
  // All constants are relative to the frame pointer of the exit frame.
3709
  ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
3710
         kFPOnStackSize + kPCOnStackSize);
3711
  ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
3712
  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
3713
  push(rbp);
3714
  movq(rbp, rsp);
3715

    
3716
  // Reserve room for entry stack pointer and push the code object.
3717
  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
3718
  push(Immediate(0));  // Saved entry sp, patched before call.
3719
  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
3720
  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
3721

    
3722
  // Save the frame pointer and the context in top.
3723
  if (save_rax) {
3724
    movq(r14, rax);  // Backup rax in callee-save register.
3725
  }
3726

    
3727
  Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp);
3728
  Store(ExternalReference(Isolate::kContextAddress, isolate()), rsi);
3729
}
3730

    
3731

    
3732
void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
3733
                                            bool save_doubles) {
3734
#ifdef _WIN64
3735
  const int kShadowSpace = 4;
3736
  arg_stack_space += kShadowSpace;
3737
#endif
3738
  // Optionally save all XMM registers.
3739
  if (save_doubles) {
3740
    int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
3741
        arg_stack_space * kPointerSize;
3742
    subq(rsp, Immediate(space));
3743
    int offset = -2 * kPointerSize;
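    // The doubles are stored below the two slots (saved entry sp and code
    // object) that EnterExitFramePrologue pushed relative to rbp.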
3744
    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3745
      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3746
      movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
3747
    }
3748
  } else if (arg_stack_space > 0) {
3749
    subq(rsp, Immediate(arg_stack_space * kPointerSize));
3750
  }
3751

    
3752
  // Get the required frame alignment for the OS.
3753
  const int kFrameAlignment = OS::ActivationFrameAlignment();
3754
  if (kFrameAlignment > 0) {
3755
    ASSERT(IsPowerOf2(kFrameAlignment));
3756
    ASSERT(is_int8(kFrameAlignment));
3757
    and_(rsp, Immediate(-kFrameAlignment));
3758
  }
3759

    
3760
  // Patch the saved entry sp.
3761
  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
3762
}
3763

    
3764

    
3765
void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
3766
  EnterExitFramePrologue(true);
3767

    
3768
  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
3769
  // so it must be retained across the C-call.
3770
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
3771
  lea(r15, Operand(rbp, r14, times_pointer_size, offset));
3772

    
3773
  EnterExitFrameEpilogue(arg_stack_space, save_doubles);
3774
}
3775

    
3776

    
3777
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
3778
  EnterExitFramePrologue(false);
3779
  EnterExitFrameEpilogue(arg_stack_space, false);
3780
}
3781

    
3782

    
3783
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
3784
  // Registers:
3785
  // r15 : argv
3786
  if (save_doubles) {
3787
    int offset = -2 * kPointerSize;
3788
    for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
3789
      XMMRegister reg = XMMRegister::FromAllocationIndex(i);
3790
      movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
3791
    }
3792
  }
3793
  // Get the return address from the stack and restore the frame pointer.
3794
  movq(rcx, Operand(rbp, 1 * kPointerSize));
3795
  movq(rbp, Operand(rbp, 0 * kPointerSize));
3796

    
3797
  // Drop everything up to and including the arguments and the receiver
3798
  // from the caller stack.
3799
  lea(rsp, Operand(r15, 1 * kPointerSize));
3800

    
3801
  PushReturnAddressFrom(rcx);
3802

    
3803
  LeaveExitFrameEpilogue(true);
3804
}
3805

    
3806

    
3807
void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
3808
  movq(rsp, rbp);
3809
  pop(rbp);
3810

    
3811
  LeaveExitFrameEpilogue(restore_context);
3812
}
3813

    
3814

    
3815
void MacroAssembler::LeaveExitFrameEpilogue(bool restore_context) {
3816
  // Restore current context from top and clear it in debug mode.
3817
  ExternalReference context_address(Isolate::kContextAddress, isolate());
3818
  Operand context_operand = ExternalOperand(context_address);
3819
  if (restore_context) {
3820
    movq(rsi, context_operand);
3821
  }
3822
#ifdef DEBUG
3823
  movq(context_operand, Immediate(0));
3824
#endif
3825

    
3826
  // Clear the top frame.
3827
  ExternalReference c_entry_fp_address(Isolate::kCEntryFPAddress,
3828
                                       isolate());
3829
  Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address);
3830
  movq(c_entry_fp_operand, Immediate(0));
3831
}
3832

    
3833

    
3834
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3835
                                            Register scratch,
3836
                                            Label* miss) {
3837
  Label same_contexts;
3838

    
3839
  ASSERT(!holder_reg.is(scratch));
3840
  ASSERT(!scratch.is(kScratchRegister));
3841
  // Load current lexical context from the stack frame.
3842
  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
3843

    
3844
  // When generating debug code, make sure the lexical context is set.
3845
  if (emit_debug_code()) {
3846
    cmpq(scratch, Immediate(0));
3847
    Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext);
3848
  }
3849
  // Load the native context of the current context.
3850
  int offset =
3851
      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3852
  movq(scratch, FieldOperand(scratch, offset));
3853
  movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
3854

    
3855
  // Check the context is a native context.
3856
  if (emit_debug_code()) {
3857
    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
3858
        isolate()->factory()->native_context_map());
3859
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3860
  }
3861

    
3862
  // Check if both contexts are the same.
3863
  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3864
  j(equal, &same_contexts);
3865

    
3866
  // Compare security tokens.
3867
  // Check that the security token in the calling global object is
3868
  // compatible with the security token in the receiving global
3869
  // object.
3870

    
3871
  // Check the context is a native context.
3872
  if (emit_debug_code()) {
3873
    // Preserve original value of holder_reg.
3874
    push(holder_reg);
3875
    movq(holder_reg,
3876
         FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3877
    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
3878
    Check(not_equal, kJSGlobalProxyContextShouldNotBeNull);
3879

    
3880
    // Read the first word and compare to native_context_map().
3881
    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
3882
    CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex);
3883
    Check(equal, kJSGlobalObjectNativeContextShouldBeANativeContext);
3884
    pop(holder_reg);
3885
  }
3886

    
3887
  movq(kScratchRegister,
3888
       FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3889
  int token_offset =
3890
      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
3891
  movq(scratch, FieldOperand(scratch, token_offset));
3892
  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
3893
  j(not_equal, miss);
3894

    
3895
  bind(&same_contexts);
3896
}
3897

    
3898

    
3899
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
3900
  // First of all we assign the hash seed to scratch.
3901
  LoadRoot(scratch, Heap::kHashSeedRootIndex);
3902
  SmiToInteger32(scratch, scratch);
3903

    
3904
  // Xor original key with a seed.
3905
  xorl(r0, scratch);
3906

    
3907
  // Compute the hash code from the untagged key.  This must be kept in sync
3908
  // with ComputeIntegerHash in utils.h.
3909
  //
3910
  // hash = ~hash + (hash << 15);
3911
  movl(scratch, r0);
3912
  notl(r0);
3913
  shll(scratch, Immediate(15));
3914
  addl(r0, scratch);
3915
  // hash = hash ^ (hash >> 12);
3916
  movl(scratch, r0);
3917
  shrl(scratch, Immediate(12));
3918
  xorl(r0, scratch);
3919
  // hash = hash + (hash << 2);
3920
  leal(r0, Operand(r0, r0, times_4, 0));
3921
  // hash = hash ^ (hash >> 4);
3922
  movl(scratch, r0);
3923
  shrl(scratch, Immediate(4));
3924
  xorl(r0, scratch);
3925
  // hash = hash * 2057;
3926
  imull(r0, r0, Immediate(2057));
3927
  // hash = hash ^ (hash >> 16);
3928
  movl(scratch, r0);
3929
  shrl(scratch, Immediate(16));
3930
  xorl(r0, scratch);
3931
}
3932

    
3933

    
3934

    
3935
void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3936
                                              Register elements,
3937
                                              Register key,
3938
                                              Register r0,
3939
                                              Register r1,
3940
                                              Register r2,
3941
                                              Register result) {
3942
  // Register use:
3943
  //
3944
  // elements - holds the slow-case elements of the receiver on entry.
3945
  //            Unchanged unless 'result' is the same register.
3946
  //
3947
  // key      - holds the smi key on entry.
3948
  //            Unchanged unless 'result' is the same register.
3949
  //
3950
  // Scratch registers:
3951
  //
3952
  // r0 - holds the untagged key on entry and holds the hash once computed.
3953
  //
3954
  // r1 - used to hold the capacity mask of the dictionary
3955
  //
3956
  // r2 - used for the index into the dictionary.
3957
  //
3958
  // result - holds the result on exit if the load succeeded.
3959
  //          Allowed to be the same as 'elements' or 'key'.
3960
  //          Unchanged on bailout so 'key' or 'result' can be used
3961
  //          in further computation.
3962

    
3963
  Label done;
3964

    
3965
  GetNumberHash(r0, r1);
3966

    
3967
  // Compute capacity mask.
3968
  SmiToInteger32(r1, FieldOperand(elements,
3969
                                  SeededNumberDictionary::kCapacityOffset));
3970
  decl(r1);
3971

    
3972
  // Generate an unrolled loop that performs a few probes before giving up.
3973
  const int kProbes = 4;
3974
  for (int i = 0; i < kProbes; i++) {
3975
    // Use r2 for index calculations and keep the hash intact in r0.
3976
    movq(r2, r0);
3977
    // Compute the masked index: (hash + i + i * i) & mask.
3978
    if (i > 0) {
3979
      addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i)));
3980
    }
3981
    and_(r2, r1);
3982

    
3983
    // Scale the index by multiplying by the entry size.
3984
    ASSERT(SeededNumberDictionary::kEntrySize == 3);
3985
    lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
3986

    
3987
    // Check if the key matches.
3988
    cmpq(key, FieldOperand(elements,
3989
                           r2,
3990
                           times_pointer_size,
3991
                           SeededNumberDictionary::kElementsStartOffset));
3992
    if (i != (kProbes - 1)) {
3993
      j(equal, &done);
3994
    } else {
3995
      j(not_equal, miss);
3996
    }
3997
  }
3998

    
3999
  bind(&done);
4000
  // Check that the value is a normal property.
4001
  const int kDetailsOffset =
4002
      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
4003
  ASSERT_EQ(NORMAL, 0);
4004
  Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
4005
       Smi::FromInt(PropertyDetails::TypeField::kMask));
4006
  j(not_zero, miss);
4007

    
4008
  // Get the value at the masked, scaled index.
4009
  const int kValueOffset =
4010
      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4011
  movq(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
4012
}
4013

    
4014

    
4015
void MacroAssembler::LoadAllocationTopHelper(Register result,
4016
                                             Register scratch,
4017
                                             AllocationFlags flags) {
4018
  ExternalReference allocation_top =
4019
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
4020

    
4021
  // Just return if allocation top is already known.
4022
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
4023
    // No use of scratch if allocation top is provided.
4024
    ASSERT(!scratch.is_valid());
4025
#ifdef DEBUG
4026
    // Assert that result actually contains top on entry.
4027
    Operand top_operand = ExternalOperand(allocation_top);
4028
    cmpq(result, top_operand);
4029
    Check(equal, kUnexpectedAllocationTop);
4030
#endif
4031
    return;
4032
  }
4033

    
4034
  // Move address of new object to result. Use scratch register if available,
4035
  // and keep address in scratch until call to UpdateAllocationTopHelper.
4036
  if (scratch.is_valid()) {
4037
    LoadAddress(scratch, allocation_top);
4038
    movq(result, Operand(scratch, 0));
4039
  } else {
4040
    Load(result, allocation_top);
4041
  }
4042
}
4043

    
4044

    
4045
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
4046
                                               Register scratch,
4047
                                               AllocationFlags flags) {
4048
  if (emit_debug_code()) {
4049
    testq(result_end, Immediate(kObjectAlignmentMask));
4050
    Check(zero, kUnalignedAllocationInNewSpace);
4051
  }
4052

    
4053
  ExternalReference allocation_top =
4054
      AllocationUtils::GetAllocationTopReference(isolate(), flags);
4055

    
4056
  // Update new top.
4057
  if (scratch.is_valid()) {
4058
    // Scratch already contains address of allocation top.
4059
    movq(Operand(scratch, 0), result_end);
4060
  } else {
4061
    Store(allocation_top, result_end);
4062
  }
4063
}
4064

    
4065

    
4066
void MacroAssembler::Allocate(int object_size,
4067
                              Register result,
4068
                              Register result_end,
4069
                              Register scratch,
4070
                              Label* gc_required,
4071
                              AllocationFlags flags) {
4072
  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
4073
  ASSERT(object_size <= Page::kMaxNonCodeHeapObjectSize);
4074
  if (!FLAG_inline_new) {
4075
    if (emit_debug_code()) {
4076
      // Trash the registers to simulate an allocation failure.
4077
      movl(result, Immediate(0x7091));
4078
      if (result_end.is_valid()) {
4079
        movl(result_end, Immediate(0x7191));
4080
      }
4081
      if (scratch.is_valid()) {
4082
        movl(scratch, Immediate(0x7291));
4083
      }
4084
    }
4085
    jmp(gc_required);
4086
    return;
4087
  }
4088
  ASSERT(!result.is(result_end));
4089

    
4090
  // Load address of new object into result.
4091
  LoadAllocationTopHelper(result, scratch, flags);
4092

    
4093
  if (isolate()->heap_profiler()->is_tracking_allocations()) {
4094
    RecordObjectAllocation(isolate(), result, object_size);
4095
  }
4096

    
4097
  // Align the next allocation. Storing the filler map without checking top is
4098
  // safe in new-space because the limit of the heap is aligned there.
4099
  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
4100
    testq(result, Immediate(kDoubleAlignmentMask));
4101
    Check(zero, kAllocationIsNotDoubleAligned);
4102
  }
4103

    
4104
  // Calculate new top and bail out if new space is exhausted.
4105
  ExternalReference allocation_limit =
4106
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4107

    
4108
  Register top_reg = result_end.is_valid() ? result_end : result;
4109

    
4110
  if (!top_reg.is(result)) {
4111
    movq(top_reg, result);
4112
  }
4113
  addq(top_reg, Immediate(object_size));
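  // A carry means the computed top wrapped around the address space; treat it
  // as new-space exhaustion.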
4114
  j(carry, gc_required);
4115
  Operand limit_operand = ExternalOperand(allocation_limit);
4116
  cmpq(top_reg, limit_operand);
4117
  j(above, gc_required);
4118

    
4119
  // Update allocation top.
4120
  UpdateAllocationTopHelper(top_reg, scratch, flags);
4121

    
4122
  bool tag_result = (flags & TAG_OBJECT) != 0;
4123
  if (top_reg.is(result)) {
4124
    if (tag_result) {
4125
      subq(result, Immediate(object_size - kHeapObjectTag));
4126
    } else {
4127
      subq(result, Immediate(object_size));
4128
    }
4129
  } else if (tag_result) {
4130
    // Tag the result if requested.
4131
    ASSERT(kHeapObjectTag == 1);
4132
    incq(result);
4133
  }
4134
}
4135

    
4136

    
4137
void MacroAssembler::Allocate(int header_size,
4138
                              ScaleFactor element_size,
4139
                              Register element_count,
4140
                              Register result,
4141
                              Register result_end,
4142
                              Register scratch,
4143
                              Label* gc_required,
4144
                              AllocationFlags flags) {
4145
  ASSERT((flags & SIZE_IN_WORDS) == 0);
4146
  lea(result_end, Operand(element_count, element_size, header_size));
4147
  Allocate(result_end, result, result_end, scratch, gc_required, flags);
4148
}
4149

    
4150

    
4151
void MacroAssembler::Allocate(Register object_size,
4152
                              Register result,
4153
                              Register result_end,
4154
                              Register scratch,
4155
                              Label* gc_required,
4156
                              AllocationFlags flags) {
4157
  ASSERT((flags & SIZE_IN_WORDS) == 0);
4158
  if (!FLAG_inline_new) {
4159
    if (emit_debug_code()) {
4160
      // Trash the registers to simulate an allocation failure.
4161
      movl(result, Immediate(0x7091));
4162
      movl(result_end, Immediate(0x7191));
4163
      if (scratch.is_valid()) {
4164
        movl(scratch, Immediate(0x7291));
4165
      }
4166
      // object_size is left unchanged by this function.
4167
    }
4168
    jmp(gc_required);
4169
    return;
4170
  }
4171
  ASSERT(!result.is(result_end));
4172

    
4173
  // Load address of new object into result.
4174
  LoadAllocationTopHelper(result, scratch, flags);
4175

    
4176
  if (isolate()->heap_profiler()->is_tracking_allocations()) {
4177
    RecordObjectAllocation(isolate(), result, object_size);
4178
  }
4179

    
4180
  // Align the next allocation. Storing the filler map without checking top is
4181
  // safe in new-space because the limit of the heap is aligned there.
4182
  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
4183
    testq(result, Immediate(kDoubleAlignmentMask));
4184
    Check(zero, kAllocationIsNotDoubleAligned);
4185
  }
4186

    
4187
  // Calculate new top and bail out if new space is exhausted.
4188
  ExternalReference allocation_limit =
4189
      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
4190
  if (!object_size.is(result_end)) {
4191
    movq(result_end, object_size);
4192
  }
4193
  addq(result_end, result);
4194
  j(carry, gc_required);
4195
  Operand limit_operand = ExternalOperand(allocation_limit);
4196
  cmpq(result_end, limit_operand);
4197
  j(above, gc_required);
4198

    
4199
  // Update allocation top.
4200
  UpdateAllocationTopHelper(result_end, scratch, flags);
4201

    
4202
  // Tag the result if requested.
4203
  if ((flags & TAG_OBJECT) != 0) {
4204
    addq(result, Immediate(kHeapObjectTag));
4205
  }
4206
}
4207

    
4208

    
4209
void MacroAssembler::UndoAllocationInNewSpace(Register object) {
4210
  ExternalReference new_space_allocation_top =
4211
      ExternalReference::new_space_allocation_top_address(isolate());
4212

    
4213
  // Make sure the object has no tag before resetting top.
4214
  and_(object, Immediate(~kHeapObjectTagMask));
4215
  Operand top_operand = ExternalOperand(new_space_allocation_top);
4216
#ifdef DEBUG
4217
  cmpq(object, top_operand);
4218
  Check(below, kUndoAllocationOfNonAllocatedMemory);
4219
#endif
4220
  movq(top_operand, object);
4221
}
4222

    
4223

    
4224
void MacroAssembler::AllocateHeapNumber(Register result,
4225
                                        Register scratch,
4226
                                        Label* gc_required) {
4227
  // Allocate heap number in new space.
4228
  Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
4229

    
4230
  // Set the map.
4231
  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
4232
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4233
}
4234

    
4235

    
4236
void MacroAssembler::AllocateTwoByteString(Register result,
4237
                                           Register length,
4238
                                           Register scratch1,
4239
                                           Register scratch2,
4240
                                           Register scratch3,
4241
                                           Label* gc_required) {
4242
  // Calculate the number of bytes needed for the characters in the string while
4243
  // observing object alignment.
4244
  const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
4245
                               kObjectAlignmentMask;
4246
  ASSERT(kShortSize == 2);
4247
  // scratch1 = length * 2 + kObjectAlignmentMask.
4248
  lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
4249
                kHeaderAlignment));
4250
  and_(scratch1, Immediate(~kObjectAlignmentMask));
4251
  if (kHeaderAlignment > 0) {
4252
    subq(scratch1, Immediate(kHeaderAlignment));
4253
  }
4254

    
4255
  // Allocate two byte string in new space.
4256
  Allocate(SeqTwoByteString::kHeaderSize,
4257
           times_1,
4258
           scratch1,
4259
           result,
4260
           scratch2,
4261
           scratch3,
4262
           gc_required,
4263
           TAG_OBJECT);
4264

    
4265
  // Set the map, length and hash field.
4266
  LoadRoot(kScratchRegister, Heap::kStringMapRootIndex);
4267
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4268
  Integer32ToSmi(scratch1, length);
4269
  movq(FieldOperand(result, String::kLengthOffset), scratch1);
4270
  movq(FieldOperand(result, String::kHashFieldOffset),
4271
       Immediate(String::kEmptyHashField));
4272
}
4273

    
4274

    
4275
void MacroAssembler::AllocateAsciiString(Register result,
4276
                                         Register length,
4277
                                         Register scratch1,
4278
                                         Register scratch2,
4279
                                         Register scratch3,
4280
                                         Label* gc_required) {
4281
  // Calculate the number of bytes needed for the characters in the string while
4282
  // observing object alignment.
4283
  const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
4284
                               kObjectAlignmentMask;
4285
  movl(scratch1, length);
4286
  ASSERT(kCharSize == 1);
4287
  addq(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
4288
  and_(scratch1, Immediate(~kObjectAlignmentMask));
4289
  if (kHeaderAlignment > 0) {
4290
    subq(scratch1, Immediate(kHeaderAlignment));
4291
  }
4292

    
4293
  // Allocate ASCII string in new space.
4294
  Allocate(SeqOneByteString::kHeaderSize,
4295
           times_1,
4296
           scratch1,
4297
           result,
4298
           scratch2,
4299
           scratch3,
4300
           gc_required,
4301
           TAG_OBJECT);
4302

    
4303
  // Set the map, length and hash field.
4304
  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
4305
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4306
  Integer32ToSmi(scratch1, length);
4307
  movq(FieldOperand(result, String::kLengthOffset), scratch1);
4308
  movq(FieldOperand(result, String::kHashFieldOffset),
4309
       Immediate(String::kEmptyHashField));
4310
}
4311

    
4312

    
4313
void MacroAssembler::AllocateTwoByteConsString(Register result,
4314
                                        Register scratch1,
4315
                                        Register scratch2,
4316
                                        Label* gc_required) {
4317
  // Allocate a cons string in new space.
4318
  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
4319
           TAG_OBJECT);
4320

    
4321
  // Set the map. The other fields are left uninitialized.
4322
  LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex);
4323
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4324
}
4325

    
4326

    
4327
void MacroAssembler::AllocateAsciiConsString(Register result,
4328
                                             Register scratch1,
4329
                                             Register scratch2,
4330
                                             Label* gc_required) {
4331
  Label allocate_new_space, install_map;
4332
  AllocationFlags flags = TAG_OBJECT;
4333

    
4334
  ExternalReference high_promotion_mode = ExternalReference::
4335
      new_space_high_promotion_mode_active_address(isolate());
4336

    
4337
  Load(scratch1, high_promotion_mode);
4338
  testb(scratch1, Immediate(1));
4339
  j(zero, &allocate_new_space);
4340
  Allocate(ConsString::kSize,
4341
           result,
4342
           scratch1,
4343
           scratch2,
4344
           gc_required,
4345
           static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
4346

    
4347
  jmp(&install_map);
4348

    
4349
  bind(&allocate_new_space);
4350
  Allocate(ConsString::kSize,
4351
           result,
4352
           scratch1,
4353
           scratch2,
4354
           gc_required,
4355
           flags);
4356

    
4357
  bind(&install_map);
4358

    
4359
  // Set the map. The other fields are left uninitialized.
4360
  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
4361
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4362
}
4363

    
4364

    
4365
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
4366
                                          Register scratch1,
4367
                                          Register scratch2,
4368
                                          Label* gc_required) {
4369
  // Allocate a sliced string in new space.
4370
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4371
           TAG_OBJECT);
4372

    
4373
  // Set the map. The other fields are left uninitialized.
4374
  LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex);
4375
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4376
}
4377

    
4378

    
4379
void MacroAssembler::AllocateAsciiSlicedString(Register result,
4380
                                               Register scratch1,
4381
                                               Register scratch2,
4382
                                               Label* gc_required) {
4383
  // Allocate a sliced string in new space.
4384
  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
4385
           TAG_OBJECT);
4386

    
4387
  // Set the map. The other fields are left uninitialized.
4388
  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
4389
  movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
4390
}
4391

    
4392

    
4393
// Copy memory, byte-by-byte, from source to destination.  Not optimized for
4394
// long or aligned copies.  The contents of scratch and length are destroyed.
4395
// Destination is incremented by length; source, length and scratch are
4396
// clobbered.
4397
// A simpler loop is faster on small copies, but slower on large ones.
4398
// The cld() instruction must have been emitted, to clear the direction flag,
4399
// before calling this function.
4400
void MacroAssembler::CopyBytes(Register destination,
4401
                               Register source,
4402
                               Register length,
4403
                               int min_length,
4404
                               Register scratch) {
4405
  ASSERT(min_length >= 0);
4406
  if (emit_debug_code()) {
4407
    cmpl(length, Immediate(min_length));
4408
    Assert(greater_equal, kInvalidMinLength);
4409
  }
4410
  Label loop, done, short_string, short_loop;
4411

    
4412
  const int kLongStringLimit = 20;
4413
  if (min_length <= kLongStringLimit) {
4414
    cmpl(length, Immediate(kLongStringLimit));
4415
    j(less_equal, &short_string);
4416
  }
4417

    
4418
  ASSERT(source.is(rsi));
4419
  ASSERT(destination.is(rdi));
4420
  ASSERT(length.is(rcx));
4421

    
4422
  // Because source is 8-byte aligned in our uses of this function,
4423
  // we keep source aligned for the rep movs operation by copying the odd bytes
4424
  // at the end of the ranges.
4425
  movq(scratch, length);
4426
  shrl(length, Immediate(kPointerSizeLog2));
4427
  repmovsq();
4428
  // Copy the remaining (length mod 8) bytes by re-copying the last word of
  // the source range; this may overlap bytes that were already moved.
4429
  andl(scratch, Immediate(kPointerSize - 1));
4430
  movq(length, Operand(source, scratch, times_1, -kPointerSize));
4431
  movq(Operand(destination, scratch, times_1, -kPointerSize), length);
4432
  addq(destination, scratch);
4433

    
4434
  if (min_length <= kLongStringLimit) {
4435
    jmp(&done);
4436

    
4437
    bind(&short_string);
4438
    if (min_length == 0) {
4439
      testl(length, length);
4440
      j(zero, &done);
4441
    }
4442
    lea(scratch, Operand(destination, length, times_1, 0));
4443

    
4444
    bind(&short_loop);
4445
    movb(length, Operand(source, 0));
4446
    movb(Operand(destination, 0), length);
4447
    incq(source);
4448
    incq(destination);
4449
    cmpq(destination, scratch);
4450
    j(not_equal, &short_loop);
4451

    
4452
    bind(&done);
4453
  }
4454
}
4455

    
4456

    
4457
void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
4458
                                                Register end_offset,
4459
                                                Register filler) {
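  // Store 'filler' into every pointer-sized slot in [start_offset, end_offset).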
4460
  Label loop, entry;
4461
  jmp(&entry);
4462
  bind(&loop);
4463
  movq(Operand(start_offset, 0), filler);
4464
  addq(start_offset, Immediate(kPointerSize));
4465
  bind(&entry);
4466
  cmpq(start_offset, end_offset);
4467
  j(less, &loop);
4468
}
4469

    
4470

    
4471
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4472
  if (context_chain_length > 0) {
4473
    // Move up the chain of contexts to the context containing the slot.
4474
    movq(dst, Operand(rsi, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4475
    for (int i = 1; i < context_chain_length; i++) {
4476
      movq(dst, Operand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4477
    }
4478
  } else {
4479
    // Slot is in the current function context.  Move it into the
4480
    // destination register in case we store into it (the write barrier
4481
    // cannot be allowed to destroy the context in rsi).
4482
    movq(dst, rsi);
4483
  }
4484

    
4485
  // We should not have found a with context by walking the context
4486
  // chain (i.e., the static scope chain and runtime context chain do
4487
  // not agree).  A variable occurring in such a scope should have
4488
  // slot type LOOKUP and not CONTEXT.
4489
  if (emit_debug_code()) {
4490
    CompareRoot(FieldOperand(dst, HeapObject::kMapOffset),
4491
                Heap::kWithContextMapRootIndex);
4492
    Check(not_equal, kVariableResolvedToWithContext);
4493
  }
4494
}
4495

    
4496

    
4497
void MacroAssembler::LoadTransitionedArrayMapConditional(
4498
    ElementsKind expected_kind,
4499
    ElementsKind transitioned_kind,
4500
    Register map_in_out,
4501
    Register scratch,
4502
    Label* no_map_match) {
4503
  // Load the global or builtins object from the current context.
4504
  movq(scratch,
4505
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4506
  movq(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
4507

    
4508
  // Check that the function's map is the same as the expected cached map.
4509
  movq(scratch, Operand(scratch,
4510
                        Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4511

    
4512
  int offset = expected_kind * kPointerSize +
4513
      FixedArrayBase::kHeaderSize;
  cmpq(map_in_out, FieldOperand(scratch, offset));
  j(not_equal, no_map_match);

  // Use the transitioned cached map.
  offset = transitioned_kind * kPointerSize +
      FixedArrayBase::kHeaderSize;
  movq(map_in_out, FieldOperand(scratch, offset));
}


void MacroAssembler::LoadInitialArrayMap(
    Register function_in, Register scratch,
    Register map_out, bool can_have_holes) {
  ASSERT(!function_in.is(map_out));
  Label done;
  movq(map_out, FieldOperand(function_in,
                             JSFunction::kPrototypeOrInitialMapOffset));
  if (!FLAG_smi_only_arrays) {
    ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        kind,
                                        map_out,
                                        scratch,
                                        &done);
  } else if (can_have_holes) {
    LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                        FAST_HOLEY_SMI_ELEMENTS,
                                        map_out,
                                        scratch,
                                        &done);
  }
  bind(&done);
}

#ifdef _WIN64
static const int kRegisterPassedArguments = 4;
#else
static const int kRegisterPassedArguments = 6;
#endif

void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  movq(function,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  // Load the native context from the global or builtins object.
  movq(function, FieldOperand(function, GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  movq(function, Operand(function, Context::SlotOffset(index)));
}


void MacroAssembler::LoadArrayFunction(Register function) {
  movq(function,
       Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset));
  movq(function,
       Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map) {
  // Load the initial map.  The global functions all have initial maps.
  movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
    jmp(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}


int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
  // On Windows 64 stack slots are reserved by the caller for all arguments
  // including the ones passed in registers, and space is always allocated for
  // the four register arguments even if the function takes fewer than four
  // arguments.
  // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
  // and the caller does not reserve stack slots for them.
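  // For example: on Windows x64 a call with 3 arguments still reserves the
  // 4 shadow slots, and a call with 6 arguments reserves 6 slots; on the
  // System V ABI a call with 3 arguments reserves 0 stack slots and a call
  // with 8 arguments reserves 2.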
  ASSERT(num_arguments >= 0);
#ifdef _WIN64
  const int kMinimumStackSlots = kRegisterPassedArguments;
  if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
  return num_arguments;
#else
  if (num_arguments < kRegisterPassedArguments) return 0;
  return num_arguments - kRegisterPassedArguments;
#endif
}


void MacroAssembler::PrepareCallCFunction(int num_arguments) {
  int frame_alignment = OS::ActivationFrameAlignment();
  ASSERT(frame_alignment != 0);
  ASSERT(num_arguments >= 0);

  // Make stack end at alignment and allocate space for arguments and old rsp.
  movq(kScratchRegister, rsp);
  ASSERT(IsPowerOf2(frame_alignment));
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
  subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize));
  and_(rsp, Immediate(-frame_alignment));
  movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister);
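  // The caller's rsp ends up in the slot just above the argument area.  For
  // example, with 16-byte alignment and 3 argument slots, rsp is lowered by
  // (3 + 1) * kPointerSize = 32 bytes and then rounded down to a 16-byte
  // boundary, and the old rsp is stored at [rsp + 24] so that CallCFunction
  // can restore it with a single load.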
}


void MacroAssembler::CallCFunction(ExternalReference function,
                                   int num_arguments) {
  LoadAddress(rax, function);
  CallCFunction(rax, num_arguments);
}


void MacroAssembler::CallCFunction(Register function, int num_arguments) {
  ASSERT(has_frame());
  // Check stack alignment.
  if (emit_debug_code()) {
    CheckStackAlignment();
  }

  call(function);
  ASSERT(OS::ActivationFrameAlignment() != 0);
  ASSERT(num_arguments >= 0);
  int argument_slots_on_stack =
      ArgumentStackSlotsForCFunctionCall(num_arguments);
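  // Restore the rsp value that PrepareCallCFunction saved just above the
  // argument slots.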
  movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize));
}


bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
  if (r1.is(r2)) return true;
  if (r1.is(r3)) return true;
  if (r1.is(r4)) return true;
  if (r2.is(r3)) return true;
  if (r2.is(r4)) return true;
  if (r3.is(r4)) return true;
  return false;
}


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address),
      size_(size),
      masm_(NULL, address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


void MacroAssembler::CheckPageFlag(
    Register object,
    Register scratch,
    int mask,
    Condition cc,
    Label* condition_met,
    Label::Distance condition_met_distance) {
  ASSERT(cc == zero || cc == not_zero);
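  // Clearing the low kPageAlignmentMask bits of the object address yields the
  // start of its page, i.e. the MemoryChunk header whose flags word is tested
  // below.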
  if (scratch.is(object)) {
    and_(scratch, Immediate(~Page::kPageAlignmentMask));
  } else {
    movq(scratch, Immediate(~Page::kPageAlignmentMask));
    and_(scratch, object);
  }
  if (mask < (1 << kBitsPerByte)) {
    testb(Operand(scratch, MemoryChunk::kFlagsOffset),
          Immediate(static_cast<uint8_t>(mask)));
  } else {
    testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(mask));
  }
  j(cc, condition_met, condition_met_distance);
}


void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
                                        Register scratch,
                                        Label* if_deprecated) {
  if (map->CanBeDeprecated()) {
    Move(scratch, map);
    movq(scratch, FieldOperand(scratch, Map::kBitField3Offset));
    SmiToInteger32(scratch, scratch);
    and_(scratch, Immediate(Map::Deprecated::kMask));
    j(not_zero, if_deprecated);
  }
}


void MacroAssembler::JumpIfBlack(Register object,
                                 Register bitmap_scratch,
                                 Register mask_scratch,
                                 Label* on_black,
                                 Label::Distance on_black_distance) {
  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(object, bitmap_scratch, mask_scratch);

  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  // The mask_scratch register contains a 1 at the position of the first bit
  // and a 0 at all other positions, including the position of the second bit.
  movq(rcx, mask_scratch);
  // Make rcx into a mask that covers both marking bits using the operation
  // rcx = mask | (mask << 1).
  lea(rcx, Operand(mask_scratch, mask_scratch, times_2, 0));
  // Note that we are using a 4-byte aligned 8-byte load.
  and_(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize));
  cmpq(mask_scratch, rcx);
  j(equal, on_black, on_black_distance);
}


// Detect some, but not all, common pointer-free objects.  This is used by the
// incremental write barrier which doesn't care about oddballs (they are always
// marked black immediately so this code is not hit).
void MacroAssembler::JumpIfDataObject(
    Register value,
    Register scratch,
    Label* not_data_object,
    Label::Distance not_data_object_distance) {
  Label is_data_object;
  movq(scratch, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  j(equal, &is_data_object, Label::kNear);
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
        Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, not_data_object, not_data_object_distance);
  bind(&is_data_object);
}


void MacroAssembler::GetMarkBits(Register addr_reg,
                                 Register bitmap_reg,
                                 Register mask_reg) {
  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
  movq(bitmap_reg, addr_reg);
  // Sign extended 32 bit immediate.
  and_(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
  movq(rcx, addr_reg);
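  // Each bitmap cell covers Bitmap::kBitsPerCell pointer-sized words, so the
  // byte offset of the cell for this address is
  //   (offset_in_page >> (kPointerSizeLog2 + kBitsPerCellLog2)) * kBytesPerCell,
  // which the single shift below computes (with 32-bit cells and 8-byte
  // pointers this would be shift == 5 + 3 - 2 == 6).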
  int shift =
      Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2;
  shrl(rcx, Immediate(shift));
  and_(rcx,
       Immediate((Page::kPageAlignmentMask >> shift) &
                 ~(Bitmap::kBytesPerCell - 1)));

  addq(bitmap_reg, rcx);
  movq(rcx, addr_reg);
  shrl(rcx, Immediate(kPointerSizeLog2));
  and_(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1));
  movl(mask_reg, Immediate(1));
  shl_cl(mask_reg);
}


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register mask_scratch,
    Label* value_is_white_and_not_data,
    Label::Distance distance) {
  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
  GetMarkBits(value, bitmap_scratch, mask_scratch);

  // If the value is black or grey we don't need to do anything.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  Label done;

  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
  j(not_zero, &done, Label::kNear);

  if (emit_debug_code()) {
    // Check for impossible bit pattern.
    Label ok;
    push(mask_scratch);
    // shl.  May overflow making the check conservative.
    addq(mask_scratch, mask_scratch);
    testq(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);
    j(zero, &ok, Label::kNear);
    int3();
    bind(&ok);
    pop(mask_scratch);
  }

  // Value is white.  We check whether it is data that doesn't need scanning.
  // Currently only checks for HeapNumber and non-cons strings.
  Register map = rcx;  // Holds map while checking type.
  Register length = rcx;  // Holds length of object after checking type.
  Label not_heap_number;
  Label is_data_object;

  // Check for heap-number
  movq(map, FieldOperand(value, HeapObject::kMapOffset));
  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
  j(not_equal, &not_heap_number, Label::kNear);
  movq(length, Immediate(HeapNumber::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_heap_number);
  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = rcx;
  movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
  testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask));
  j(not_zero, value_is_white_and_not_data);
  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  Label not_external;
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  testb(instance_type, Immediate(kExternalStringTag));
  j(zero, &not_external, Label::kNear);
  movq(length, Immediate(ExternalString::kSize));
  jmp(&is_data_object, Label::kNear);

  bind(&not_external);
  // Sequential string, either ASCII or UC16.
  ASSERT(kOneByteStringTag == 0x04);
  and_(length, Immediate(kStringEncodingMask));
  xor_(length, Immediate(kStringEncodingMask));
  addq(length, Immediate(0x04));
  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
  imul(length, FieldOperand(value, String::kLengthOffset));
  shr(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
  addq(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
  and_(length, Immediate(~kObjectAlignmentMask));
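  // Worked example, assuming the usual x64 smi layout (kSmiTagSize == 1,
  // kSmiShiftSize == 31): for a sequential ASCII string of length 10 the
  // multiply yields 4 * (10 << 32); shifting right by 2 + 1 + 31 = 34
  // recovers the 10 bytes of character data, and the final add/and round
  // header plus data up to the next kObjectAlignment boundary.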

  bind(&is_data_object);
  // Value is a data object, and it is white.  Mark it black.  Since we know
  // that the object is white we can make it black by flipping one bit.
  or_(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch);

  and_(bitmap_scratch, Immediate(~Page::kPageAlignmentMask));
  addl(Operand(bitmap_scratch, MemoryChunk::kLiveBytesOffset), length);

  bind(&done);
}


void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
  Label next, start;
  Register empty_fixed_array_value = r8;
  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  movq(rcx, rax);
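  // rcx walks the prototype chain starting at the receiver (passed in rax),
  // and rbx holds the current object's map.  The receiver itself may have a
  // non-empty enum cache, but every object further up the chain must have an
  // empty cache, and no object on the chain may have elements; otherwise we
  // fall back to the runtime.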

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(Map::kInvalidEnumCache));
  j(equal, call_runtime);

  jmp(&start);

  bind(&next);

  movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLength(rdx, rbx);
  Cmp(rdx, Smi::FromInt(0));
  j(not_equal, call_runtime);

  bind(&start);

  // Check that there are no elements. Register rcx contains the current JS
  // object we've reached through the prototype chain.
  cmpq(empty_fixed_array_value,
       FieldOperand(rcx, JSObject::kElementsOffset));
  j(not_equal, call_runtime);

  movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
  cmpq(rcx, null_value);
  j(not_equal, &next);
}

void MacroAssembler::TestJSArrayForAllocationMemento(
    Register receiver_reg,
    Register scratch_reg,
    Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  lea(scratch_reg, Operand(receiver_reg,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
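  // scratch_reg now points one past the end of an AllocationMemento that
  // would directly follow the JSArray.  If that address lies outside the
  // used part of new space there can be no memento; otherwise the map word
  // at scratch_reg - AllocationMemento::kSize is compared against the
  // allocation-memento map.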
  movq(kScratchRegister, new_space_start);
  cmpq(scratch_reg, kScratchRegister);
  j(less, no_memento_found);
  cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, no_memento_found);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
}


void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
                                            Register object,
                                            Register object_size) {
  FrameScope frame(this, StackFrame::EXIT);
  PushSafepointRegisters();
  PrepareCallCFunction(3);
  // In case object is rdx
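  // (rdx is a C argument register on both the Windows and System V ABIs, so
  // object is parked in kScratchRegister before the argument registers are
  // overwritten below.)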
  movq(kScratchRegister, object);
  movq(arg_reg_3, object_size);
  movq(arg_reg_2, kScratchRegister);
  movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
  CallCFunction(
      ExternalReference::record_object_allocation_function(isolate), 3);
  PopSafepointRegisters();
}


void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
                                            Register object,
                                            int object_size) {
  FrameScope frame(this, StackFrame::EXIT);
  PushSafepointRegisters();
  PrepareCallCFunction(3);
  movq(arg_reg_2, object);
  movq(arg_reg_3, Immediate(object_size));
  movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
  CallCFunction(
      ExternalReference::record_object_allocation_function(isolate), 3);
  PopSafepointRegisters();
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64