The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.

Please select the desired protocol below to get the URL.

This URL has Read-Only access.

Statistics
| Branch: | Revision:

main_repo / deps / v8 / src / x64 / macro-assembler-x64.h @ f230a1cf

History | View | Annotate | Download (65 KB)

1
// Copyright 2012 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
4
// met:
5
//
6
//     * Redistributions of source code must retain the above copyright
7
//       notice, this list of conditions and the following disclaimer.
8
//     * Redistributions in binary form must reproduce the above
9
//       copyright notice, this list of conditions and the following
10
//       disclaimer in the documentation and/or other materials provided
11
//       with the distribution.
12
//     * Neither the name of Google Inc. nor the names of its
13
//       contributors may be used to endorse or promote products derived
14
//       from this software without specific prior written permission.
15
//
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27

    
28
#ifndef V8_X64_MACRO_ASSEMBLER_X64_H_
29
#define V8_X64_MACRO_ASSEMBLER_X64_H_
30

    
31
#include "assembler.h"
32
#include "frames.h"
33
#include "v8globals.h"
34

    
35
namespace v8 {
36
namespace internal {
37

    
38
// Default scratch register used by MacroAssembler (and other code that needs
39
// a spare register). The register isn't callee save, and not used by the
40
// function calling convention.
41
const Register kScratchRegister = { 10 };      // r10.
42
const Register kSmiConstantRegister = { 12 };  // r12 (callee save).
43
const Register kRootRegister = { 13 };         // r13 (callee save).
44
// Value of smi in kSmiConstantRegister.
45
const int kSmiConstantRegisterValue = 1;
46
// Actual value of root register is offset from the root array's start
47
// to take advantage of negitive 8-bit displacement values.
48
const int kRootRegisterBias = 128;
49

    
50
// Convenience for platform-independent signatures.
51
typedef Operand MemOperand;
52

    
53
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
54
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
55

    
56
bool AreAliased(Register r1, Register r2, Register r3, Register r4);
57

    
58
// Forward declaration.
59
class JumpTarget;
60

    
61
struct SmiIndex {
62
  SmiIndex(Register index_register, ScaleFactor scale)
63
      : reg(index_register),
64
        scale(scale) {}
65
  Register reg;
66
  ScaleFactor scale;
67
};
68

    
69

    
70
// MacroAssembler implements a collection of frequently used macros.
71
class MacroAssembler: public Assembler {
72
 public:
73
  // The isolate parameter can be NULL if the macro assembler should
74
  // not use isolate-dependent functionality. In this case, it's the
75
  // responsibility of the caller to never invoke such function on the
76
  // macro assembler.
77
  MacroAssembler(Isolate* isolate, void* buffer, int size);
78

    
79
  // Prevent the use of the RootArray during the lifetime of this
80
  // scope object.
81
  class NoRootArrayScope BASE_EMBEDDED {
82
   public:
83
    explicit NoRootArrayScope(MacroAssembler* assembler)
84
        : variable_(&assembler->root_array_available_),
85
          old_value_(assembler->root_array_available_) {
86
      assembler->root_array_available_ = false;
87
    }
88
    ~NoRootArrayScope() {
89
      *variable_ = old_value_;
90
    }
91
   private:
92
    bool* variable_;
93
    bool old_value_;
94
  };
95

    
96
  // Operand pointing to an external reference.
97
  // May emit code to set up the scratch register. The operand is
98
  // only guaranteed to be correct as long as the scratch register
99
  // isn't changed.
100
  // If the operand is used more than once, use a scratch register
101
  // that is guaranteed not to be clobbered.
102
  Operand ExternalOperand(ExternalReference reference,
103
                          Register scratch = kScratchRegister);
104
  // Loads and stores the value of an external reference.
105
  // Special case code for load and store to take advantage of
106
  // load_rax/store_rax if possible/necessary.
107
  // For other operations, just use:
108
  //   Operand operand = ExternalOperand(extref);
109
  //   operation(operand, ..);
110
  void Load(Register destination, ExternalReference source);
111
  void Store(ExternalReference destination, Register source);
112
  // Loads the address of the external reference into the destination
113
  // register.
114
  void LoadAddress(Register destination, ExternalReference source);
115
  // Returns the size of the code generated by LoadAddress.
116
  // Used by CallSize(ExternalReference) to find the size of a call.
117
  int LoadAddressSize(ExternalReference source);
118
  // Pushes the address of the external reference onto the stack.
119
  void PushAddress(ExternalReference source);
120

    
121
  // Operations on roots in the root-array.
122
  void LoadRoot(Register destination, Heap::RootListIndex index);
123
  void StoreRoot(Register source, Heap::RootListIndex index);
124
  // Load a root value where the index (or part of it) is variable.
125
  // The variable_offset register is added to the fixed_offset value
126
  // to get the index into the root-array.
127
  void LoadRootIndexed(Register destination,
128
                       Register variable_offset,
129
                       int fixed_offset);
130
  void CompareRoot(Register with, Heap::RootListIndex index);
131
  void CompareRoot(const Operand& with, Heap::RootListIndex index);
132
  void PushRoot(Heap::RootListIndex index);
133

    
134
  // These functions do not arrange the registers in any particular order so
135
  // they are not useful for calls that can cause a GC.  The caller can
136
  // exclude up to 3 registers that do not need to be saved and restored.
137
  void PushCallerSaved(SaveFPRegsMode fp_mode,
138
                       Register exclusion1 = no_reg,
139
                       Register exclusion2 = no_reg,
140
                       Register exclusion3 = no_reg);
141
  void PopCallerSaved(SaveFPRegsMode fp_mode,
142
                      Register exclusion1 = no_reg,
143
                      Register exclusion2 = no_reg,
144
                      Register exclusion3 = no_reg);
145

    
146
// ---------------------------------------------------------------------------
147
// GC Support
148

    
149

    
150
  enum RememberedSetFinalAction {
151
    kReturnAtEnd,
152
    kFallThroughAtEnd
153
  };
154

    
155
  // Record in the remembered set the fact that we have a pointer to new space
156
  // at the address pointed to by the addr register.  Only works if addr is not
157
  // in new space.
158
  void RememberedSetHelper(Register object,  // Used for debug code.
159
                           Register addr,
160
                           Register scratch,
161
                           SaveFPRegsMode save_fp,
162
                           RememberedSetFinalAction and_then);
163

    
164
  void CheckPageFlag(Register object,
165
                     Register scratch,
166
                     int mask,
167
                     Condition cc,
168
                     Label* condition_met,
169
                     Label::Distance condition_met_distance = Label::kFar);
170

    
171
  void CheckMapDeprecated(Handle<Map> map,
172
                          Register scratch,
173
                          Label* if_deprecated);
174

    
175
  // Check if object is in new space.  Jumps if the object is not in new space.
176
  // The register scratch can be object itself, but scratch will be clobbered.
177
  void JumpIfNotInNewSpace(Register object,
178
                           Register scratch,
179
                           Label* branch,
180
                           Label::Distance distance = Label::kFar) {
181
    InNewSpace(object, scratch, not_equal, branch, distance);
182
  }
183

    
184
  // Check if object is in new space.  Jumps if the object is in new space.
185
  // The register scratch can be object itself, but it will be clobbered.
186
  void JumpIfInNewSpace(Register object,
187
                        Register scratch,
188
                        Label* branch,
189
                        Label::Distance distance = Label::kFar) {
190
    InNewSpace(object, scratch, equal, branch, distance);
191
  }
192

    
193
  // Check if an object has the black incremental marking color.  Also uses rcx!
194
  void JumpIfBlack(Register object,
195
                   Register scratch0,
196
                   Register scratch1,
197
                   Label* on_black,
198
                   Label::Distance on_black_distance = Label::kFar);
199

    
200
  // Detects conservatively whether an object is data-only, i.e. it does not
440
  // need to be scanned by the garbage collector.
202
  void JumpIfDataObject(Register value,
203
                        Register scratch,
204
                        Label* not_data_object,
205
                        Label::Distance not_data_object_distance);
206

    
207
  // Checks the color of an object.  If the object is already grey or black
208
  // then we just fall through, since it is already live.  If it is white and
209
  // we can determine that it doesn't need to be scanned, then we just mark it
210
  // black and fall through.  For the rest we jump to the label so the
211
  // incremental marker can fix its assumptions.
212
  void EnsureNotWhite(Register object,
213
                      Register scratch1,
214
                      Register scratch2,
215
                      Label* object_is_white_and_not_data,
216
                      Label::Distance distance);
217

    
218
  // Notify the garbage collector that we wrote a pointer into an object.
219
  // |object| is the object being stored into, |value| is the object being
220
  // stored.  value and scratch registers are clobbered by the operation.
221
  // The offset is the offset from the start of the object, not the offset from
222
  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
223
  void RecordWriteField(
224
      Register object,
225
      int offset,
226
      Register value,
227
      Register scratch,
228
      SaveFPRegsMode save_fp,
229
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
230
      SmiCheck smi_check = INLINE_SMI_CHECK);
231

    
232
  // As above, but the offset has the tag presubtracted.  For use with
233
  // Operand(reg, off).
234
  void RecordWriteContextSlot(
235
      Register context,
236
      int offset,
237
      Register value,
238
      Register scratch,
239
      SaveFPRegsMode save_fp,
240
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
241
      SmiCheck smi_check = INLINE_SMI_CHECK) {
242
    RecordWriteField(context,
243
                     offset + kHeapObjectTag,
244
                     value,
245
                     scratch,
246
                     save_fp,
247
                     remembered_set_action,
248
                     smi_check);
249
  }
250

    
251
  // Notify the garbage collector that we wrote a pointer into a fixed array.
252
  // |array| is the array being stored into, |value| is the
253
  // object being stored.  |index| is the array index represented as a non-smi.
254
  // All registers are clobbered by the operation RecordWriteArray
255
  // filters out smis so it does not update the write barrier if the
256
  // value is a smi.
257
  void RecordWriteArray(
258
      Register array,
259
      Register value,
260
      Register index,
261
      SaveFPRegsMode save_fp,
262
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
263
      SmiCheck smi_check = INLINE_SMI_CHECK);
264

    
265
  // For page containing |object| mark region covering |address|
266
  // dirty. |object| is the object being stored into, |value| is the
267
  // object being stored. The address and value registers are clobbered by the
268
  // operation.  RecordWrite filters out smis so it does not update
269
  // the write barrier if the value is a smi.
270
  void RecordWrite(
271
      Register object,
272
      Register address,
273
      Register value,
274
      SaveFPRegsMode save_fp,
275
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
276
      SmiCheck smi_check = INLINE_SMI_CHECK);
277

    
278
#ifdef ENABLE_DEBUGGER_SUPPORT
279
  // ---------------------------------------------------------------------------
280
  // Debugger Support
281

    
282
  void DebugBreak();
283
#endif
284

    
285
  // Generates function and stub prologue code.
286
  void Prologue(PrologueFrameMode frame_mode);
287

    
288
  // Enter specific kind of exit frame; either in normal or
289
  // debug mode. Expects the number of arguments in register rax and
290
  // sets up the number of arguments in register rdi and the pointer
291
  // to the first argument in register rsi.
292
  //
293
  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
294
  // accessible via StackSpaceOperand.
295
  void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
296

    
297
  // Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
298
  // memory (not GCed) on the stack accessible via StackSpaceOperand.
299
  void EnterApiExitFrame(int arg_stack_space);
300

    
301
  // Leave the current exit frame. Expects/provides the return value in
302
  // register rax:rdx (untouched) and the pointer to the first
303
  // argument in register rsi.
304
  void LeaveExitFrame(bool save_doubles = false);
305

    
306
  // Leave the current exit frame. Expects/provides the return value in
307
  // register rax (untouched).
308
  void LeaveApiExitFrame(bool restore_context);
309

    
310
  // Push and pop the registers that can hold pointers.
311
  void PushSafepointRegisters() { Pushad(); }
312
  void PopSafepointRegisters() { Popad(); }
313
  // Store the value in register src in the safepoint register stack
314
  // slot for register dst.
315
  void StoreToSafepointRegisterSlot(Register dst, const Immediate& imm);
316
  void StoreToSafepointRegisterSlot(Register dst, Register src);
317
  void LoadFromSafepointRegisterSlot(Register dst, Register src);
318

    
319
  void InitializeRootRegister() {
320
    ExternalReference roots_array_start =
321
        ExternalReference::roots_array_start(isolate());
322
    movq(kRootRegister, roots_array_start);
323
    addq(kRootRegister, Immediate(kRootRegisterBias));
324
  }
325

    
326
  // ---------------------------------------------------------------------------
327
  // JavaScript invokes
328

    
329
  // Set up call kind marking in rcx. The method takes rcx as an
330
  // explicit first parameter to make the code more readable at the
331
  // call sites.
332
  void SetCallKind(Register dst, CallKind kind);
333

    
334
  // Invoke the JavaScript function code by either calling or jumping.
335
  void InvokeCode(Register code,
336
                  const ParameterCount& expected,
337
                  const ParameterCount& actual,
338
                  InvokeFlag flag,
339
                  const CallWrapper& call_wrapper,
340
                  CallKind call_kind);
341

    
342
  void InvokeCode(Handle<Code> code,
343
                  const ParameterCount& expected,
344
                  const ParameterCount& actual,
345
                  RelocInfo::Mode rmode,
346
                  InvokeFlag flag,
347
                  const CallWrapper& call_wrapper,
348
                  CallKind call_kind);
349

    
350
  // Invoke the JavaScript function in the given register. Changes the
351
  // current context to the context in the function before invoking.
352
  void InvokeFunction(Register function,
353
                      const ParameterCount& actual,
354
                      InvokeFlag flag,
355
                      const CallWrapper& call_wrapper,
356
                      CallKind call_kind);
357

    
358
  void InvokeFunction(Handle<JSFunction> function,
359
                      const ParameterCount& expected,
360
                      const ParameterCount& actual,
361
                      InvokeFlag flag,
362
                      const CallWrapper& call_wrapper,
363
                      CallKind call_kind);
364

    
365
  // Invoke specified builtin JavaScript function. Adds an entry to
366
  // the unresolved list if the name does not resolve.
367
  void InvokeBuiltin(Builtins::JavaScript id,
368
                     InvokeFlag flag,
369
                     const CallWrapper& call_wrapper = NullCallWrapper());
370

    
371
  // Store the function for the given builtin in the target register.
372
  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
373

    
374
  // Store the code object for the given builtin in the target register.
375
  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
376

    
377

    
378
  // ---------------------------------------------------------------------------
379
  // Smi tagging, untagging and operations on tagged smis.
380

    
381
  // Support for constant splitting.
382
  bool IsUnsafeInt(const int32_t x);
383
  void SafeMove(Register dst, Smi* src);
384
  void SafePush(Smi* src);
385

    
386
  void InitializeSmiConstantRegister() {
387
    movq(kSmiConstantRegister,
388
         reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
389
         RelocInfo::NONE64);
390
  }
391

    
392
  // Conversions between tagged smi values and non-tagged integer values.
393

    
394
  // Tag an integer value. The result must be known to be a valid smi value.
395
  // Only uses the low 32 bits of the src register. Sets the N and Z flags
396
  // based on the value of the resulting smi.
397
  void Integer32ToSmi(Register dst, Register src);
398

    
399
  // Stores an integer32 value into a memory field that already holds a smi.
400
  void Integer32ToSmiField(const Operand& dst, Register src);
401

    
402
  // Adds constant to src and tags the result as a smi.
403
  // Result must be a valid smi.
404
  void Integer64PlusConstantToSmi(Register dst, Register src, int constant);
405

    
406
  // Convert smi to 32-bit integer. I.e., not sign extended into
407
  // high 32 bits of destination.
408
  void SmiToInteger32(Register dst, Register src);
409
  void SmiToInteger32(Register dst, const Operand& src);
410

    
411
  // Convert smi to 64-bit integer (sign extended if necessary).
412
  void SmiToInteger64(Register dst, Register src);
413
  void SmiToInteger64(Register dst, const Operand& src);
414

    
415
  // Multiply a positive smi's integer value by a power of two.
416
  // Provides result as 64-bit integer value.
417
  void PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
418
                                             Register src,
419
                                             int power);
420

    
421
  // Divide a positive smi's integer value by a power of two.
422
  // Provides result as 32-bit integer value.
423
  void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
424
                                           Register src,
425
                                           int power);
426

    
427
  // Perform the logical or of two smi values and return a smi value.
428
  // If either argument is not a smi, jump to on_not_smis and retain
429
  // the original values of source registers. The destination register
430
  // may be changed if it's not one of the source registers.
431
  void SmiOrIfSmis(Register dst,
432
                   Register src1,
433
                   Register src2,
434
                   Label* on_not_smis,
435
                   Label::Distance near_jump = Label::kFar);
436

    
437

    
438
  // Simple comparison of smis.  Both sides must be known smis to use these,
439
  // otherwise use Cmp.
440
  void SmiCompare(Register smi1, Register smi2);
441
  void SmiCompare(Register dst, Smi* src);
442
  void SmiCompare(Register dst, const Operand& src);
443
  void SmiCompare(const Operand& dst, Register src);
444
  void SmiCompare(const Operand& dst, Smi* src);
445
  // Compare the int32 in src register to the value of the smi stored at dst.
446
  void SmiCompareInteger32(const Operand& dst, Register src);
447
  // Sets sign and zero flags depending on value of smi in register.
448
  void SmiTest(Register src);
449

    
450
  // Functions performing a check on a known or potential smi. Returns
451
  // a condition that is satisfied if the check is successful.
452

    
453
  // Is the value a tagged smi.
454
  Condition CheckSmi(Register src);
455
  Condition CheckSmi(const Operand& src);
456

    
457
  // Is the value a non-negative tagged smi.
458
  Condition CheckNonNegativeSmi(Register src);
459

    
460
  // Are both values tagged smis.
461
  Condition CheckBothSmi(Register first, Register second);
462

    
463
  // Are both values non-negative tagged smis.
464
  Condition CheckBothNonNegativeSmi(Register first, Register second);
465

    
466
  // Are either value a tagged smi.
467
  Condition CheckEitherSmi(Register first,
468
                           Register second,
469
                           Register scratch = kScratchRegister);
470

    
471
  // Is the value the minimum smi value (since we are using
472
  // two's complement numbers, negating the value is known to yield
473
  // a non-smi value).
474
  Condition CheckIsMinSmi(Register src);
475

    
476
  // Checks whether a 32-bit integer value is valid for conversion
477
  // to a smi.
478
  Condition CheckInteger32ValidSmiValue(Register src);
479

    
480
  // Checks whether a 32-bit unsigned integer value is valid for
481
  // conversion to a smi.
482
  Condition CheckUInteger32ValidSmiValue(Register src);
483

    
484
  // Check whether src is a Smi, and set dst to zero if it is a smi,
485
  // and to one if it isn't.
486
  void CheckSmiToIndicator(Register dst, Register src);
487
  void CheckSmiToIndicator(Register dst, const Operand& src);
488

    
489
  // Test-and-jump functions. Typically combines a check function
490
  // above with a conditional jump.
491

    
492
  // Jump if the value cannot be represented by a smi.
493
  void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
494
                              Label::Distance near_jump = Label::kFar);
495

    
496
  // Jump if the unsigned integer value cannot be represented by a smi.
497
  void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
498
                                  Label::Distance near_jump = Label::kFar);
499

    
500
  // Jump to label if the value is a tagged smi.
501
  void JumpIfSmi(Register src,
502
                 Label* on_smi,
503
                 Label::Distance near_jump = Label::kFar);
504

    
505
  // Jump to label if the value is not a tagged smi.
506
  void JumpIfNotSmi(Register src,
507
                    Label* on_not_smi,
508
                    Label::Distance near_jump = Label::kFar);
509

    
510
  // Jump to label if the value is not a non-negative tagged smi.
511
  void JumpUnlessNonNegativeSmi(Register src,
512
                                Label* on_not_smi,
513
                                Label::Distance near_jump = Label::kFar);
514

    
515
  // Jump to label if the value, which must be a tagged smi, has value equal
516
  // to the constant.
517
  void JumpIfSmiEqualsConstant(Register src,
518
                               Smi* constant,
519
                               Label* on_equals,
520
                               Label::Distance near_jump = Label::kFar);
521

    
522
  // Jump if either or both register are not smi values.
523
  void JumpIfNotBothSmi(Register src1,
524
                        Register src2,
525
                        Label* on_not_both_smi,
526
                        Label::Distance near_jump = Label::kFar);
527

    
528
  // Jump if either or both register are not non-negative smi values.
529
  void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
530
                                    Label* on_not_both_smi,
531
                                    Label::Distance near_jump = Label::kFar);
532

    
533
  // Operations on tagged smi values.
534

    
535
  // Smis represent a subset of integers. The subset is always equivalent to
536
  // a two's complement interpretation of a fixed number of bits.
537

    
538
  // Add an integer constant to a tagged smi, giving a tagged smi as result.
539
  // No overflow testing on the result is done.
540
  void SmiAddConstant(Register dst, Register src, Smi* constant);
541

    
542
  // Add an integer constant to a tagged smi, giving a tagged smi as result.
543
  // No overflow testing on the result is done.
544
  void SmiAddConstant(const Operand& dst, Smi* constant);
545

    
546
  // Add an integer constant to a tagged smi, giving a tagged smi as result,
547
  // or jumping to a label if the result cannot be represented by a smi.
548
  void SmiAddConstant(Register dst,
549
                      Register src,
550
                      Smi* constant,
551
                      Label* on_not_smi_result,
552
                      Label::Distance near_jump = Label::kFar);
553

    
554
  // Subtract an integer constant from a tagged smi, giving a tagged smi as
555
  // result. No testing on the result is done. Sets the N and Z flags
556
  // based on the value of the resulting integer.
557
  void SmiSubConstant(Register dst, Register src, Smi* constant);
558

    
559
  // Subtract an integer constant from a tagged smi, giving a tagged smi as
560
  // result, or jumping to a label if the result cannot be represented by a smi.
561
  void SmiSubConstant(Register dst,
562
                      Register src,
563
                      Smi* constant,
564
                      Label* on_not_smi_result,
565
                      Label::Distance near_jump = Label::kFar);
566

    
567
  // Negating a smi can give a negative zero or too large positive value.
568
  // NOTICE: This operation jumps on success, not failure!
569
  void SmiNeg(Register dst,
570
              Register src,
571
              Label* on_smi_result,
572
              Label::Distance near_jump = Label::kFar);
573

    
574
  // Adds smi values and return the result as a smi.
575
  // If dst is src1, then src1 will be destroyed if the operation is
576
  // successful, otherwise kept intact.
577
  void SmiAdd(Register dst,
578
              Register src1,
579
              Register src2,
580
              Label* on_not_smi_result,
581
              Label::Distance near_jump = Label::kFar);
582
  void SmiAdd(Register dst,
583
              Register src1,
584
              const Operand& src2,
585
              Label* on_not_smi_result,
586
              Label::Distance near_jump = Label::kFar);
587

    
588
  void SmiAdd(Register dst,
589
              Register src1,
590
              Register src2);
591

    
592
  // Subtracts smi values and return the result as a smi.
593
  // If dst is src1, then src1 will be destroyed if the operation is
594
  // successful, otherwise kept intact.
595
  void SmiSub(Register dst,
596
              Register src1,
597
              Register src2,
598
              Label* on_not_smi_result,
599
              Label::Distance near_jump = Label::kFar);
600
  void SmiSub(Register dst,
601
              Register src1,
602
              const Operand& src2,
603
              Label* on_not_smi_result,
604
              Label::Distance near_jump = Label::kFar);
605

    
606
  void SmiSub(Register dst,
607
              Register src1,
608
              Register src2);
609

    
610
  void SmiSub(Register dst,
611
              Register src1,
612
              const Operand& src2);
613

    
614
  // Multiplies smi values and return the result as a smi,
615
  // if possible.
616
  // If dst is src1, then src1 will be destroyed, even if
617
  // the operation is unsuccessful.
618
  void SmiMul(Register dst,
619
              Register src1,
620
              Register src2,
621
              Label* on_not_smi_result,
622
              Label::Distance near_jump = Label::kFar);
623

    
624
  // Divides one smi by another and returns the quotient.
625
  // Clobbers rax and rdx registers.
626
  void SmiDiv(Register dst,
627
              Register src1,
628
              Register src2,
629
              Label* on_not_smi_result,
630
              Label::Distance near_jump = Label::kFar);
631

    
632
  // Divides one smi by another and returns the remainder.
633
  // Clobbers rax and rdx registers.
634
  void SmiMod(Register dst,
635
              Register src1,
636
              Register src2,
637
              Label* on_not_smi_result,
638
              Label::Distance near_jump = Label::kFar);
639

    
640
  // Bitwise operations.
641
  void SmiNot(Register dst, Register src);
642
  void SmiAnd(Register dst, Register src1, Register src2);
643
  void SmiOr(Register dst, Register src1, Register src2);
644
  void SmiXor(Register dst, Register src1, Register src2);
645
  void SmiAndConstant(Register dst, Register src1, Smi* constant);
646
  void SmiOrConstant(Register dst, Register src1, Smi* constant);
647
  void SmiXorConstant(Register dst, Register src1, Smi* constant);
648

    
649
  void SmiShiftLeftConstant(Register dst,
650
                            Register src,
651
                            int shift_value);
652
  void SmiShiftLogicalRightConstant(Register dst,
653
                                  Register src,
654
                                  int shift_value,
655
                                  Label* on_not_smi_result,
656
                                  Label::Distance near_jump = Label::kFar);
657
  void SmiShiftArithmeticRightConstant(Register dst,
658
                                       Register src,
659
                                       int shift_value);
660

    
661
  // Shifts a smi value to the left, and returns the result if that is a smi.
662
  // Uses and clobbers rcx, so dst may not be rcx.
663
  void SmiShiftLeft(Register dst,
664
                    Register src1,
665
                    Register src2);
666
  // Shifts a smi value to the right, shifting in zero bits at the top, and
667
  // returns the unsigned interpretation of the result if that is a smi.
668
  // Uses and clobbers rcx, so dst may not be rcx.
669
  void SmiShiftLogicalRight(Register dst,
670
                            Register src1,
671
                            Register src2,
672
                            Label* on_not_smi_result,
673
                            Label::Distance near_jump = Label::kFar);
674
  // Shifts a smi value to the right, sign extending the top, and
675
  // returns the signed interpretation of the result. That will always
676
  // be a valid smi value, since it's numerically smaller than the
677
  // original.
678
  // Uses and clobbers rcx, so dst may not be rcx.
679
  void SmiShiftArithmeticRight(Register dst,
680
                               Register src1,
681
                               Register src2);
682

    
683
  // Specialized operations
684

    
685
  // Select the non-smi register of two registers where exactly one is a
686
  // smi. If neither are smis, jump to the failure label.
687
  void SelectNonSmi(Register dst,
688
                    Register src1,
689
                    Register src2,
690
                    Label* on_not_smis,
691
                    Label::Distance near_jump = Label::kFar);
692

    
693
  // Converts, if necessary, a smi to a combination of number and
694
  // multiplier to be used as a scaled index.
695
  // The src register contains a *positive* smi value. The shift is the
696
  // power of two to multiply the index value by (e.g.
697
  // to index by smi-value * kPointerSize, pass the smi and kPointerSizeLog2).
698
  // The returned index register may be either src or dst, depending
699
  // on what is most efficient. If src and dst are different registers,
700
  // src is always unchanged.
701
  SmiIndex SmiToIndex(Register dst, Register src, int shift);
702

    
703
  // Converts a positive smi to a negative index.
704
  SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
705

    
706
  // Add the value of a smi in memory to an int32 register.
707
  // Sets flags as a normal add.
708
  void AddSmiField(Register dst, const Operand& src);
709

    
710
  // Basic Smi operations.
  // Load the given smi constant into dst (delegates to LoadSmiConstant).
  void Move(Register dst, Smi* source) {
    LoadSmiConstant(dst, source);
  }
  // Store the given smi constant to memory. Materializes the constant in a
  // register obtained from GetSmiConstant, then stores it with movq.
  void Move(const Operand& dst, Smi* source) {
    Register constant = GetSmiConstant(source);
    movq(dst, constant);
  }
  // Push a smi constant onto the stack.
  void Push(Smi* smi);

  // Save away a 64-bit integer on the stack as two 32-bit integers
  // masquerading as smis so that the garbage collector skips visiting them.
  void PushInt64AsTwoSmis(Register src, Register scratch = kScratchRegister);
  // Reconstruct a 64-bit integer from two 32-bit integers masquerading as
  // smis on the top of stack.
  void PopInt64AsTwoSmis(Register dst, Register scratch = kScratchRegister);

  // Test a memory operand against a smi constant.
  // NOTE(review): presumably flag-setting, like a test instruction — confirm
  // in the .cc implementation.
  void Test(const Operand& dst, Smi* source);
  // ---------------------------------------------------------------------------
  // String macros.

  // Generate code to do a lookup in the number string cache. If the number in
  // the register object is found in the cache the generated code falls through
  // with the result in the result register. The object and the result register
  // can be the same. If the number is not found in the cache the code jumps to
  // the label not_found with only the content of register object unchanged.
  void LookupNumberStringCache(Register object,
                               Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* not_found);

  // If object is a string, its map is loaded into object_map.
  void JumpIfNotString(Register object,
                       Register object_map,
                       Label* not_string,
                       Label::Distance near_jump = Label::kFar);

  // Jump to on_not_both_flat_ascii unless both objects are flat sequential
  // ASCII strings.
  void JumpIfNotBothSequentialAsciiStrings(
      Register first_object,
      Register second_object,
      Register scratch1,
      Register scratch2,
      Label* on_not_both_flat_ascii,
      Label::Distance near_jump = Label::kFar);

  // Check whether the instance type represents a flat ASCII string. Jump to the
  // label if not. If the instance type can be scratched specify same register
  // for both instance type and scratch.
  void JumpIfInstanceTypeIsNotSequentialAscii(
      Register instance_type,
      Register scratch,
      Label* on_not_flat_ascii_string,
      Label::Distance near_jump = Label::kFar);

  // As above, but checks two instance types at once; jumps to on_fail if
  // either is not a flat sequential ASCII string.
  void JumpIfBothInstanceTypesAreNotSequentialAscii(
      Register first_object_instance_type,
      Register second_object_instance_type,
      Register scratch1,
      Register scratch2,
      Label* on_fail,
      Label::Distance near_jump = Label::kFar);

  // Checks if the given register or operand is a unique name.
  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
                           Label::Distance distance = Label::kFar);
  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
                           Label::Distance distance = Label::kFar);
  // ---------------------------------------------------------------------------
  // Macro instructions.

  // Load/store with specific representation.
  void Load(Register dst, const Operand& src, Representation r);
  void Store(const Operand& dst, Register src, Representation r);

  // Load a register with a long value as efficiently as possible.
  void Set(Register dst, int64_t x);
  void Set(const Operand& dst, int64_t x);

  // cvtsi2sd instruction only writes to the low 64-bit of dst register, which
  // hinders register renaming and makes dependence chains longer. So we use
  // xorps to clear the dst register before cvtsi2sd to solve this issue.
  void Cvtlsi2sd(XMMRegister dst, Register src);
  void Cvtlsi2sd(XMMRegister dst, const Operand& src);

  // Move if the registers are not identical.
  void Move(Register target, Register source);

  // Bit-field support.
  void TestBit(const Operand& dst, int bit_index);

  // Handle support
  void Move(Register dst, Handle<Object> source);
  void Move(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Handle<Object> source);
  void Cmp(const Operand& dst, Handle<Object> source);
  void Cmp(Register dst, Smi* src);
  void Cmp(const Operand& dst, Smi* src);
  void Push(Handle<Object> source);

  // Load a heap object and handle the case of new-space objects by
  // indirecting via a global cell.
  void MoveHeapObject(Register result, Handle<Object> object);

  // Load a global cell into a register.
  void LoadGlobalCell(Register dst, Handle<Cell> cell);

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the rsp register.
  void Drop(int stack_elements);
  // Thin capitalized wrappers over the raw assembler instructions.
  void Call(Label* target) { call(target); }
  void Push(Register src) { push(src); }
  void Pop(Register dst) { pop(dst); }
  // On x64 the return address is pushed/popped like any other stack word.
  void PushReturnAddressFrom(Register src) { push(src); }
  void PopReturnAddressTo(Register dst) { pop(dst); }
  // 64-bit moves (movq) between a GP register and memory, used for doubles.
  void MoveDouble(Register dst, const Operand& src) { movq(dst, src); }
  void MoveDouble(const Operand& dst, Register src) { movq(dst, src); }
  // Control Flow

  // Jump to a code target, external reference, or code object.
  void Jump(Address destination, RelocInfo::Mode rmode);
  void Jump(ExternalReference ext);
  void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);

  // Call a code target, external reference, or code object.
  void Call(Address destination, RelocInfo::Mode rmode);
  void Call(ExternalReference ext);
  void Call(Handle<Code> code_object,
            RelocInfo::Mode rmode,
            TypeFeedbackId ast_id = TypeFeedbackId::None());
  // The size of the code generated for different call instructions.
  int CallSize(Address destination, RelocInfo::Mode rmode) {
    return kCallSequenceLength;
  }
  int CallSize(ExternalReference ext);
  int CallSize(Handle<Code> code_object) {
    // Code calls use 32-bit relative addressing.
    return kShortCallInstructionLength;
  }
  int CallSize(Register target) {
    // Opcode: REX_opt FF /2 m64
    // One extra byte when the target register's high bit requires a REX
    // prefix.
    return (target.high_bit() != 0) ? 3 : 2;
  }
  int CallSize(const Operand& target) {
    // Opcode: REX_opt FF /2 m64
    return (target.requires_rex() ? 2 : 1) + target.operand_size();
  }
  // Emit call to the code we are currently generating.
  void CallSelf() {
    // Reinterpret the code object's handle location as a Code** and call it
    // as an ordinary code target.
    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
    Call(self, RelocInfo::CODE_TARGET);
  }
  // Non-x64 instructions.
  // Push/pop all general purpose registers.
  // Does not push rsp/rbp nor any of the assembler's special purpose registers
  // (kScratchRegister, kSmiConstantRegister, kRootRegister).
  void Pushad();
  void Popad();
  // Sets the stack as after performing Popad, without actually loading the
  // registers.
  void Dropad();

  // Compare object type for heap object.
  // Always use unsigned comparisons: above and below, not less and greater.
  // Incoming register is heap_object and outgoing register is map.
  // They may be the same register, and may be kScratchRegister.
  void CmpObjectType(Register heap_object, InstanceType type, Register map);

  // Compare instance type for map.
  // Always use unsigned comparisons: above and below, not less and greater.
  void CmpInstanceType(Register map, InstanceType type);

  // Check if a map for a JSObject indicates that the object has fast elements.
  // Jump to the specified label if it does not.
  void CheckFastElements(Register map,
                         Label* fail,
                         Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object can have both smi
  // and HeapObject elements.  Jump to the specified label if it does not.
  void CheckFastObjectElements(Register map,
                               Label* fail,
                               Label::Distance distance = Label::kFar);

  // Check if a map for a JSObject indicates that the object has fast smi only
  // elements.  Jump to the specified label if it does not.
  void CheckFastSmiElements(Register map,
                            Label* fail,
                            Label::Distance distance = Label::kFar);

  // Check to see if maybe_number can be stored as a double in
  // FastDoubleElements. If it can, store it at the index specified by index in
  // the FastDoubleElements array elements, otherwise jump to fail.  Note that
  // index must not be smi-tagged.
  void StoreNumberToDoubleElements(Register maybe_number,
                                   Register elements,
                                   Register index,
                                   XMMRegister xmm_scratch,
                                   Label* fail,
                                   int elements_offset = 0);
  // Compare an object's map with the specified map and its transitioned
  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
  // result of map compare. If multiple map compares are required, the compare
  // sequences branches to early_success.
  void CompareMap(Register obj,
                  Handle<Map> map,
                  Label* early_success);

  // Check if the map of an object is equal to a specified map and branch to
  // label if not. Skip the smi check if not required (object is known to be a
  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
  // against maps that are ElementsKind transition maps of the specified map.
  void CheckMap(Register obj,
                Handle<Map> map,
                Label* fail,
                SmiCheckType smi_check_type);

  // Check if the map of an object is equal to a specified map and branch to a
  // specified target if equal. Skip the smi check if not required (object is
  // known to be a heap object).
  void DispatchMap(Register obj,
                   Register unused,
                   Handle<Map> map,
                   Handle<Code> success,
                   SmiCheckType smi_check_type);

  // Check if the object in register heap_object is a string. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectStringType(Register heap_object,
                               Register map,
                               Register instance_type);

  // Check if the object in register heap_object is a name. Afterwards the
  // register map contains the object map and the register instance_type
  // contains the instance_type. The registers map and instance_type can be the
  // same in which case it contains the instance type afterwards. Either of the
  // registers map and instance_type can be the same as heap_object.
  Condition IsObjectNameType(Register heap_object,
                             Register map,
                             Register instance_type);
  // FCmp compares and pops the two values on top of the FPU stack.
  // The flag results are similar to integer cmp, but requires unsigned
  // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
  void FCmp();

  // Clamp the value in reg to the uint8 range [0, 255].
  void ClampUint8(Register reg);

  // Clamp a double to the uint8 range, leaving the result in result_reg.
  void ClampDoubleToUint8(XMMRegister input_reg,
                          XMMRegister temp_xmm_reg,
                          Register result_reg);

  // Slow-path truncation to int32. offset is the displacement of the double
  // payload relative to input_reg (defaults to a heap number's value field).
  void SlowTruncateToI(Register result_reg, Register input_reg,
      int offset = HeapNumber::kValueOffset - kHeapObjectTag);

  // Truncate a heap number / double to an int32 in result_reg.
  void TruncateHeapNumberToI(Register result_reg, Register input_reg);
  void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);

  // Convert a double to an integer, jumping to conversion_failed if the
  // value is not exactly representable (minus_zero_mode controls -0
  // handling).
  void DoubleToI(Register result_reg, XMMRegister input_reg,
      XMMRegister scratch, MinusZeroMode minus_zero_mode,
      Label* conversion_failed, Label::Distance dst = Label::kFar);

  // As DoubleToI, but for a tagged input; jumps to lost_precision on
  // inexact conversion.
  void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
      MinusZeroMode minus_zero_mode, Label* lost_precision,
      Label::Distance dst = Label::kFar);

  // Load the unsigned 32-bit value in src into XMM register dst.
  void LoadUint32(XMMRegister dst, Register src, XMMRegister scratch);

  // Descriptor-array / enum-cache accessors on a map.
  void LoadInstanceDescriptors(Register map, Register descriptors);
  void EnumLength(Register dst, Register map);
  void NumberOfOwnDescriptors(Register dst, Register map);
  // Decode a bit-field from a smi-tagged value in |reg|, leaving the decoded
  // field smi-tagged in |reg|: shift past the smi tag plus the field's shift,
  // mask out the field, then re-apply the smi shift.
  template<typename Field>
  void DecodeField(Register reg) {
    static const int shift = Field::kShift + kSmiShift;
    static const int mask = Field::kMask >> Field::kShift;
    shr(reg, Immediate(shift));
    and_(reg, Immediate(mask));
    shl(reg, Immediate(kSmiShift));
  }
  // Abort execution if argument is not a number, enabled via --debug-code.
  void AssertNumber(Register object);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);

  // Abort execution if argument is not a smi, enabled via --debug-code.
  void AssertSmi(Register object);
  void AssertSmi(const Operand& object);

  // Abort execution if a 64 bit register containing a 32 bit payload does not
  // have zeros in the top 32 bits, enabled via --debug-code.
  void AssertZeroExtended(Register reg);

  // Abort execution if argument is not a string, enabled via --debug-code.
  void AssertString(Register object);

  // Abort execution if argument is not a name, enabled via --debug-code.
  void AssertName(Register object);

  // Abort execution if argument is not the root value with the given index,
  // enabled via --debug-code.
  void AssertRootValue(Register src,
                       Heap::RootListIndex root_value_index,
                       BailoutReason reason);

  // ---------------------------------------------------------------------------
  // Exception handling

  // Push a new try handler and link it into try handler chain.
  void PushTryHandler(StackHandler::Kind kind, int handler_index);

  // Unlink the stack handler on top of the stack from the try handler chain.
  void PopTryHandler();

  // Activate the top handler in the try handler chain and pass the
  // thrown value.
  void Throw(Register value);

  // Propagate an uncatchable exception out of the current JS stack.
  void ThrowUncatchable(Register value);
  // ---------------------------------------------------------------------------
  // Inline caching support

  // Generate code for checking access rights - used for security checks
  // on access to global objects across environments. The holder register
  // is left untouched, but the scratch register and kScratchRegister,
  // which must be different, are clobbered.
  void CheckAccessGlobalProxy(Register holder_reg,
                              Register scratch,
                              Label* miss);

  // NOTE(review): presumably computes the seeded number hash of r0 in place,
  // clobbering scratch — confirm against the .cc implementation.
  void GetNumberHash(Register r0, Register scratch);

  // Look up |key| in a number dictionary held in |elements|; jumps to |miss|
  // on failure. NOTE(review): register roles r0-r2 and the exact success
  // contract for |result| should be confirmed in the .cc implementation.
  void LoadFromNumberDictionary(Label* miss,
                                Register elements,
                                Register key,
                                Register r0,
                                Register r1,
                                Register r2,
                                Register result);
  // ---------------------------------------------------------------------------
  // Allocation support

  // Allocate an object in new space or old pointer space. If the given space
  // is exhausted control continues at the gc_required label. The allocated
  // object is returned in result and end of the new object is returned in
  // result_end. The register scratch can be passed as no_reg in which case
  // an additional object reference will be added to the reloc info. The
  // returned pointers in result and result_end have not yet been tagged as
  // heap objects. If result_contains_top_on_entry is true the content of
  // result is known to be the allocation top on entry (could be result_end
  // from a previous call). If result_contains_top_on_entry is true scratch
  // should be no_reg as it is never used.
  void Allocate(int object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  // Variant taking a header size plus a scaled element count (see the
  // implementation for the exact size computation).
  void Allocate(int header_size,
                ScaleFactor element_size,
                Register element_count,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  // Variant with the object size held in a register.
  void Allocate(Register object_size,
                Register result,
                Register result_end,
                Register scratch,
                Label* gc_required,
                AllocationFlags flags);

  // Record a JS object allocation if allocations tracking mode is on.
  void RecordObjectAllocation(Isolate* isolate,
                              Register object,
                              Register object_size);

  void RecordObjectAllocation(Isolate* isolate,
                              Register object,
                              int object_size);

  // Undo allocation in new space. The object passed and objects allocated after
  // it will no longer be allocated. Make sure that no pointers are left to the
  // object(s) no longer allocated as they would be invalid when allocation is
  // un-done.
  void UndoAllocationInNewSpace(Register object);
  // Allocate a heap number in new space with undefined value. Returns
  // tagged pointer in result register, or jumps to gc_required if new
  // space is full.
  void AllocateHeapNumber(Register result,
                          Register scratch,
                          Label* gc_required);

  // Allocate a sequential string. All the header fields of the string object
  // are initialized.
  void AllocateTwoByteString(Register result,
                             Register length,
                             Register scratch1,
                             Register scratch2,
                             Register scratch3,
                             Label* gc_required);
  void AllocateAsciiString(Register result,
                           Register length,
                           Register scratch1,
                           Register scratch2,
                           Register scratch3,
                           Label* gc_required);

  // Allocate a raw cons string object. Only the map field of the result is
  // initialized.
  void AllocateTwoByteConsString(Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  void AllocateAsciiConsString(Register result,
                               Register scratch1,
                               Register scratch2,
                               Label* gc_required);

  // Allocate a raw sliced string object. Only the map field of the result is
  // initialized.
  void AllocateTwoByteSlicedString(Register result,
                                   Register scratch1,
                                   Register scratch2,
                                   Label* gc_required);
  void AllocateAsciiSlicedString(Register result,
                                 Register scratch1,
                                 Register scratch2,
                                 Label* gc_required);
  // ---------------------------------------------------------------------------
  // Support functions.

  // Check if result is zero and op is negative.
  void NegativeZeroTest(Register result, Register op, Label* then_label);

  // Check if result is zero and op is negative in code using jump targets.
  void NegativeZeroTest(CodeGenerator* cgen,
                        Register result,
                        Register op,
                        JumpTarget* then_target);

  // Check if result is zero and any of op1 and op2 are negative.
  // Register scratch is destroyed, and it must be different from op2.
  void NegativeZeroTest(Register result, Register op1, Register op2,
                        Register scratch, Label* then_label);

  // Try to get function prototype of a function and puts the value in
  // the result register. Checks that the function really is a
  // function and jumps to the miss label if the fast checks fail. The
  // function register will be untouched; the other register may be
  // clobbered.
  void TryGetFunctionPrototype(Register function,
                               Register result,
                               Label* miss,
                               bool miss_on_bound_function = false);

  // Generates code for reporting that an illegal operation has
  // occurred.
  void IllegalOperation(int num_arguments);

  // Picks out an array index from the hash field.
  // Register use:
  //   hash - holds the index's hash. Clobbered.
  //   index - holds the overwritten index on exit.
  void IndexFromHash(Register hash, Register index);
  // Find the function context up the context chain.
  void LoadContext(Register dst, int context_chain_length);

  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the initial map for new Arrays from a JSFunction.
  void LoadInitialArrayMap(Register function_in,
                           Register scratch,
                           Register map_out,
                           bool can_have_holes);

  // Load the global function with the given index.
  void LoadGlobalFunction(int index, Register function);
  void LoadArrayFunction(Register function);

  // Load the initial map from the global function. The registers
  // function and map can be the same.
  void LoadGlobalFunctionInitialMap(Register function, Register map);
  // ---------------------------------------------------------------------------
  // Runtime calls

  // Call a code stub.
  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());

  // Tail call a code stub (jump).
  void TailCallStub(CodeStub* stub);

  // Return from a code stub after popping its arguments.
  void StubReturn(int argc);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f,
                   int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);

  // Call a runtime function and save the value of XMM registers.
  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
    const Runtime::Function* function = Runtime::FunctionForId(id);
    // The runtime function descriptor supplies its own argument count.
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
    CallRuntime(Runtime::FunctionForId(id), num_arguments);
  }
  // Convenience function: call an external reference.
  void CallExternalReference(const ExternalReference& ext,
                             int num_arguments);

  // Tail call of a runtime routine (jump).
  // Like JumpToExternalReference, but also takes care of passing the number
  // of parameters.
  void TailCallExternalReference(const ExternalReference& ext,
                                 int num_arguments,
                                 int result_size);

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid,
                       int num_arguments,
                       int result_size);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& ext, int result_size);

  // Prepares stack to put arguments (aligns and so on).  WIN64 calling
  // convention requires to put the pointer to the return value slot into
  // rcx (rcx must be preserved until CallApiFunctionAndReturn).  Saves
  // context (rsi).  Clobbers rax.  Allocates arg_stack_space * kPointerSize
  // inside the exit frame (not GCed) accessible via StackSpaceOperand.
  void PrepareCallApiFunction(int arg_stack_space);

  // Calls an API function.  Allocates HandleScope, extracts returned value
  // from handle and propagates exceptions.  Clobbers r14, r15, rbx and
  // caller-save registers.  Restores context.  On return removes
  // stack_space * kPointerSize (GCed).
  void CallApiFunctionAndReturn(Address function_address,
                                Address thunk_address,
                                Register thunk_last_arg,
                                int stack_space,
                                Operand return_value_operand,
                                Operand* context_restore_operand);
  // Before calling a C-function from generated code, align arguments on stack.
  // After aligning the frame, arguments must be stored in rsp[0], rsp[8],
  // etc., not pushed. The argument count assumes all arguments are word sized.
  // The number of slots reserved for arguments depends on platform. On Windows
  // stack slots are reserved for the arguments passed in registers. On other
  // platforms stack slots are only reserved for the arguments actually passed
  // on the stack.
  void PrepareCallCFunction(int num_arguments);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);

  // Calculate the number of stack slots to reserve for arguments when calling a
  // C function.
  int ArgumentStackSlotsForCFunctionCall(int num_arguments);
  // ---------------------------------------------------------------------------
  // Utilities

  void Ret();

  // Return and drop arguments from stack, where the number of arguments
  // may be bigger than 2^16 - 1.  Requires a scratch register.
  void Ret(int bytes_dropped, Register scratch);

  // Returns the code object associated with this macro assembler; it must
  // already have been set (asserted in debug builds).
  Handle<Object> CodeObject() {
    ASSERT(!code_object_.is_null());
    return code_object_;
  }
1327
  // Copy length bytes from source to destination.
  // Uses scratch register internally (if you have a low-eight register
  // free, do use it, otherwise kScratchRegister will be used).
  // The min_length is a minimum limit on the value that length will have.
  // The algorithm has some special cases that might be omitted if the string
  // is known to always be long.
  void CopyBytes(Register destination,
                 Register source,
                 Register length,
                 int min_length = 0,
                 Register scratch = kScratchRegister);

  // Initialize fields with filler values.  Fields starting at |start_offset|
  // not including end_offset are overwritten with the value in |filler|.  At
  // the end the loop, |start_offset| takes the value of |end_offset|.
  void InitializeFieldsWithFiller(Register start_offset,
                                  Register end_offset,
                                  Register filler);
  // ---------------------------------------------------------------------------
1348
  // StatsCounter support
1349

    
1350
  void SetCounter(StatsCounter* counter, int value);
1351
  void IncrementCounter(StatsCounter* counter, int value);
1352
  void DecrementCounter(StatsCounter* counter, int value);
1353

    
1354

    
1355
  // ---------------------------------------------------------------------------
1356
  // Debugging
1357

    
1358
  // Calls Abort(msg) if the condition cc is not satisfied.
1359
  // Use --debug_code to enable.
1360
  void Assert(Condition cc, BailoutReason reason);
1361

    
1362
  void AssertFastElements(Register elements);
1363

    
1364
  // Like Assert(), but always enabled.
1365
  void Check(Condition cc, BailoutReason reason);
1366

    
1367
  // Print a message to stdout and abort execution.
1368
  void Abort(BailoutReason msg);
1369

    
1370
  // Check that the stack is aligned.
1371
  void CheckStackAlignment();
1372

    
1373
  // Verify restrictions about code generated in stubs.
  // Flags toggled by the code generator; AllowThisStubCall consults them
  // to decide whether a stub call is legal at the current point.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() { return generating_stub_; }
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() { return allow_stub_calls_; }
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() { return has_frame_; }
  inline bool AllowThisStubCall(CodeStub* stub);
1381

    
1382
  // Maps a register to its stack-slot index within the block pushed by
  // Pushad (delegates to the private overload keyed by register code).
  static int SafepointRegisterStackIndex(Register reg) {
    return SafepointRegisterStackIndex(reg.code());
  }
1385

    
1386
  // Activation support.
1387
  void EnterFrame(StackFrame::Type type);
1388
  void LeaveFrame(StackFrame::Type type);
1389

    
1390
  // Expects object in rax and returns map with validated enum cache
1391
  // in rax.  Assumes that any other register can be used as a scratch.
1392
  void CheckEnumCache(Register null_value,
1393
                      Label* call_runtime);
1394

    
1395
  // AllocationMemento support. Arrays may have an associated
1396
  // AllocationMemento object that can be checked for in order to pretransition
1397
  // to another type.
1398
  // On entry, receiver_reg should point to the array object.
1399
  // scratch_reg gets clobbered.
1400
  // If allocation info is present, condition flags are set to equal.
1401
  void TestJSArrayForAllocationMemento(Register receiver_reg,
1402
                                       Register scratch_reg,
1403
                                       Label* no_memento_found);
1404

    
1405
  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1406
                                         Register scratch_reg,
1407
                                         Label* memento_found) {
1408
    Label no_memento_found;
1409
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1410
                                    &no_memento_found);
1411
    j(equal, memento_found);
1412
    bind(&no_memento_found);
1413
  }
1414

    
1415
 private:
1416
  // Order general registers are pushed by Pushad.
1417
  // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
1418
  static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
1419
  static const int kNumSafepointSavedRegisters = 11;
1420
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1421

    
1422
  bool generating_stub_;
1423
  bool allow_stub_calls_;
1424
  bool has_frame_;
1425
  bool root_array_available_;
1426

    
1427
  // Returns a register holding the smi value. The register MUST NOT be
1428
  // modified. It may be the "smi 1 constant" register.
1429
  Register GetSmiConstant(Smi* value);
1430

    
1431
  intptr_t RootRegisterDelta(ExternalReference other);
1432

    
1433
  // Moves the smi value to the destination register.
1434
  void LoadSmiConstant(Register dst, Smi* value);
1435

    
1436
  // This handle will be patched with the code object on installation.
1437
  Handle<Object> code_object_;
1438

    
1439
  // Helper functions for generating invokes.
1440
  void InvokePrologue(const ParameterCount& expected,
1441
                      const ParameterCount& actual,
1442
                      Handle<Code> code_constant,
1443
                      Register code_register,
1444
                      Label* done,
1445
                      bool* definitely_mismatches,
1446
                      InvokeFlag flag,
1447
                      Label::Distance near_jump = Label::kFar,
1448
                      const CallWrapper& call_wrapper = NullCallWrapper(),
1449
                      CallKind call_kind = CALL_AS_METHOD);
1450

    
1451
  void EnterExitFramePrologue(bool save_rax);
1452

    
1453
  // Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
1454
  // accessible via StackSpaceOperand.
1455
  void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
1456

    
1457
  void LeaveExitFrameEpilogue(bool restore_context);
1458

    
1459
  // Allocation support helpers.
1460
  // Loads the top of new-space into the result register.
1461
  // Otherwise the address of the new-space top is loaded into scratch (if
1462
  // scratch is valid), and the new-space top is loaded into result.
1463
  void LoadAllocationTopHelper(Register result,
1464
                               Register scratch,
1465
                               AllocationFlags flags);
1466

    
1467
  // Update allocation top with value in result_end register.
1468
  // If scratch is valid, it contains the address of the allocation top.
1469
  void UpdateAllocationTopHelper(Register result_end,
1470
                                 Register scratch,
1471
                                 AllocationFlags flags);
1472

    
1473
  // Helper for PopHandleScope.  Allowed to perform a GC and returns
1474
  // NULL if gc_allowed.  Does not perform a GC if !gc_allowed, and
1475
  // possibly returns a failure object indicating an allocation failure.
1476
  Object* PopHandleScopeHelper(Register saved,
1477
                               Register scratch,
1478
                               bool gc_allowed);
1479

    
1480
  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
1481
  void InNewSpace(Register object,
1482
                  Register scratch,
1483
                  Condition cc,
1484
                  Label* branch,
1485
                  Label::Distance distance = Label::kFar);
1486

    
1487
  // Helper for finding the mark bits for an address.  Afterwards, the
1488
  // bitmap register points at the word with the mark bits and the mask
1489
  // the position of the first bit.  Uses rcx as scratch and leaves addr_reg
1490
  // unchanged.
1491
  inline void GetMarkBits(Register addr_reg,
1492
                          Register bitmap_reg,
1493
                          Register mask_reg);
1494

    
1495
  // Helper for throwing exceptions.  Compute a handler address and jump to
1496
  // it.  See the implementation for register usage.
1497
  void JumpToHandlerEntry();
1498

    
1499
  // Compute memory operands for safepoint stack slots.
1500
  Operand SafepointRegisterSlot(Register reg);
1501
  // Converts a register code to a safepoint stack-slot index: the push
  // order recorded in kSafepointPushRegisterIndices is reversed because
  // the stack grows downwards.
  static int SafepointRegisterStackIndex(int reg_code) {
    return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
  }
1504

    
1505
  // Needs access to SafepointRegisterStackIndex for compiled frame
1506
  // traversal.
1507
  friend class StandardFrame;
1508
};
1509

    
1510

    
1511
// The code patcher is used to patch (typically) small parts of code e.g. for
1512
// debugging and other types of instrumentation. When using the code patcher
1513
// the exact number of bytes specified must be emitted. Is not legal to emit
1514
// relocation information. If any of these constraints are violated it causes
1515
// an assertion.
1516
class CodePatcher {
 public:
  // Prepares to patch exactly |size| bytes of code at |address|.
  CodePatcher(byte* address, int size);
  // NOTE(review): presumably asserts the expected patch size was emitted
  // and flushes the instruction cache — confirm in the .cc file.
  virtual ~CodePatcher();

  // Macro assembler to emit code.
  MacroAssembler* masm() { return &masm_; }

 private:
  byte* address_;  // The address of the code being patched.
  int size_;  // Number of bytes of the expected patch size.
  MacroAssembler masm_;  // Macro assembler used to generate the code.
};
1529

    
1530

    
1531
// -----------------------------------------------------------------------------
1532
// Static helper functions.
1533

    
1534
// Generate an Operand for loading a field from an object.
1535
inline Operand FieldOperand(Register object, int offset) {
1536
  return Operand(object, offset - kHeapObjectTag);
1537
}
1538

    
1539

    
1540
// Generate an Operand for loading an indexed field from an object.
1541
inline Operand FieldOperand(Register object,
1542
                            Register index,
1543
                            ScaleFactor scale,
1544
                            int offset) {
1545
  return Operand(object, index, scale, offset - kHeapObjectTag);
1546
}
1547

    
1548

    
1549
// Generate an Operand addressing slot |index| of the context object held
// in |context|.
inline Operand ContextOperand(Register context, int index) {
  const int slot_offset = Context::SlotOffset(index);
  return Operand(context, slot_offset);
}
1552

    
1553

    
1554
// Generate an Operand addressing the global object slot of the current
// context, which is expected in rsi.
inline Operand GlobalObjectOperand() {
  return Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX));
}
1557

    
1558

    
1559
// Provides access to exit frame stack space (not GCed).
1560
inline Operand StackSpaceOperand(int index) {
1561
#ifdef _WIN64
1562
  const int kShaddowSpace = 4;
1563
  return Operand(rsp, (index + kShaddowSpace) * kPointerSize);
1564
#else
1565
  return Operand(rsp, index * kPointerSize);
1566
#endif
1567
}
1568

    
1569

    
1570
// Addresses the stack slot |disp| bytes above rsp — used to reach the
// saved return address on the stack.
inline Operand StackOperandForReturnAddress(int32_t disp) {
  return Operand(rsp, disp);
}
1573

    
1574

    
1575
#ifdef GENERATED_CODE_COVERAGE
1576
extern void LogGeneratedCodeCoverage(const char* file_line);
1577
#define CODE_COVERAGE_STRINGIFY(x) #x
1578
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1579
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1580
#define ACCESS_MASM(masm) {                                                  \
1581
    Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \
1582
    masm->pushfq();                                                          \
1583
    masm->Pushad();                                                          \
1584
    masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__)));            \
1585
    masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE);        \
1586
    masm->pop(rax);                                                          \
1587
    masm->Popad();                                                           \
1588
    masm->popfq();                                                           \
1589
  }                                                                          \
1590
  masm->
1591
#else
1592
#define ACCESS_MASM(masm) masm->
1593
#endif
1594

    
1595
} }  // namespace v8::internal
1596

    
1597
#endif  // V8_X64_MACRO_ASSEMBLER_X64_H_