deps/v8/src/x64/lithium-gap-resolver-x64.cc @ f230a1cf

// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "x64/lithium-gap-resolver-x64.h"
#include "x64/lithium-codegen-x64.h"

namespace v8 {
namespace internal {

LGapResolver::LGapResolver(LCodeGen* owner)
    : cgen_(owner), moves_(32, owner->zone()) {}


void LGapResolver::Resolve(LParallelMove* parallel_move) {
  ASSERT(moves_.is_empty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands move = moves_[i];
    // Skip constants to perform them last.  They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (int i = 0; i < moves_.length(); ++i) {
    if (!moves_[i].IsEliminated()) {
      ASSERT(moves_[i].source()->IsConstantOperand());
      EmitMove(i);
    }
  }

  moves_.Rewind(0);
}
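
// A sketch of the two-pass order above, with rax/rbx as example
// registers: for the parallel move {rax -> rbx, #42 -> rax}, the first
// pass emits rax -> rbx and defers the constant.  A constant source can
// never be clobbered and never blocks another move, so loading 42 into
// rax last is always safe and keeps rax free while the remaining moves
// are resolved.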


void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
  }
  Verify();
}
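
// For instance, a move whose source and destination are the same
// register or stack slot is redundant and is dropped here rather than
// carried through the solver.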


void LGapResolver::PerformMove(int index) {
  // Each call to this function performs a move and deletes it from the move
  // graph.  We first recursively perform any move blocking this one.  We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph.  We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.

  ASSERT(!moves_[index].IsPending());
  ASSERT(!moves_[index].IsRedundant());

  // Clear this move's destination to indicate a pending move.  The actual
  // destination is saved in a stack-allocated local.  Recursion may allow
  // multiple moves to be pending.
  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
  LOperand* destination = moves_[index].destination();
  moves_[index].set_destination(NULL);

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies.  Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one, so recursively perform all
  // such moves.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any).  Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B.  Then A and B must be involved in the same cycle (or they would
      // not be swapped).  Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle.  In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  moves_[index].set_destination(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle.  If so remove it.
  if (moves_[index].source()->Equals(destination)) {
    moves_[index].Eliminate();
    return;
  }

  // The move may be blocked on a pending move (at most one), in which case
  // we have a cycle.  Search for such a blocking move and perform a swap to
  // resolve it.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(destination)) {
      ASSERT(other_move.IsPending());
      EmitSwap(index);
      return;
    }
  }

  // This move is not blocked.
  EmitMove(index);
}
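
// A sketch of cycle resolution, with rax/rbx as example registers: for
// the 2-cycle {rax -> rbx, rbx -> rax}, PerformMove(0) marks the first
// move pending and recurses into the second, whose source rbx blocks
// it.  The recursive call sees the first move still pending, detecting
// the cycle, and emits a single swap (xchgq) that performs both moves
// at once.  EmitSwap's fix-up loop then redirects the first move's
// source to rbx, so when PerformMove(0) resumes, its move reads
// rbx -> rbx and is eliminated as redundant.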


void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
  // No operand should be the destination for more than one move.
  for (int i = 0; i < moves_.length(); ++i) {
    LOperand* destination = moves_[i].destination();
    for (int j = i + 1; j < moves_.length(); ++j) {
      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
    }
  }
#endif
}


#define __ ACCESS_MASM(cgen_->masm())


void LGapResolver::EmitMove(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    Register src = cgen_->ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movq(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movq(dst, src);
    }

  } else if (source->IsStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      __ movq(dst, src);
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      __ movq(kScratchRegister, src);
      __ movq(dst, kScratchRegister);
    }

  } else if (source->IsConstantOperand()) {
    LConstantOperand* constant_source = LConstantOperand::cast(source);
    if (destination->IsRegister()) {
      Register dst = cgen_->ToRegister(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(dst, cgen_->ToHandle(constant_source));
      }
    } else if (destination->IsDoubleRegister()) {
      double v = cgen_->ToDouble(constant_source);
      uint64_t int_val = BitCast<uint64_t, double>(v);
      XMMRegister dst = cgen_->ToDoubleRegister(destination);
      if (int_val == 0) {
        __ xorps(dst, dst);
      } else {
        __ movq(kScratchRegister, int_val, RelocInfo::NONE64);
        __ movq(dst, kScratchRegister);
      }
    } else {
      ASSERT(destination->IsStackSlot());
      Operand dst = cgen_->ToOperand(destination);
      if (cgen_->IsSmiConstant(constant_source)) {
        __ Move(dst, cgen_->ToSmi(constant_source));
      } else if (cgen_->IsInteger32Constant(constant_source)) {
        // Zero top 32 bits of a 64-bit spill slot that holds a 32-bit
        // untagged value.
        __ movq(dst, Immediate(cgen_->ToInteger32(constant_source)));
      } else {
        __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
        __ movq(dst, kScratchRegister);
      }
    }

  } else if (source->IsDoubleRegister()) {
    XMMRegister src = cgen_->ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      __ movaps(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(cgen_->ToOperand(destination), src);
    }
  } else if (source->IsDoubleStackSlot()) {
    Operand src = cgen_->ToOperand(source);
    if (destination->IsDoubleRegister()) {
      __ movsd(cgen_->ToDoubleRegister(destination), src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ movsd(xmm0, src);
      __ movsd(cgen_->ToOperand(destination), xmm0);
    }
  } else {
    UNREACHABLE();
  }

  moves_[index].Eliminate();
}
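
// Example encodings produced by the constant-to-double path above
// (values chosen for illustration): 1.5 has bit pattern
// 0x3FF8000000000000, so it is materialized as
//   movq kScratchRegister, 0x3FF8000000000000
//   movq xmm_dst, kScratchRegister
// while 0.0 becomes a dependency-breaking xorps xmm_dst, xmm_dst.
// Memory-to-memory moves likewise bounce through kScratchRegister,
// since x64 has no memory-to-memory mov.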


void LGapResolver::EmitSwap(int index) {
  LOperand* source = moves_[index].source();
  LOperand* destination = moves_[index].destination();

  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister() && destination->IsRegister()) {
    // Swap two general-purpose registers.
    Register src = cgen_->ToRegister(source);
    Register dst = cgen_->ToRegister(destination);
    __ xchgq(dst, src);

  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
             (source->IsStackSlot() && destination->IsRegister())) {
    // Swap a general-purpose register and a stack slot.
    Register reg =
        cgen_->ToRegister(source->IsRegister() ? source : destination);
    Operand mem =
        cgen_->ToOperand(source->IsRegister() ? destination : source);
    __ movq(kScratchRegister, mem);
    __ movq(mem, reg);
    __ movq(reg, kScratchRegister);

  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
      (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
    // Swap two stack slots or two double stack slots.
    Operand src = cgen_->ToOperand(source);
    Operand dst = cgen_->ToOperand(destination);
    __ movsd(xmm0, src);
    __ movq(kScratchRegister, dst);
    __ movsd(dst, xmm0);
    __ movq(src, kScratchRegister);

  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
    // Swap two double registers.
    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
    __ movaps(xmm0, source_reg);
    __ movaps(source_reg, destination_reg);
    __ movaps(destination_reg, xmm0);

  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
    // Swap a double register and a double stack slot.
    ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                  ? source
                                                  : destination);
    LOperand* other = source->IsDoubleRegister() ? destination : source;
    ASSERT(other->IsDoubleStackSlot());
    Operand other_operand = cgen_->ToOperand(other);
    __ movsd(xmm0, other_operand);
    __ movsd(other_operand, reg);
    __ movsd(reg, xmm0);

  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }

  // The swap of source and destination has executed a move from source to
  // destination.
  moves_[index].Eliminate();

  // Any unperformed (including pending) move with a source of either
  // this move's source or destination needs to have its source
  // changed to reflect the state of affairs after the swap.
  for (int i = 0; i < moves_.length(); ++i) {
    LMoveOperands other_move = moves_[i];
    if (other_move.Blocks(source)) {
      moves_[i].set_source(destination);
    } else if (other_move.Blocks(destination)) {
      moves_[i].set_source(source);
    }
  }
}
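
// A sketch of the fix-up loop above, with rax/rbx/rcx as example
// registers: if rax and rbx are swapped while a third move rbx -> rcx
// is still unperformed, that move's source is redirected to rax, since
// the value formerly in rbx now lives there.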

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64