The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.
Please select the desired protocol below to get the URL.
This URL has Read-Only access.
main_repo / deps / v8 / src / x64 / lithium-codegen-x64.cc @ f230a1cf
History | View | Annotate | Download (181 KB)
1 |
// Copyright 2013 the V8 project authors. All rights reserved.
|
---|---|
2 |
// Redistribution and use in source and binary forms, with or without
|
3 |
// modification, are permitted provided that the following conditions are
|
4 |
// met:
|
5 |
//
|
6 |
// * Redistributions of source code must retain the above copyright
|
7 |
// notice, this list of conditions and the following disclaimer.
|
8 |
// * Redistributions in binary form must reproduce the above
|
9 |
// copyright notice, this list of conditions and the following
|
10 |
// disclaimer in the documentation and/or other materials provided
|
11 |
// with the distribution.
|
12 |
// * Neither the name of Google Inc. nor the names of its
|
13 |
// contributors may be used to endorse or promote products derived
|
14 |
// from this software without specific prior written permission.
|
15 |
//
|
16 |
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
17 |
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
18 |
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
19 |
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
20 |
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
21 |
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
22 |
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
23 |
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
24 |
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
25 |
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
26 |
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
27 |
|
28 |
#include "v8.h" |
29 |
|
30 |
#if V8_TARGET_ARCH_X64
|
31 |
|
32 |
#include "x64/lithium-codegen-x64.h" |
33 |
#include "code-stubs.h" |
34 |
#include "stub-cache.h" |
35 |
#include "hydrogen-osr.h" |
36 |
|
37 |
namespace v8 {
|
38 |
namespace internal {
|
39 |
|
40 |
|
41 |
// When invoking builtins, we need to record the safepoint in the middle of
|
42 |
// the invoke instruction sequence generated by the macro assembler.
|
43 |
// CallWrapper used when invoking builtins through the macro assembler.
// It reserves space for a lazy-deopt patch before the call and records a
// safepoint at the return address after the call.
class SafepointGenerator V8_FINAL : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() {}

  // Pads the code so the call site is at least patch_size() bytes long,
  // leaving room for a lazy deoptimization patch.
  virtual void BeforeCall(int call_size) const V8_OVERRIDE {
    codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
  }

  // Records the safepoint (from the supplied pointer map) for the call that
  // was just emitted.
  virtual void AfterCall() const V8_OVERRIDE {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;       // Code generator to emit padding / safepoints on.
  LPointerMap* pointers_;   // Pointer map describing live tagged values.
  Safepoint::DeoptMode deopt_mode_;  // Lazy-deopt mode for the safepoint.
};
66 |
|
67 |
|
68 |
#define __ masm()->
|
69 |
|
70 |
// Top-level driver for code generation.  Runs the phases in a fixed order
// (prologue, instruction bodies, deferred code, deopt jump table, safepoint
// table) and stops at the first phase that fails.  Returns false on abort.
bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  // Short-circuits: later phases are skipped once a phase returns false.
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}
86 |
|
87 |
|
88 |
// Finalizes the generated Code object: stores the stack slot count and
// safepoint table offset, registers weak embedded-map dependencies, attaches
// the deoptimization data, and commits compilation dependencies.
void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  RegisterDependentCodeForEmbeddedMaps(code);
  PopulateDeoptimizationData(code);
  info()->CommitDependencies(code);
}
96 |
|
97 |
|
98 |
// Aborts chunk building: marks the builder as aborted and records the
// reason on the compilation info for later reporting.
void LChunkBuilder::Abort(BailoutReason reason) {
  status_ = ABORTED;
  info()->set_bailout_reason(reason);
}
102 |
|
103 |
|
104 |
#ifdef _MSC_VER
|
105 |
void LCodeGen::MakeSureStackPagesMapped(int offset) { |
106 |
const int kPageSize = 4 * KB; |
107 |
for (offset -= kPageSize; offset > 0; offset -= kPageSize) { |
108 |
__ movq(Operand(rsp, offset), rax); |
109 |
} |
110 |
} |
111 |
#endif
|
112 |
|
113 |
|
114 |
// Emits the function prologue: optional entry hook and stop-at breakpoint,
// strict/native-mode receiver patching, frame construction, stack slot
// reservation (zapped in debug code), saving of caller double registers,
// local context allocation with parameter copying, and call tracing.
// Returns false if code generation was aborted along the way.
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    // Allow stopping in this function via --stop-at=<name>.
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Strict mode functions need to replace the receiver with undefined
    // when called as functions (without an explicit receiver
    // object). rcx is zero for method calls and non-zero for function
    // calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ testq(rcx, rcx);
      __ j(zero, &ok, Label::kNear);
      StackArgumentsAccessor args(rsp, scope()->num_parameters());
      __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
      __ movq(args.GetReceiverOperand(), kScratchRegister);
      __ bind(&ok);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    // Stub frames and function frames have different layouts.
    __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
    info()->AddNoFrameRange(0, masm_->pc_offset());
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      // Fill the reserved slots with the zap value so uses of uninitialized
      // slots are easy to spot.  rax is saved/restored around the fill.
      __ push(rax);
      __ Set(rax, slots);
      __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64);
      Label loop;
      __ bind(&loop);
      __ movq(MemOperand(rsp, rax, times_pointer_size, 0),
              kScratchRegister);
      __ decl(rax);
      __ j(not_zero, &loop);
      __ pop(rax);
    } else {
      __ subq(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      MakeSureStackPagesMapped(slots * kPointerSize);
#endif
    }

    if (info()->saves_caller_doubles()) {
      Comment(";;; Save clobbered callee double registers");
      // Spill each allocated XMM register into the reserved slot area.
      int count = 0;
      BitVector* doubles = chunk()->allocated_double_registers();
      BitVector::Iterator save_iterator(doubles);
      while (!save_iterator.Done()) {
        __ movsd(MemOperand(rsp, count * kDoubleSize),
                 XMMRegister::FromAllocationIndex(save_iterator.Current()));
        save_iterator.Advance();
        count++;
      }
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in rdi.
    __ push(rdi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both rax and rsi.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in rsi.
    __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);

    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ movq(rax, Operand(rbp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ movq(Operand(rsi, context_offset), rax);
        // Update the write barrier. This clobbers rax and rbx.
        __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}
231 |
|
232 |
|
233 |
// Emits the on-stack-replacement entry prologue exactly once.
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;  // Already emitted.

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  ASSERT(slots >= 0);
  __ subq(rsp, Immediate(slots * kPointerSize));
}
246 |
|
247 |
|
248 |
// Emits the deoptimization jump table collected during code generation.
// Entries that need a frame (stub deopts) share a single frame-building
// sequence bound at the first such entry; the rest call the deopt entry
// directly.  Returns false if code generation was aborted.
bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      // The deopt entry address is materialized in kScratchRegister so the
      // shared frame-building code below can call it.
      __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        // Reuse the frame-building sequence emitted for an earlier entry.
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(rbp);
        __ movq(rbp, rsp);
        __ push(rsi);
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ Move(rsi, Smi::FromInt(StackFrame::STUB));
        __ push(rsi);
        __ movq(rsi, MemOperand(rsp, kPointerSize));
        __ call(kScratchRegister);
      }
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}
287 |
|
288 |
|
289 |
// Emits all deferred code sections accumulated during the main body pass.
// For stubs without an eager frame, a minimal stub frame is built around
// each deferred section and torn down afterwards.  Returns false on abort.
bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      // Attribute the deferred code to the source position of the
      // instruction that requested it.
      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(rbp);  // Caller's frame pointer.
        __ push(Operand(rbp, StandardFrameConstants::kContextOffset));
        __ Push(Smi::FromInt(StackFrame::STUB));
        __ lea(rbp, Operand(rsp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ movq(rsp, rbp);
        __ pop(rbp);
      }
      // Jump back into the main instruction stream.
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
335 |
|
336 |
|
337 |
// Emits the accumulated safepoint table at the end of the code object.
bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}
342 |
|
343 |
|
344 |
// Maps a register-allocator index to the corresponding general register.
Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}
347 |
|
348 |
|
349 |
// Maps a register-allocator index to the corresponding XMM register.
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}
352 |
|
353 |
|
354 |
// Resolves a register-typed Lithium operand to its general register.
Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  const int allocation_index = op->index();
  return ToRegister(allocation_index);
}
358 |
|
359 |
|
360 |
// Resolves a double-register-typed Lithium operand to its XMM register.
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  const int allocation_index = op->index();
  return ToDoubleRegister(allocation_index);
}
364 |
|
365 |
|
366 |
// True when the operand is a constant whose literal representation is a
// smi or a 32-bit integer.
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  if (!op->IsConstantOperand()) return false;
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
370 |
|
371 |
|
372 |
// True when the operand is a constant whose literal representation is a smi.
bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
  if (!op->IsConstantOperand()) return false;
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
376 |
|
377 |
|
378 |
// True when the operand is a constant whose literal representation is tagged.
bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const {
  if (!op->IsConstantOperand()) return false;
  return chunk_->LookupLiteralRepresentation(op).IsTagged();
}
382 |
|
383 |
|
384 |
// Returns the 32-bit integer value of a constant operand.
int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return chunk_->LookupConstant(op)->Integer32Value();
}
388 |
|
389 |
|
390 |
// Returns the constant operand's integer value encoded as a smi.
Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  const int32_t value = chunk_->LookupConstant(op)->Integer32Value();
  return Smi::FromInt(value);
}
394 |
|
395 |
|
396 |
// Returns the double value of a constant operand; the constant must carry
// a double value.
double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* literal = chunk_->LookupConstant(op);
  ASSERT(literal->HasDoubleValue());
  return literal->DoubleValue();
}
401 |
|
402 |
|
403 |
// Returns the external reference held by a constant operand; the constant
// must carry an external reference value.
ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* literal = chunk_->LookupConstant(op);
  ASSERT(literal->HasExternalReferenceValue());
  return literal->ExternalReferenceValue();
}
408 |
|
409 |
|
410 |
// Returns a handle to the literal behind a smi-or-tagged constant operand.
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  HConstant* literal = chunk_->LookupConstant(op);
  return literal->handle(isolate());
}
415 |
|
416 |
|
417 |
// Converts a (double) stack slot operand to an rbp-relative memory operand.
// Does not handle registers: in the X64 assembler a plain register is not
// representable as an Operand.
Operand LCodeGen::ToOperand(LOperand* op) const {
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  const int offset = StackSlotOffset(op->index());
  return Operand(rbp, offset);
}
423 |
|
424 |
|
425 |
// Writes the deoptimization translation for an environment chain.  Outer
// environments are written first (recursively), then a frame-begin command
// matching the frame type, then one translation command per environment
// value via AddToTranslation.
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;  // End of the outer() chain.

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  // Use the self-literal id when the environment's closure is the one being
  // compiled; otherwise register it as a deoptimization literal.
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;

  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      // Getter stub frames carry exactly one value (the receiver).
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      // Setter stub frames carry exactly two values.
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}
479 |
|
480 |
|
481 |
// Appends one translation command for a single environment value.  The
// materialization marker starts a captured/arguments object whose field
// values live past translation_size in the environment and are added
// recursively; all other operands map directly to a store command keyed by
// the operand's location (stack slot, register, constant) and its
// tagged/uint32 representation flags.
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      // A duplicate refers back to a previously emitted object.
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    // Field values of dematerialized objects are stored after the regular
    // environment values, starting at translation_size.
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    // Arguments are addressed past the spill slots.
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
551 |
|
552 |
|
553 |
// Emits a call to a code object and records the safepoint after it.  Space
// for a lazy-deopt patch is reserved before the call so a later patch does
// not overwrite the safepoint.
void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode,
                               int argc) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
  ASSERT(instr != NULL);
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}
570 |
|
571 |
|
572 |
// Convenience wrapper: calls a code object with a simple safepoint and no
// extra stack arguments.
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}
577 |
|
578 |
|
579 |
// Calls a runtime function and records a simple safepoint (with lazy deopt)
// right after the call; the instruction must provide a pointer map.
void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}
590 |
|
591 |
|
592 |
// Calls a runtime function from deferred code: reloads the context into rsi
// from the frame first, calls the double-saving runtime entry, and records
// a register safepoint (no lazy deopt).
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr) {
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
600 |
|
601 |
|
602 |
// Registers an environment (and its outer chain) as a deoptimization
// target: builds the translation, assigns a deoptimization index, and
// remembers the pc offset for lazy deopts.  Idempotent per environment.
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4 0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------ translation_size ------------<|

    // Count total frames and JS frames along the outer chain.
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    // Only lazy deopts record the pc; eager deopts use -1.
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, environment->zone());
  }
}
636 |
|
637 |
|
638 |
// Emits a (conditional) deoptimization: registers the environment, looks up
// the deopt entry for the given bailout type, optionally traps for
// debugging, and either calls the entry directly (unconditional with a
// frame) or routes through the shared deopt jump table.
void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  ASSERT(FLAG_deopt_every_n_times == 0);  // Not yet implemented on x64.

  if (info()->ShouldTrapOnDeopt()) {
    // Break into the debugger when the deopt condition holds.
    Label done;
    if (cc != no_condition) {
      __ j(NegateCondition(cc), &done, Label::kNear);
    }
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}
685 |
|
686 |
|
687 |
// Conditional deoptimization with the default bailout type: LAZY for stubs,
// EAGER for everything else.
void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type;
  if (info()->IsStub()) {
    bailout_type = Deoptimizer::LAZY;
  } else {
    bailout_type = Deoptimizer::EAGER;
  }
  DeoptimizeIf(cc, environment, bailout_type);
}
694 |
|
695 |
|
696 |
// Scans the generated code's relocation info for weakly embedded objects
// and registers this code as dependent on each embedded map / JS object, so
// the code can be deoptimized if they change or die.
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  ZoneList<Handle<JSObject> > objects(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    if (Code::IsWeakEmbeddedObject(code->kind(), it.rinfo()->target_object())) {
      if (it.rinfo()->target_object()->IsMap()) {
        Handle<Map> map(Map::cast(it.rinfo()->target_object()));
        maps.Add(map, zone());
      } else if (it.rinfo()->target_object()->IsJSObject()) {
        Handle<JSObject> object(JSObject::cast(it.rinfo()->target_object()));
        objects.Add(object, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded objects after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakObjectVerificationScope disable_verification_of_embedded_objects;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
  for (int i = 0; i < objects.length(); i++) {
    AddWeakObjectToCodeDependency(isolate()->heap(), objects.at(i), code);
  }
}
724 |
|
725 |
|
726 |
// Builds the DeoptimizationInputData for the code object: translation byte
// array, inlined function count, literal array, OSR info, and one entry
// (ast id, translation index, arguments height, pc) per registered
// deoptimization environment.  No-op when nothing was registered.
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  // Deferred handle dereference is allowed while copying the literals.
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}
760 |
|
761 |
|
762 |
// Interns a literal in the deoptimization literal table and returns its
// index; an identical existing literal is reused.
int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  const int count = deoptimization_literals_.length();
  for (int i = 0; i < count; ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  // Not found: append and return the new slot (the old length).
  deoptimization_literals_.Add(literal, zone());
  return count;
}
770 |
|
771 |
|
772 |
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
|
773 |
ASSERT(deoptimization_literals_.length() == 0);
|
774 |
|
775 |
const ZoneList<Handle<JSFunction> >* inlined_closures =
|
776 |
chunk()->inlined_closures(); |
777 |
|
778 |
for (int i = 0, length = inlined_closures->length(); |
779 |
i < length; |
780 |
i++) { |
781 |
DefineDeoptimizationLiteral(inlined_closures->at(i)); |
782 |
} |
783 |
|
784 |
inlined_function_count_ = deoptimization_literals_.length(); |
785 |
} |
786 |
|
787 |
|
788 |
// Records a lazy-deopt safepoint for an instruction, choosing between a
// simple safepoint and a with-registers safepoint based on the mode.
void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
  if (safepoint_mode != RECORD_SIMPLE_SAFEPOINT) {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
  } else {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  }
}
798 |
|
799 |
|
800 |
// Defines a safepoint at the current pc and records every pointer stack
// slot from the pointer map; for with-registers safepoints, pointer
// registers (plus rsi, the context) are recorded too.
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();

  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
      kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
  if (kind & Safepoint::kWithRegisters) {
    // Register rsi always contains a pointer to the context.
    safepoint.DefinePointerRegister(rsi, zone());
  }
}
824 |
|
825 |
|
826 |
// Records a simple safepoint with no pushed arguments.
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}
830 |
|
831 |
|
832 |
// Records a simple safepoint with an empty pointer map (no live pointers).
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}
836 |
|
837 |
|
838 |
// Records a safepoint of kind kWithRegisters (registers are also scanned).
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}
843 |
|
844 |
|
845 |
void LCodeGen::RecordAndWritePosition(int position) { |
846 |
if (position == RelocInfo::kNoPosition) return; |
847 |
masm()->positions_recorder()->RecordPosition(position); |
848 |
masm()->positions_recorder()->WriteRecordedPositions(); |
849 |
} |
850 |
|
851 |
|
852 |
// Returns a human-readable suffix describing a label's role, used when
// emitting basic-block comments into the generated code.
static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  return label->is_osr_entry() ? " (OSR entry)" : "";
}
857 |
|
858 |
|
859 |
// Emits the start of a basic block: a descriptive comment, the bound
// label, and the block's gap moves.
void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  // A label is also a gap; resolve any parallel moves attached to it.
  DoGap(label);
}
869 |
|
870 |
|
871 |
// Emits code for a parallel move by delegating to the gap resolver.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}
874 |
|
875 |
|
876 |
// Emits the parallel moves recorded at every inner position of |gap|,
// in position order.
void LCodeGen::DoGap(LGap* gap) {
  for (int pos = LGap::FIRST_INNER_POSITION;
       pos <= LGap::LAST_INNER_POSITION;
       pos++) {
    LParallelMove* parallel_move =
        gap->GetParallelMove(static_cast<LGap::InnerPosition>(pos));
    if (parallel_move != NULL) {
      DoParallelMove(parallel_move);
    }
  }
}
885 |
|
886 |
|
887 |
// An instruction gap is emitted exactly like a plain gap.
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}
890 |
|
891 |
|
892 |
// Parameters are already in their stack slots on function entry, so no
// code is emitted.
void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
895 |
|
896 |
|
897 |
// Emits a call to one of a fixed set of code stubs, selected by the
// hydrogen instruction's major key. The stub's result is returned in rax.
void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      // Result stays tagged; the stub handles the transcendental function
      // selected by the instruction.
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}
930 |
|
931 |
|
932 |
// OSR values are materialized by the OSR prologue; nothing per-value here.
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}
935 |
|
936 |
|
937 |
// Emits code for integer modulus (x % y) using one of three strategies:
//   1. Power-of-two constant divisor: masking, with sign handling for a
//      possibly-negative dividend.
//   2. "Fixed right arg" power-of-two: same masking, guarded by a runtime
//      check that the divisor really has the assumed value (deopt if not).
//   3. General case: 32-bit idivl, with explicit deopts for x % 0,
//      kMinInt % -1, and negative-zero results when those are observable.
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      // Negative dividend: compute |x| & (d-1), then negate back.
      __ testl(left_reg, left_reg);
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ negl(left_reg);
      __ andl(left_reg, Immediate(divisor - 1));
      __ negl(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        // A zero result from a negative dividend is -0; deopt if observed.
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ andl(left_reg, Immediate(divisor - 1));
    __ bind(&done);

  } else if (hmod->fixed_right_arg().has_value) {
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));
    Register right_reg = ToRegister(instr->right());

    int32_t divisor = hmod->fixed_right_arg().value;
    ASSERT(IsPowerOf2(divisor));

    // Check if our assumption of a fixed right operand still holds.
    __ cmpl(right_reg, Immediate(divisor));
    DeoptimizeIf(not_equal, instr->environment());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ testl(left_reg, left_reg);
      __ j(not_sign, &left_is_not_negative, Label::kNear);
      __ negl(left_reg);
      __ andl(left_reg, Immediate(divisor - 1));
      __ negl(left_reg);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ jmp(&done, Label::kNear);
    }

    __ bind(&left_is_not_negative);
    __ andl(left_reg, Immediate(divisor - 1));
    __ bind(&done);

  } else {
    // General case: idivl requires the dividend in rax and leaves the
    // remainder in rdx.
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(rax));
    Register right_reg = ToRegister(instr->right());
    ASSERT(!right_reg.is(rax));
    ASSERT(!right_reg.is(rdx));
    Register result_reg = ToRegister(instr->result());
    ASSERT(result_reg.is(rdx));

    Label done;
    // Check for x % 0, idiv would signal a divide error. We have to
    // deopt in this case because we can't return a NaN.
    if (right->CanBeZero()) {
      __ testl(right_reg, right_reg);
      DeoptimizeIf(zero, instr->environment());
    }

    // Check for kMinInt % -1, idiv would signal a divide error. We
    // have to deopt if we care about -0, because we can't return that.
    if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
      Label no_overflow_possible;
      __ cmpl(left_reg, Immediate(kMinInt));
      __ j(not_zero, &no_overflow_possible, Label::kNear);
      __ cmpl(right_reg, Immediate(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(equal, instr->environment());
      } else {
        // kMinInt % -1 is 0 mathematically; produce it without idiv.
        __ j(not_equal, &no_overflow_possible, Label::kNear);
        __ Set(result_reg, 0);
        __ jmp(&done, Label::kNear);
      }
      __ bind(&no_overflow_possible);
    }

    // Sign extend dividend in eax into edx:eax, since we are using only the low
    // 32 bits of the values.
    __ cdq();

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (left->CanBeNegative() &&
        hmod->CanBeZero() &&
        hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label positive_left;
      __ testl(left_reg, left_reg);
      __ j(not_sign, &positive_left, Label::kNear);
      __ idivl(right_reg);
      __ testl(result_reg, result_reg);
      DeoptimizeIf(zero, instr->environment());
      __ jmp(&done, Label::kNear);
      __ bind(&positive_left);
    }
    __ idivl(right_reg);
    __ bind(&done);
  }
}
1051 |
|
1052 |
|
1053 |
// Emits code for floor(x / d) where d is a compile-time constant:
//   - d == 0: unconditional deopt; d == 1 / d == -1: move/negate with
//     minus-zero and overflow deopts as flagged.
//   - |d| a power of two: arithmetic shift (with a 64-bit negate path for
//     negative d).
//   - otherwise: multiply by a rounded fixed-point reciprocal and shift.
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
  ASSERT(instr->right()->IsConstantOperand());

  const Register dividend = ToRegister(instr->left());
  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
  const Register result = ToRegister(instr->result());

  switch (divisor) {
    case 0:
      // Division by zero always deopts.
      DeoptimizeIf(no_condition, instr->environment());
      return;

    case 1:
      if (!result.is(dividend)) {
        __ movl(result, dividend);
      }
      return;

    case -1:
      if (!result.is(dividend)) {
        __ movl(result, dividend);
      }
      __ negl(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        // 0 / -1 would be -0.
        DeoptimizeIf(zero, instr->environment());
      }
      if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        // kMinInt / -1 overflows.
        DeoptimizeIf(overflow, instr->environment());
      }
      return;
  }

  uint32_t divisor_abs = abs(divisor);
  if (IsPowerOf2(divisor_abs)) {
    int32_t power = WhichPowerOf2(divisor_abs);
    if (divisor < 0) {
      // Negate in 64 bits so kMinInt survives, then shift.
      __ movsxlq(result, dividend);
      __ neg(result);
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(zero, instr->environment());
      }
      __ sar(result, Immediate(power));
    } else {
      if (!result.is(dividend)) {
        __ movl(result, dividend);
      }
      __ sarl(result, Immediate(power));
    }
  } else {
    Register reg1 = ToRegister(instr->temp());
    Register reg2 = ToRegister(instr->result());

    // Find b which: 2^b < divisor_abs < 2^(b+1).
    unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
    unsigned shift = 32 + b;  // Precision +1bit (effectively).
    double multiplier_f =
        static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
    int64_t multiplier;
    // Round the reciprocal multiplier to the nearest integer.
    if (multiplier_f - floor(multiplier_f) < 0.5) {
      multiplier = static_cast<int64_t>(floor(multiplier_f));
    } else {
      multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
    }
    // The multiplier is a uint32.
    ASSERT(multiplier > 0 &&
           multiplier < (static_cast<int64_t>(1) << 32));
    // The multiply is int64, so sign-extend to r64.
    __ movsxlq(reg1, dividend);
    if (divisor < 0 &&
        instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ neg(reg1);
      DeoptimizeIf(zero, instr->environment());
    }
    __ movq(reg2, multiplier, RelocInfo::NONE64);
    // Result just fit in r64, because it's int32 * uint32.
    __ imul(reg2, reg1);

    // Round (add 2^30, i.e. half of 2^(shift-32+31)?) and shift down.
    // NOTE(review): the 1 << 30 rounding bias matches shift = 32 + b with
    // the +1 bit of precision noted above — confirm against the original
    // derivation before changing.
    __ addq(reg2, Immediate(1 << 30));
    __ sar(reg2, Immediate(shift));
  }
}
1134 |
|
1135 |
|
1136 |
// Emits code for integer division. A truncating division by a power-of-two
// constant is strength-reduced to shifts (with deopts when the result must
// be exact); everything else uses idivl with dividend in rax, plus deopt
// guards for x / 0, 0 / -x (minus zero), and kMinInt / -1. For flooring
// division the idivl result is adjusted toward negative infinity.
void LCodeGen::DoDivI(LDivI* instr) {
  if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
    Register dividend = ToRegister(instr->left());
    int32_t divisor =
        HConstant::cast(instr->hydrogen()->right())->Integer32Value();
    int32_t test_value = 0;
    int32_t power = 0;

    if (divisor > 0) {
      test_value = divisor - 1;
      power = WhichPowerOf2(divisor);
    } else {
      // Check for (0 / -x) that will produce negative zero.
      if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
        __ testl(dividend, dividend);
        DeoptimizeIf(zero, instr->environment());
      }
      // Check for (kMinInt / -1).
      if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
        __ cmpl(dividend, Immediate(kMinInt));
        DeoptimizeIf(zero, instr->environment());
      }
      test_value = - divisor - 1;
      power = WhichPowerOf2(-divisor);
    }

    if (test_value != 0) {
      if (instr->hydrogen()->CheckFlag(
          HInstruction::kAllUsesTruncatingToInt32)) {
        // Truncating: shift |x| and fix the sign, no exactness required.
        Label done, negative;
        __ cmpl(dividend, Immediate(0));
        __ j(less, &negative, Label::kNear);
        __ sarl(dividend, Immediate(power));
        if (divisor < 0) __ negl(dividend);
        __ jmp(&done, Label::kNear);

        __ bind(&negative);
        __ negl(dividend);
        __ sarl(dividend, Immediate(power));
        if (divisor > 0) __ negl(dividend);
        __ bind(&done);
        return;  // Don't fall through to "__ neg" below.
      } else {
        // Deoptimize if remainder is not 0.
        __ testl(dividend, Immediate(test_value));
        DeoptimizeIf(not_zero, instr->environment());
        __ sarl(dividend, Immediate(power));
      }
    }

    if (divisor < 0) __ negl(dividend);

    return;
  }

  LOperand* right = instr->right();
  // idivl fixed-register contract: dividend/quotient in rax, remainder
  // in rdx; the divisor must avoid both.
  ASSERT(ToRegister(instr->result()).is(rax));
  ASSERT(ToRegister(instr->left()).is(rax));
  ASSERT(!ToRegister(instr->right()).is(rax));
  ASSERT(!ToRegister(instr->right()).is(rdx));

  Register left_reg = rax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ testl(right_reg, right_reg);
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ testl(left_reg, left_reg);
    __ j(not_zero, &left_not_zero, Label::kNear);
    __ testl(right_reg, right_reg);
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
    Label left_not_min_int;
    __ cmpl(left_reg, Immediate(kMinInt));
    __ j(not_zero, &left_not_min_int, Label::kNear);
    __ cmpl(right_reg, Immediate(-1));
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to rdx.
  __ cdq();
  __ idivl(right_reg);

  if (instr->is_flooring()) {
    // idivl truncates toward zero; subtract 1 from the quotient when the
    // remainder is non-zero and its sign differs from the divisor's.
    Label done;
    __ testl(rdx, rdx);
    __ j(zero, &done, Label::kNear);
    __ xorl(rdx, right_reg);
    __ sarl(rdx, Immediate(31));
    __ addl(rax, rdx);
    __ bind(&done);
  } else if (!instr->hydrogen()->CheckFlag(
      HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ testl(rdx, rdx);
    DeoptimizeIf(not_zero, instr->environment());
  }
}
1245 |
|
1246 |
|
1247 |
// Emits code for integer multiplication. Small constant right operands are
// strength-reduced (neg/xor/add/lea/shl); otherwise imul is used, with
// 32-bit or 64-bit (Smi) width chosen by the value representation. When
// minus-zero must be detected, the original left operand is stashed in
// kScratchRegister before the multiply so the operand signs can be tested
// afterwards if the product is zero.
void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Save the pre-multiply left value for the minus-zero check below.
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ movq(kScratchRegister, left);
    } else {
      __ movl(kScratchRegister, left);
    }
  }

  bool can_overflow =
      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  if (right->IsConstantOperand()) {
    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
    if (right_value == -1) {
      __ negl(left);
    } else if (right_value == 0) {
      __ xorl(left, left);
    } else if (right_value == 2) {
      __ addl(left, left);
    } else if (!can_overflow) {
      // If the multiplication is known to not overflow, we
      // can use operations that don't set the overflow flag
      // correctly.
      switch (right_value) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ leal(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shll(left, Immediate(2));
          break;
        case 5:
          __ leal(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shll(left, Immediate(3));
          break;
        case 9:
          __ leal(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shll(left, Immediate(4));
          break;
        default:
          __ imull(left, left, Immediate(right_value));
          break;
      }
    } else {
      __ imull(left, left, Immediate(right_value));
    }
  } else if (right->IsStackSlot()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      // Untag one operand so the Smi product stays correctly tagged.
      __ SmiToInteger64(left, left);
      __ imul(left, ToOperand(right));
    } else {
      __ imull(left, ToOperand(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ SmiToInteger64(left, left);
      __ imul(left, ToRegister(right));
    } else {
      __ imull(left, ToRegister(right));
    }
  }

  if (can_overflow) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ testq(left, left);
    } else {
      __ testl(left, left);
    }
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      // Constant can't be represented as Smi due to immediate size limit.
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        // 0 * negative-constant is always -0.
        DeoptimizeIf(no_condition, instr->environment());
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        // negative * 0 is -0; check the saved left operand's sign.
        __ cmpl(kScratchRegister, Immediate(0));
        DeoptimizeIf(less, instr->environment());
      }
    } else if (right->IsStackSlot()) {
      // Product is zero, so one operand is zero: OR-ing the operands makes
      // the sign flag reflect the non-zero (or both-zero) operand's sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ or_(kScratchRegister, ToOperand(right));
      } else {
        __ orl(kScratchRegister, ToOperand(right));
      }
      DeoptimizeIf(sign, instr->environment());
    } else {
      // Test the non-zero operand for negative sign.
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ or_(kScratchRegister, ToRegister(right));
      } else {
        __ orl(kScratchRegister, ToRegister(right));
      }
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}
1359 |
|
1360 |
|
1361 |
// Emits code for bitwise AND/OR/XOR. The left operand doubles as the
// result register; the right operand may be a constant immediate, a stack
// slot, or a register, each handled by the matching addressing form.
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ andl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_OR:
        __ orl(ToRegister(left), Immediate(right_operand));
        break;
      case Token::BIT_XOR:
        // XOR with all-ones is a NOT; emit the shorter instruction.
        if (right_operand == int32_t(~0)) {
          __ notl(ToRegister(left));
        } else {
          __ xorl(ToRegister(left), Immediate(right_operand));
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right->IsStackSlot()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    ASSERT(right->IsRegister());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToRegister(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToRegister(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
1420 |
|
1421 |
|
1422 |
// Emits code for shift/rotate operations (ROR, SAR, SHR, SHL). A register
// shift amount must live in rcx (x64 cl-shift contract); constant amounts
// are masked to 5 bits. SHR can deopt when the result would be a negative
// int32 (i.e. an unsigned value that does not fit a signed 32-bit result).
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(rcx));

    switch (instr->op()) {
      case Token::ROR:
        __ rorl_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sarl_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shrl_cl(ToRegister(left));
        if (instr->can_deopt()) {
          // Deopt if the logical shift produced a value with the sign bit
          // set, which cannot be represented as a signed int32.
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case Token::SHL:
        __ shll_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Non-register right operand: a constant shift amount.
    int32_t value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rorl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sarl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          // x >>> 0 leaves the value unchanged but must still fit int32.
          __ testl(ToRegister(left), ToRegister(left));
          DeoptimizeIf(negative, instr->environment());
        } else {
          __ shrl(ToRegister(left), Immediate(shift_count));
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          // Smi values shift the full 64-bit word; int32 values shift 32.
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ shl(ToRegister(left), Immediate(shift_count));
          } else {
            __ shll(ToRegister(left), Immediate(shift_count));
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
1488 |
|
1489 |
|
1490 |
// Emits code for integer subtraction into the left operand (which is also
// the result). Smi representation uses 64-bit subq, int32 uses subl; an
// overflow deopt follows when the hydrogen value can overflow.
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ subl(ToRegister(left),
            Immediate(ToInteger32(LConstantOperand::cast(right))));
  } else if (right->IsRegister()) {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subq(ToRegister(left), ToRegister(right));
    } else {
      __ subl(ToRegister(left), ToRegister(right));
    }
  } else {
    if (instr->hydrogen_value()->representation().IsSmi()) {
      __ subq(ToRegister(left), ToOperand(right));
    } else {
      __ subl(ToRegister(left), ToOperand(right));
    }
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}
1516 |
|
1517 |
|
1518 |
// Materializes an int32 constant into the result register.
void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Set(ToRegister(instr->result()), instr->value());
}
1521 |
|
1522 |
|
1523 |
// Materializes a Smi constant into the result register.
void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), instr->value());
}
1526 |
|
1527 |
|
1528 |
// Materializes a double constant into an XMM register. +0.0 is produced
// with xorps; every other value (including -0.0, whose bit pattern is
// non-zero) goes through an integer temp register.
void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  uint64_t int_val = BitCast<uint64_t, double>(v);
  // Use xor to produce +0.0 in a fast and compact way, but avoid to
  // do so if the constant is -0.0.
  if (int_val == 0) {
    __ xorps(res, res);
  } else {
    Register tmp = ToRegister(instr->temp());
    __ Set(tmp, int_val);
    __ movq(res, tmp);
  }
}
1543 |
|
1544 |
|
1545 |
// Materializes an external-reference constant into the result register.
void LCodeGen::DoConstantE(LConstantE* instr) {
  __ LoadAddress(ToRegister(instr->result()), instr->value());
}
1548 |
|
1549 |
|
1550 |
// Materializes a tagged (heap object) constant into the result register.
void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> value = instr->value(isolate());
  __ Move(ToRegister(instr->result()), value);
}
1554 |
|
1555 |
|
1556 |
// Loads the enum-cache length of a map into the result register.
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}
1561 |
|
1562 |
|
1563 |
// Extracts the elements kind of an object: load its map, read the map's
// bit field 2, then mask and shift out the elements-kind bits.
void LCodeGen::DoElementsKind(LElementsKind* instr) {
  Register result = ToRegister(instr->result());
  Register input = ToRegister(instr->value());

  // Load map into |result|.
  __ movq(result, FieldOperand(input, HeapObject::kMapOffset));
  // Load the map's "bit field 2" into |result|. We only need the first byte.
  __ movzxbq(result, FieldOperand(result, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ and_(result, Immediate(Map::kElementsKindMask));
  __ shr(result, Immediate(Map::kElementsKindShift));
}
1575 |
|
1576 |
|
1577 |
// Implements the [[ValueOf]]-style unwrap: if the input is a JSValue
// wrapper, loads its wrapped value; otherwise (Smi or any other object)
// the input is returned unchanged. Input and result share a register.
void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Label done;

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    // If the object is a smi return the object.
    __ JumpIfSmi(input, &done, Label::kNear);
  }

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);
  __ movq(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}
1595 |
|
1596 |
|
1597 |
// Loads a field of a JSDate object. Deopts unless the input is actually a
// JSDate. Field 0 (the date value) is read directly; cached fields are
// read inline when the isolate's date-cache stamp is current, otherwise
// (and for uncached fields) a C function computes the field.
void LCodeGen::DoDateField(LDateField* instr) {
  Register object = ToRegister(instr->date());
  Register result = ToRegister(instr->result());
  Smi* index = instr->index();
  Label runtime, done, not_date_object;
  // Fixed-register contract: the C call below returns in rax.
  ASSERT(object.is(result));
  ASSERT(object.is(rax));

  // Deopt on Smi or non-JSDate input.
  Condition cc = masm()->CheckSmi(object);
  DeoptimizeIf(cc, instr->environment());
  __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
  DeoptimizeIf(not_equal, instr->environment());

  if (index->value() == 0) {
    __ movq(result, FieldOperand(object, JSDate::kValueOffset));
  } else {
    if (index->value() < JSDate::kFirstUncachedField) {
      // Fast path: the cached field is valid while the date-cache stamp
      // matches the isolate's current stamp.
      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
      Operand stamp_operand = __ ExternalOperand(stamp);
      __ movq(kScratchRegister, stamp_operand);
      __ cmpq(kScratchRegister, FieldOperand(object,
                                             JSDate::kCacheStampOffset));
      __ j(not_equal, &runtime, Label::kNear);
      __ movq(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
      __ jmp(&done);
    }
    // Slow path: compute the field in C++.
    __ bind(&runtime);
    __ PrepareCallCFunction(2);
    __ movq(arg_reg_1, object);
    __ movq(arg_reg_2, index, RelocInfo::NONE64);
    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
    // Restore the context register clobbered by the C call.
    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
    __ bind(&done);
  }
}
1633 |
|
1634 |
|
1635 |
// Stores a character into a sequential string at the given index, using a
// byte store for one-byte strings and a word store for two-byte strings.
// In debug builds, first verifies (via the string's map) that the string's
// representation/encoding matches the static encoding of the instruction;
// |value| is saved and restored around that check because it is clobbered.
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  Register string = ToRegister(instr->string());
  Register index = ToRegister(instr->index());
  Register value = ToRegister(instr->value());
  String::Encoding encoding = instr->encoding();

  if (FLAG_debug_code) {
    __ push(value);
    __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
    __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));

    __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ pop(value);
  }

  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
            value);
  } else {
    __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
            value);
  }
}
1663 |
|
1664 |
|
1665 |
// Emits a throw: push the exception value and call the runtime, which does
// not return. In debug builds an int3 trap documents unreachability.
void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToRegister(instr->value()));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}
1674 |
|
1675 |
|
1676 |
// Emits code for integer addition. When the hydrogen instruction allows it
// and the result register differs from the left operand, a non-flag-setting
// lea computes the sum into the result directly (no overflow check needed
// on that path). Otherwise an in-place add is used — 64-bit addq for Smi
// representation, 32-bit addl for int32 — followed by an overflow deopt
// when the value can overflow.
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      int32_t offset = ToInteger32(LConstantOperand::cast(right));
      __ leal(ToRegister(instr->result()),
              MemOperand(ToRegister(left), offset));
    } else {
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ lea(ToRegister(instr->result()), address);
      } else {
        __ leal(ToRegister(instr->result()), address);
      }
    }
  } else {
    if (right->IsConstantOperand()) {
      __ addl(ToRegister(left),
              Immediate(ToInteger32(LConstantOperand::cast(right))));
    } else if (right->IsRegister()) {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ addq(ToRegister(left), ToRegister(right));
      } else {
        __ addl(ToRegister(left), ToRegister(right));
      }
    } else {
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ addq(ToRegister(left), ToOperand(right));
      } else {
        __ addl(ToRegister(left), ToOperand(right));
      }
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      DeoptimizeIf(overflow, instr->environment());
    }
  }
}
1715 |
|
1716 |
|
1717 |
// Math.min / Math.max.  Integer/smi inputs use a compare + conditional move
// sequence.  Double inputs additionally handle the JS-specified corner
// cases: NaN propagation and the +0/-0 distinction.
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    // If left already satisfies the min/max relation, keep it; otherwise
    // overwrite left with right.
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    Register left_reg = ToRegister(left);
    if (right->IsConstantOperand()) {
      Immediate right_imm =
          Immediate(ToInteger32(LConstantOperand::cast(right)));
      ASSERT(!instr->hydrogen_value()->representation().IsSmi());
      __ cmpl(left_reg, right_imm);
      __ j(condition, &return_left, Label::kNear);
      __ movq(left_reg, right_imm);
    } else if (right->IsRegister()) {
      Register right_reg = ToRegister(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpq(left_reg, right_reg);
      } else {
        __ cmpl(left_reg, right_reg);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movq(left_reg, right_reg);
    } else {
      Operand right_op = ToOperand(right);
      if (instr->hydrogen_value()->representation().IsSmi()) {
        __ cmpq(left_reg, right_op);
      } else {
        __ cmpl(left_reg, right_op);
      }
      __ j(condition, &return_left, Label::kNear);
      __ movq(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    ASSERT(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    // left == right: distinguish +0 from -0, which ucomisd treats as equal.
    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // min(+0, -0) is -0: OR the sign bits so any -0 input wins.
      __ orpd(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);
    __ bind(&return_right);
    __ movsd(left_reg, right_reg);

    __ bind(&return_left);
  }
}
1790 |
|
1791 |
|
1792 |
// Double-precision arithmetic on XMM registers.  ADD/SUB/MUL/DIV are emitted
// in place (left is also the result); MOD calls out to a C helper.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  ASSERT(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(left, right);
      break;
    case Token::SUB:
      __ subsd(left, right);
      break;
    case Token::MUL:
      __ mulsd(left, right);
      break;
    case Token::DIV:
      __ divsd(left, right);
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a mulsd depending on the result
      __ movaps(left, left);
      break;
    case Token::MOD: {
      // C calling convention: args in xmm0/xmm1, result in xmm0.
      // double_scratch0() is used to stage the first argument and to pick
      // up the return value (the ASSERT below pins right to xmm1).
      XMMRegister xmm_scratch = double_scratch0();
      __ PrepareCallCFunction(2);
      __ movaps(xmm_scratch, left);
      ASSERT(right.is(xmm1));
      __ CallCFunction(
          ExternalReference::double_fp_operation(Token::MOD, isolate()), 2);
      // Restore the context register clobbered by the C call.
      __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
      __ movaps(result, xmm_scratch);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
1830 |
|
1831 |
|
1832 |
// Generic (tagged) binary arithmetic via the BinaryOpStub.  Register
// assignments (rdx op rax -> rax) are fixed by the stub's calling convention.
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->left()).is(rdx));
  ASSERT(ToRegister(instr->right()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  BinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  __ nop();  // Signals no inlined code.
}
1841 |
|
1842 |
|
1843 |
// Emits a conditional two-way branch to the instruction's true/false blocks,
// omitting jumps to whichever target is the next emitted block so the common
// case falls through.
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    // Both targets coincide (or the branch is unconditional): plain goto.
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // Fall through to the true block; jump only when the condition fails.
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    if (cc != always) {
      __ jmp(chunk_->GetAssemblyLabel(right_block));
    }
  }
}
1863 |
|
1864 |
|
1865 |
// Emits only the jump to the false block when cc holds; otherwise falls
// through (the caller emits the rest of the test).
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  __ j(cc, chunk_->GetAssemblyLabel(false_block));
}
1870 |
|
1871 |
|
1872 |
// Emits a hardware breakpoint instruction.
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}
1875 |
|
1876 |
|
1877 |
// Branch on the JS truthiness of a value.  Fast paths exist for values whose
// representation or HType is statically known; otherwise the inline
// ToBoolean type dispatch below is emitted, covering exactly the types
// recorded in the instruction's expected_input_types and deopting on
// anything unseen.
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsInteger32()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testl(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsSmi()) {
    ASSERT(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ testq(reg, reg);
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    ASSERT(!info()->IsStub());
    // Truthy iff the double compares not-equal to +0 (NaN also compares
    // not-equal here via ucomisd's unordered result).
    XMMRegister reg = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(reg, xmm_scratch);
    EmitBranch(instr, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      ASSERT(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      ASSERT(!info()->IsStub());
      __ SmiCompare(reg, Smi::FromInt(0));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      ASSERT(!info()->IsStub());
      // A JSArray is always truthy.
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      ASSERT(!info()->IsStub());
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
      EmitBranch(instr, not_equal);
    } else if (type.IsString()) {
      ASSERT(!info()->IsStub());
      // A string is truthy iff its length is non-zero.
      __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ Cmp(reg, Smi::FromInt(0));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ testb(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr->environment());
      }

      const Register map = kScratchRegister;
      if (expected.NeedsMap()) {
        __ movq(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ testb(FieldOperand(map, Map::kBitFieldOffset),
                   Immediate(1 << Map::kIsUndetectable));
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmpq(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ j(not_equal, &not_heap_number, Label::kNear);
        XMMRegister xmm_scratch = double_scratch0();
        __ xorps(xmm_scratch, xmm_scratch);
        __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  }
}
2012 |
|
2013 |
|
2014 |
// Unconditional jump to a block, elided when the block is emitted next.
void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
  }
}
2019 |
|
2020 |
|
2021 |
// Lithium goto: delegates to EmitGoto for fall-through elision.
void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}
2024 |
|
2025 |
|
2026 |
// Maps a comparison token to the x64 condition code that implements it,
// choosing the unsigned flavour (below/above/...) when is_unsigned is set.
// Unsupported tokens (IN, INSTANCEOF, anything else) hit UNREACHABLE and
// yield no_condition.
inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      return equal;
    case Token::NE:
    case Token::NE_STRICT:
      return not_equal;
    case Token::LT:
      return is_unsigned ? below : less;
    case Token::GT:
      return is_unsigned ? above : greater;
    case Token::LTE:
      return is_unsigned ? below_equal : less_equal;
    case Token::GTE:
      return is_unsigned ? above_equal : greater_equal;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return no_condition;
}
2056 |
|
2057 |
|
2058 |
// Numeric compare-and-branch.  Constant/constant comparisons are folded at
// compile time into a plain goto; otherwise a cmp is emitted whose width
// (cmpq vs cmpl) tracks the smi/int32 representation, followed by a
// conditional branch.
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  Condition cc = TokenToCondition(instr->op(), instr->is_double());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      int32_t value;
      if (right->IsConstantOperand()) {
        value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          __ Cmp(ToRegister(left), Smi::FromInt(value));
        } else {
          __ cmpl(ToRegister(left), Immediate(value));
        }
      } else if (left->IsConstantOperand()) {
        // Constant on the left: compare right against it instead.
        value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (right->IsRegister()) {
            __ Cmp(ToRegister(right), Smi::FromInt(value));
          } else {
            __ Cmp(ToOperand(right), Smi::FromInt(value));
          }
        } else if (right->IsRegister()) {
          __ cmpl(ToRegister(right), Immediate(value));
        } else {
          __ cmpl(ToOperand(right), Immediate(value));
        }
        // We transposed the operands. Reverse the condition.
        cc = ReverseCondition(cc);
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (right->IsRegister()) {
          __ cmpq(ToRegister(left), ToRegister(right));
        } else {
          __ cmpq(ToRegister(left), ToOperand(right));
        }
      } else {
        if (right->IsRegister()) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmpl(ToRegister(left), ToOperand(right));
        }
      }
    }
    EmitBranch(instr, cc);
  }
}
2117 |
|
2118 |
|
2119 |
// Branch on object identity (pointer equality) between left and right.
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ Cmp(left, right);
  } else {
    Register right = ToRegister(instr->right());
    __ cmpq(left, right);
  }
  EmitBranch(instr, equal);
}
2131 |
|
2132 |
|
2133 |
// Branch if the input is the hole value.  Tagged inputs compare directly
// against the hole object.  Double inputs check for the hole NaN pattern:
// a non-NaN can never be the hole (EmitFalseBranch on parity_odd), then the
// double is spilled to the stack so its upper 32 bits can be compared
// against kHoleNanUpper32.
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ Cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  XMMRegister input_reg = ToDoubleRegister(instr->object());
  __ ucomisd(input_reg, input_reg);
  EmitFalseBranch(instr, parity_odd);

  __ subq(rsp, Immediate(kDoubleSize));
  __ movsd(MemOperand(rsp, 0), input_reg);
  __ addq(rsp, Immediate(kDoubleSize));

  // rsp has been restored, so the stored double now lives just below rsp;
  // read its upper word from there.  NOTE(review): this relies on the data
  // below rsp surviving until the cmpl (x64 red zone) — confirm against the
  // target ABIs this port supports.
  int offset = sizeof(kHoleNanUpper32);
  __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}
2153 |
|
2154 |
|
2155 |
// Emits the test for "is a JS object" used by IsObjectAndBranch: smis and
// undetectable objects are not objects; null is; otherwise the instance
// type must lie in the non-callable spec-object range.  Jumps to the given
// labels for decided cases and otherwise falls through with the answer
// reachable via the returned condition.  Clobbers kScratchRegister.
Condition LCodeGen::EmitIsObject(Register input,
                                 Label* is_not_object,
                                 Label* is_object) {
  ASSERT(!input.is(kScratchRegister));

  __ JumpIfSmi(input, is_not_object);

  __ CompareRoot(input, Heap::kNullValueRootIndex);
  __ j(equal, is_object);

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
  // Undetectable objects behave like undefined.
  __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  __ j(not_zero, is_not_object);

  __ movzxbl(kScratchRegister,
             FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
  __ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
  __ j(below, is_not_object);
  __ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
  return below_equal;
}
2178 |
|
2179 |
|
2180 |
// Branch on whether the value is a JS object (see EmitIsObject).
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  Condition true_cond = EmitIsObject(
      reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));

  EmitBranch(instr, true_cond);
}
2188 |
|
2189 |
|
2190 |
// Emits a string-type test: optionally rejects smis via is_not_string, then
// checks the instance type, returning the condition that holds when the
// input is a string.  temp1 is clobbered by IsObjectStringType.
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}
2202 |
|
2203 |
|
2204 |
// Branch on whether the value is a string.  The smi check is skipped when
// Hydrogen has already proven the value is a heap object.
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}
2217 |
|
2218 |
|
2219 |
// Branch on whether the value is a smi, for register or stack operands.
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Condition is_smi;
  if (instr->value()->IsRegister()) {
    Register input = ToRegister(instr->value());
    is_smi = masm()->CheckSmi(input);
  } else {
    Operand input = ToOperand(instr->value());
    is_smi = masm()->CheckSmi(input);
  }
  EmitBranch(instr, is_smi);
}
2230 |
|
2231 |
|
2232 |
// Branch on the map's undetectable bit.  Smis (possible unless Hydrogen
// proved a heap object) are never undetectable and go to the false block.
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
           Immediate(1 << Map::kIsUndetectable));
  EmitBranch(instr, not_zero);
}
2244 |
|
2245 |
|
2246 |
// String comparison via the CompareIC; the IC leaves its result in rax,
// which is tested against zero and branched on with the token's condition.
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  __ testq(rax, rax);

  EmitBranch(instr, condition);
}
2257 |
|
2258 |
|
2259 |
// Picks the single instance type to compare against for a
// HasInstanceTypeAndBranch check.  When the range is open at the bottom
// (from == FIRST_TYPE) the upper bound is the interesting value; otherwise
// the range must be a point or open at the top, and the lower bound is used.
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  const InstanceType lower = instr->from();
  const InstanceType upper = instr->to();
  if (lower == FIRST_TYPE) return upper;
  ASSERT(lower == upper || upper == LAST_TYPE);
  return lower;
}
2266 |
|
2267 |
|
2268 |
// Picks the branch condition matching the instance-type range tested by
// TestType: a point range needs equality, a range open at the top needs
// above_equal, and one open at the bottom needs below_equal.
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  const InstanceType lower = instr->from();
  const InstanceType upper = instr->to();
  if (lower == upper) {
    return equal;
  }
  if (upper == LAST_TYPE) {
    return above_equal;
  }
  if (lower == FIRST_TYPE) {
    return below_equal;
  }
  UNREACHABLE();
  return equal;
}
2277 |
|
2278 |
|
2279 |
// Branch on whether the value's instance type falls in the instruction's
// [from, to] range (reduced to one compare by TestType/BranchCondition).
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
2289 |
|
2290 |
|
2291 |
// Extracts the cached array index from a string's hash field into result.
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
  ASSERT(String::kHashShift >= kSmiTagSize);
  __ IndexFromHash(result, result);
}
2301 |
|
2302 |
|
2303 |
// Branch on whether the string's hash field contains a cached array index
// (true when the "contains index" mask bits are all clear).
void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ testl(FieldOperand(input, String::kHashFieldOffset),
           Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}
2311 |
|
2312 |
|
2313 |
// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
// Implements the classof test: checks the receiver's instance-type range
// (with a dedicated fast path for class_name == "Function"), then compares
// the constructor's instance class name against class_name by identity.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!input.is(temp2));
  ASSERT(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
    // Assuming the following assertions, we can use the same compares to test
    // for both being a function type and being in the object type range.
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  FIRST_SPEC_OBJECT_TYPE + 1);
    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
                  LAST_SPEC_OBJECT_TYPE - 1);
    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
    __ j(below, is_false);
    __ j(equal, is_true);
    __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
    __ j(equal, is_true);
  } else {
    // Faster code path to avoid two compares: subtract lower bound from the
    // actual type and do a signed compare with the width of the type range.
    __ movq(temp, FieldOperand(input, HeapObject::kMapOffset));
    __ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
    __ subq(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ cmpq(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
    __ j(above, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ movq(temp, FieldOperand(temp, Map::kConstructorOffset));

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ movq(temp, FieldOperand(temp,
                             SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are internalized it is sufficient to use an
  // identity comparison.
  ASSERT(class_name->IsInternalizedString());
  __ Cmp(temp, class_name);
  // End with the answer in the z flag.
}
2379 |
|
2380 |
|
2381 |
// Branch on the classof test; EmitClassOfTest leaves the answer in the
// z flag when it falls through.
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}
2392 |
|
2393 |
|
2394 |
// Branch on whether the object's map equals the instruction's map.
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());

  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}
2400 |
|
2401 |
|
2402 |
// Generic instanceof via InstanceofStub.  The stub returns 0 in rax on a
// positive result, so zero selects the true value.
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  InstanceofStub stub(InstanceofStub::kNoFlags);
  __ push(ToRegister(instr->left()));
  __ push(ToRegister(instr->right()));
  CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  Label true_value, done;
  __ testq(rax, rax);
  __ j(zero, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}
2416 |
|
2417 |
|
2418 |
// instanceof against a known global function, with an inline, patchable
// map/result cache.  On a cache hit the patched LoadRoot yields the cached
// answer; on a miss (after filtering null and strings, which are never
// instances) the deferred code calls the stub and patches the cache.
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
   public:
    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                  LInstanceOfKnownGlobal* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
    // Start of the patchable inline cache sequence.
    Label* map_check() { return &map_check_; }
   private:
    LInstanceOfKnownGlobal* instr_;
    Label map_check_;
  };


  DeferredInstanceOfKnownGlobal* deferred;
  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);

  Label done, false_result;
  Register object = ToRegister(instr->value());

  // A Smi is not an instance of anything.
  __ JumpIfSmi(object, &false_result);

  // This is the inlined call site instanceof cache. The two occurences of the
  // hole value will be patched to the last map/result pair generated by the
  // instanceof stub.
  Label cache_miss;
  // Use a temp register to avoid memory operands with variable lengths.
  Register map = ToRegister(instr->temp());
  __ movq(map, FieldOperand(object, HeapObject::kMapOffset));
  __ bind(deferred->map_check());  // Label for calculating code patching.
  Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
  __ movq(kScratchRegister, cache_cell, RelocInfo::CELL);
  __ cmpq(map, Operand(kScratchRegister, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Patched to load either true or false.
  __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
#ifdef DEBUG
  // Check that the code size between patch label and patch sites is invariant.
  Label end_of_patched_code;
  __ bind(&end_of_patched_code);
  ASSERT(true);
#endif
  __ jmp(&done);

  // The inlined call site cache did not match. Check for null and string
  // before calling the deferred code.
  __ bind(&cache_miss);  // Null is not an instance of anything.
  __ CompareRoot(object, Heap::kNullValueRootIndex);
  __ j(equal, &false_result, Label::kNear);

  // String values are not instances of anything.
  __ JumpIfNotString(object, kScratchRegister, deferred->entry());

  __ bind(&false_result);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);

  __ bind(deferred->exit());
  __ bind(&done);
}
2481 |
|
2482 |
|
2483 |
// Slow path for DoInstanceOfKnownGlobal: calls InstanceofStub with the
// call-site-inline-check flag, passing the distance from the inline
// map-check label so the stub can patch the inline cache.  The stub's
// zero-means-true result in rax is converted to true/false in rax.
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                               Label* map_check) {
  {
    PushSafepointRegistersScope scope(this);
    InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
        InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
    InstanceofStub stub(flags);

    __ push(ToRegister(instr->value()));
    __ Push(instr->function());

    // Delta from map_check to the return address of the stub call; the
    // constant accounts for the code emitted between here and the call.
    static const int kAdditionalDelta = 10;
    int delta =
        masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
    ASSERT(delta >= 0);
    __ push_imm32(delta);

    // We are pushing three values on the stack but recording a
    // safepoint with two arguments because stub is going to
    // remove the third argument from the stack before jumping
    // to instanceof builtin on the slow path.
    CallCodeGeneric(stub.GetCode(isolate()),
                    RelocInfo::CODE_TARGET,
                    instr,
                    RECORD_SAFEPOINT_WITH_REGISTERS,
                    2);
    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
    LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
    // Move result to a register that survives the end of the
    // PushSafepointRegisterScope.
    __ movq(kScratchRegister, rax);
  }
  __ testq(kScratchRegister, kScratchRegister);
  Label load_false;
  Label done;
  __ j(not_zero, &load_false);
  __ LoadRoot(rax, Heap::kTrueValueRootIndex);
  __ jmp(&done);
  __ bind(&load_false);
  __ LoadRoot(rax, Heap::kFalseValueRootIndex);
  __ bind(&done);
}
2526 |
|
2527 |
|
2528 |
// Tagged comparison: calls the CompareIC for the token, then converts the
// IC's integer answer in rax into a true/false value in the result register.
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = TokenToCondition(op, false);
  Label true_value, done;
  // The IC leaves its result in rax; test it against zero and branch on the
  // condition corresponding to the comparison token.
  __ testq(rax, rax);
  __ j(condition, &true_value, Label::kNear);
  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
  __ bind(&done);
}
2544 |
|
2545 |
|
2546 |
// Function epilogue: optional trace call, restore of caller-saved double
// registers, frame teardown, and return popping the arguments.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime
    // call to return the value in the same register.
    __ push(rax);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
    ASSERT(NeedsEagerFrame());
    // Reload each allocated double register from its save slot on the stack.
    BitVector* doubles = chunk()->allocated_double_registers();
    BitVector::Iterator save_iterator(doubles);
    int count = 0;
    while (!save_iterator.Done()) {
      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
               MemOperand(rsp, count * kDoubleSize));
      save_iterator.Advance();
      count++;
    }
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {
    // Tear down the frame; record where frameless code begins for the
    // no-frame-range bookkeeping below.
    __ movq(rsp, rbp);
    __ pop(rbp);
    no_frame_start = masm_->pc_offset();
  }
  if (instr->has_constant_parameter_count()) {
    // +1 pops the receiver in addition to the declared parameters.
    __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
           rcx);
  } else {
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi
    __ SmiToInteger32(reg, reg);
    // Pick a scratch register for the return address that doesn't alias reg.
    Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
    __ PopReturnAddressTo(return_addr_reg);
    __ shl(reg, Immediate(kPointerSizeLog2));
    __ addq(rsp, reg);
    __ jmp(return_addr_reg);
  }
  if (no_frame_start != -1) {
    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
  }
}
2588 |
|
2589 |
|
2590 |
// Load the value of a global property cell; deoptimize if the cell holds
// the hole (the property may have been deleted).
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
  Register result = ToRegister(instr->result());
  __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
  }
}
2598 |
|
2599 |
|
2600 |
// Generic global load via the LoadIC; global object in rax, name in rcx,
// result in rax (IC calling convention).
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  // Loads inside typeof use a plain code target; other global loads use the
  // contextual reloc mode.
  RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
                                               RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, mode, instr);
}
2610 |
|
2611 |
|
2612 |
// Store a value into a global property cell, optionally deoptimizing if the
// cell currently holds the hole.
void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
  Register value = ToRegister(instr->value());
  Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();

  // If the cell we are storing to contains the hole it could have
  // been deleted from the property dictionary. In that case, we need
  // to update the property details in the property dictionary to mark
  // it as no longer deleted. We deoptimize in that case.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // We have a temp because CompareRoot might clobber kScratchRegister.
    Register cell = ToRegister(instr->temp());
    ASSERT(!value.is(cell));
    __ movq(cell, cell_handle, RelocInfo::CELL);
    __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
    DeoptimizeIf(equal, instr->environment());
    // Store the value.
    __ movq(Operand(cell, 0), value);
  } else {
    // Store the value.
    __ movq(kScratchRegister, cell_handle, RelocInfo::CELL);
    __ movq(Operand(kScratchRegister, 0), value);
  }
  // Cells are always rescanned, so no write barrier here.
}
2636 |
|
2637 |
|
2638 |
// Generic global store via the StoreIC (strict or sloppy variant);
// global object in rdx, value in rax, name in rcx.
void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
  ASSERT(ToRegister(instr->global_object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
}
2648 |
|
2649 |
|
2650 |
// Load a slot from a context object. A hole value either deoptimizes or is
// replaced by undefined, depending on the hydrogen instruction's mode.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ movq(result, ContextOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      // Non-deopting mode: substitute undefined for the hole.
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
      __ bind(&is_not_hole);
    }
  }
}
2666 |
|
2667 |
|
2668 |
// Store a value into a context slot, with an optional hole check (deopt or
// skip the store) and a write barrier when the stored value may be a heap
// object.
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Operand target = ContextOperand(context, instr->slot_index());

  Label skip_assignment;
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr->environment());
    } else {
      // Non-deopting mode: a hole means the binding is not writable here.
      __ j(not_equal, &skip_assignment);
    }
  }
  __ movq(target, value);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // The smi check can be omitted when the value is statically known to be
    // a heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    int offset = Context::SlotOffset(instr->slot_index());
    Register scratch = ToRegister(instr->temp());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              scratch,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}
2702 |
|
2703 |
|
2704 |
// Load a named field. Handles three cases: external-memory accesses,
// double-representation (unboxed) fields, and ordinary tagged fields that
// may live in the object itself or in its out-of-object properties array.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    if (instr->object()->IsConstantOperand()) {
      // Constant external address: load via rax-specific absolute-load insn.
      ASSERT(result.is(rax));
      __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
    } else {
      Register object = ToRegister(instr->object());
      __ Load(result, MemOperand(object, offset), access.representation());
    }
    return;
  }

  Register object = ToRegister(instr->object());
  if (FLAG_track_double_fields &&
      instr->hydrogen()->representation().IsDouble()) {
    // Unboxed double field: load directly into an XMM register.
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    // Field lives in the properties backing store; chase that pointer first.
    __ movq(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  __ Load(result, FieldOperand(object, offset), access.representation());
}
2735 |
|
2736 |
|
2737 |
// Generic named load via the LoadIC; receiver in rax, name in rcx, result
// in rax (IC calling convention).
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rax));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Move(rcx, instr->name());
  Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
2745 |
|
2746 |
|
2747 |
// Load the prototype of a JSFunction, deoptimizing if the value is not a
// function or has no (initial-map or direct) prototype.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // Check that the function really is a function.
  // (CmpObjectType also leaves the function's map in |result|.)
  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
  DeoptimizeIf(not_equal, instr->environment());

  // Check whether the function has an instance prototype.
  Label non_instance;
  __ testb(FieldOperand(result, Map::kBitFieldOffset),
           Immediate(1 << Map::kHasNonInstancePrototype));
  __ j(not_zero, &non_instance, Label::kNear);

  // Get the prototype or initial map from the function.
  __ movq(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ movq(result, FieldOperand(result, Map::kPrototypeOffset));
  __ jmp(&done, Label::kNear);

  // Non-instance prototype: Fetch prototype from constructor field
  // in the function's map.
  __ bind(&non_instance);
  __ movq(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  __ bind(&done);
}
2786 |
|
2787 |
|
2788 |
// Materialize the requested root-list constant into the result register.
void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  __ LoadRoot(ToRegister(instr->result()), instr->index());
}
2792 |
|
2793 |
|
2794 |
// Fetch the external backing-store pointer field out of the array object.
void LCodeGen::DoLoadExternalArrayPointer(
    LLoadExternalArrayPointer* instr) {
  Register dst = ToRegister(instr->result());
  Register array = ToRegister(instr->object());
  __ movq(dst, FieldOperand(array,
                            ExternalPixelArray::kExternalPointerOffset));
}
2801 |
|
2802 |
|
2803 |
// Load one argument from an arguments frame. Uses a constant stack offset
// when both index and length are constants; otherwise computes the slot
// from length - index at runtime.
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    StackArgumentsAccessor args(arguments, const_length,
                                ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movq(result, args.GetArgumentOperand(const_index));
  } else {
    Register length = ToRegister(instr->length());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them add one more.
    if (instr->index()->IsRegister()) {
      __ subl(length, ToRegister(instr->index()));
    } else {
      __ subl(length, ToOperand(instr->index()));
    }
    StackArgumentsAccessor args(arguments, length,
                                ARGUMENTS_DONT_CONTAIN_RECEIVER);
    __ movq(result, args.GetArgumentOperand(0));
  }
}
2828 |
|
2829 |
|
2830 |
// Keyed load from an external (typed) array: dispatch on element kind to
// the correctly sized/signed load, widening floats to doubles and
// deoptimizing on uint32 values that don't fit a signed int32.
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyed (in this case) instructions force
    // the input representation for the key to be an integer, the input
    // gets replaced during bound check elimination with the index argument
    // to the bounds check, which can be tagged, so that case must be
    // handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      elements_kind,
      0,
      instr->additional_index()));

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    // Load the float and widen it to double in the result register.
    XMMRegister result(ToDoubleRegister(instr->result()));
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movsd(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case EXTERNAL_BYTE_ELEMENTS:
        __ movsxbq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
      case EXTERNAL_PIXEL_ELEMENTS:
        __ movzxbq(result, operand);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
        __ movsxwq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movzxwq(result, operand);
        break;
      case EXTERNAL_INT_ELEMENTS:
        __ movsxlq(result, operand);
        break;
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ movl(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          // A uint32 value with the top bit set doesn't fit int32;
          // deoptimize unless the instruction carries the Uint32 flag.
          __ testl(result, result);
          DeoptimizeIf(negative, instr->environment());
        }
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        UNREACHABLE();
        break;
    }
  }
}
2900 |
|
2901 |
|
2902 |
// Keyed load from a FixedDoubleArray into an XMM register, with an optional
// hole-NaN check that deoptimizes on holes.
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  XMMRegister result(ToDoubleRegister(instr->result()));
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyed instructions force the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }

  if (instr->hydrogen()->RequiresHoleCheck()) {
    // The hole is a NaN distinguished by its upper 32 bits; compare only
    // that word (offset skips the lower half of the double).
    int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
        sizeof(kHoleNanLower32);
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(),
        key,
        FAST_DOUBLE_ELEMENTS,
        offset,
        instr->additional_index());
    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr->environment());
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());
  __ movsd(result, double_load_operand);
}
2939 |
|
2940 |
|
2941 |
// Keyed load from a FixedArray of tagged values, with an optional hole
// check: smi-kind arrays deopt on non-smi, others deopt on the hole value.
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyedFastElement instructions force
    // the input representation for the key to be an integer, the input
    // gets replaced during bound check elimination with the index
    // argument to the bounds check, which can be tagged, so that
    // case must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }

  // Load the result.
  __ movq(result,
          BuildFastArrayOperand(instr->elements(),
                                key,
                                FAST_ELEMENTS,
                                FixedArray::kHeaderSize - kHeapObjectTag,
                                instr->additional_index()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      // In a smi array a hole can only show up as a non-smi value.
      Condition smi = __ CheckSmi(result);
      DeoptimizeIf(NegateCondition(smi), instr->environment());
    } else {
      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
      DeoptimizeIf(equal, instr->environment());
    }
  }
}
2977 |
|
2978 |
|
2979 |
// Dispatch a keyed load to the handler matching its backing-store kind:
// external (typed) array, unboxed double array, or tagged fixed array.
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_external()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}
2988 |
|
2989 |
|
2990 |
// Build a memory operand addressing element (key + additional_index) of an
// array, scaled by the element size of |elements_kind| and displaced by
// |offset|. Constant keys are folded into the displacement; register keys
// use scaled-index addressing.
Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    ElementsKind elements_kind,
    uint32_t offset,
    uint32_t additional_index) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int shift_size = ElementsKindToShiftSize(elements_kind);
  if (key->IsConstantOperand()) {
    int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
    // Guard against displacement overflow when the index is shifted below.
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   ((constant_value + additional_index) << shift_size)
                       + offset);
  } else {
    // The shift size doubles as the hardware scale factor (1/2/4/8).
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   offset + (additional_index << shift_size));
  }
}
3014 |
|
3015 |
|
3016 |
// Generic keyed load via the KeyedLoadIC; receiver in rdx, key in rax
// (IC calling convention).
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rax));

  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
3023 |
|
3024 |
|
3025 |
// Compute the frame pointer from which the function's arguments can be
// accessed: rbp itself, or the caller frame when an arguments-adaptor frame
// sits in between. For inlined frames a synthetic pointer below rsp is used.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    __ lea(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
  } else {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
    __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ movq(result, rbp);
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}
3051 |
|
3052 |
|
3053 |
// Compute the number of actual arguments: the static parameter count when
// there is no adaptor frame, otherwise the length slot of the adaptor frame.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  // (instr->elements() holds the frame pointer computed by
  // DoArgumentsElements; equal to rbp means "no adaptor".)
  if (instr->elements()->IsRegister()) {
    __ cmpq(rbp, ToRegister(instr->elements()));
  } else {
    __ cmpq(rbp, ToOperand(instr->elements()));
  }
  __ movl(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ movq(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ SmiToInteger32(result,
                    Operand(result,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));

  // Argument length is in result register.
  __ bind(&done);
}
3076 |
|
3077 |
|
3078 |
// Normalize the receiver for a function call: leave it untouched for strict
// or native functions, replace null/undefined with the global receiver, and
// deoptimize if it is not a spec object.
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label global_object, receiver_ok;

  // Do not transform the receiver to object for strict mode
  // functions.
  __ movq(kScratchRegister,
          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  __ testb(FieldOperand(kScratchRegister,
                        SharedFunctionInfo::kStrictModeByteOffset),
           Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Do not transform the receiver to object for builtins.
  __ testb(FieldOperand(kScratchRegister,
                        SharedFunctionInfo::kNativeByteOffset),
           Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
  __ j(not_equal, &receiver_ok, Label::kNear);

  // Normal function. Replace undefined or null with global receiver.
  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
  __ j(equal, &global_object, Label::kNear);
  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  Condition is_smi = __ CheckSmi(receiver);
  DeoptimizeIf(is_smi, instr->environment());
  __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
  DeoptimizeIf(below, instr->environment());
  __ jmp(&receiver_ok, Label::kNear);

  __ bind(&global_object);
  // TODO(kmillikin): We have a hydrogen value for the global object. See
  // if it's better to use it than to explicitly fetch it from the context
  // here.
  __ movq(receiver, ContextOperand(rsi, Context::GLOBAL_OBJECT_INDEX));
  __ movq(receiver,
          FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
  __ bind(&receiver_ok);
}
3124 |
|
3125 |
|
3126 |
// Implement Function.prototype.apply-style invocation: copy up to
// kArgumentsLimit arguments from the (possibly adapted) caller frame onto
// the stack, then invoke the function with the dynamic argument count.
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  ASSERT(receiver.is(rax));  // Used for parameter count.
  ASSERT(function.is(rdi));  // Required by InvokeFunction.
  ASSERT(ToRegister(instr->result()).is(rax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmpq(length, Immediate(kArgumentsLimit));
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ movq(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ testl(length, length);
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  StackArgumentsAccessor args(elements, length,
                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
  __ push(args.GetArgumentOperand(0));
  __ decl(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(rax);
  __ InvokeFunction(function, actual, CALL_FUNCTION,
                    safepoint_generator, CALL_AS_METHOD);
  // Restore the context register after the call.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
3168 |
|
3169 |
|
3170 |
// Push one tagged argument for an upcoming call.
void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->value();
  EmitPushTaggedOperand(argument);
}
3174 |
|
3175 |
|
3176 |
// Drop instr->count() entries from the top of the stack.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}
3179 |
|
3180 |
|
3181 |
// Load the current JSFunction from its fixed slot in the frame.
void LCodeGen::DoThisFunction(LThisFunction* instr) {
  __ movq(ToRegister(instr->result()),
          Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
3185 |
|
3186 |
|
3187 |
// Copy the context register (rsi) into the result register.
void LCodeGen::DoContext(LContext* instr) {
  __ movq(ToRegister(instr->result()), rsi);
}
3191 |
|
3192 |
|
3193 |
// Follow the PREVIOUS link of the context chain one level up.
void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register ctx = ToRegister(instr->context());
  Register dst = ToRegister(instr->result());
  __ movq(dst,
          Operand(ctx, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
3199 |
|
3200 |
|
3201 |
// Declare a batch of global variables/functions by calling into the
// runtime with (context, pairs, flags).
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  __ push(rsi);  // The context is the first argument.
  __ Push(instr->hydrogen()->pairs());
  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
3207 |
|
3208 |
|
3209 |
// Load the global object (reachable through the current context).
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  __ movq(ToRegister(instr->result()), GlobalObjectOperand());
}
3213 |
|
3214 |
|
3215 |
// Read the receiver field stored on the global object.
void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register global_obj = ToRegister(instr->global());
  Register dst = ToRegister(instr->result());
  __ movq(dst, FieldOperand(global_obj, GlobalObject::kGlobalReceiverOffset));
}
3220 |
|
3221 |
|
3222 |
// Call a statically-known JSFunction. Calls directly (skipping the arguments
// adaptor) when the formal parameter count matches the arity or the function
// doesn't adapt arguments; otherwise goes through the generic invoke path.
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count,
                                 int arity,
                                 LInstruction* instr,
                                 CallKind call_kind,
                                 RDIState rdi_state) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  LPointerMap* pointers = instr->pointer_map();

  if (can_invoke_directly) {
    // The callee is expected in rdi; materialize it if not already there.
    if (rdi_state == RDI_UNINITIALIZED) {
      __ Move(rdi, function);
    }

    // Change context.
    __ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

    // Set rax to arguments count if adaption is not needed. Assumes that rax
    // is available to write to at this point.
    if (dont_adapt_arguments) {
      __ Set(rax, arity);
    }

    // Invoke function.
    __ SetCallKind(rcx, call_kind);
    if (function.is_identical_to(info()->closure())) {
      // Recursive call to the function being compiled.
      __ CallSelf();
    } else {
      __ call(FieldOperand(rdi, JSFunction::kCodeEntryOffset));
    }

    // Set up deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
  } else {
    // We need to adapt arguments.
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(
        function, expected, count, CALL_FUNCTION, generator, call_kind);
  }

  // Restore context.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
3272 |
|
3273 |
|
3274 |
// Call a constant (statically-known) function; the result arrives in rax.
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  CallKnownFunction(instr->hydrogen()->function(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_METHOD,
                    RDI_UNINITIALIZED);
}
3283 |
|
3284 |
|
3285 |
// Deferred Math.abs for a tagged value: deoptimizes unless the value is a
// heap number; for a negative heap number allocates a fresh one holding the
// value with the sign bit cleared. Positive inputs are returned unchanged.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());

  Label slow, allocated, done;
  // Pick two scratch registers distinct from the input (and each other).
  Register tmp = input_reg.is(rax) ? rcx : rax;
  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ testl(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done);

  __ AllocateHeapNumber(tmp, tmp2, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(rax)) __ movq(tmp, rax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  __ bind(&allocated);
  // Copy the double bits and clear the top (sign) bit via shl/shr by 1.
  __ MoveDouble(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ shl(tmp2, Immediate(1));
  __ shr(tmp2, Immediate(1));
  __ MoveDouble(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}
3326 |
|
3327 |
|
3328 |
// In-place abs() of a 32-bit integer value. Deoptimizes when the input is
// kMinInt, whose negation is not representable (negl leaves the sign set).
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  __ testl(input, input);
  Label done;
  __ j(not_sign, &done, Label::kNear);
  __ negl(input);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&done);
}
3337 |
|
3338 |
|
3339 |
// In-place abs() of a smi (64-bit tagged) value. Deoptimizes when the
// negation overflows (the minimal smi value stays negative after neg).
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
  Register input = ToRegister(instr->value());
  __ testq(input, input);
  Label done;
  __ j(not_sign, &done, Label::kNear);
  __ neg(input);  // Sets flags.
  DeoptimizeIf(negative, instr->environment());
  __ bind(&done);
}
3348 |
|
3349 |
|
3350 |
// Math.abs, dispatched on the value's representation. The value and the
// result always share a register (asserted below).
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LMathAbs* instr_;
  };

  ASSERT(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    // |x| == x & (-x) bitwise: x and -x differ only in the sign bit, so
    // ANDing them clears the sign and preserves all other bits.
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);  // scratch = 0 - input = -input.
    __ andps(input_reg, scratch);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else if (r.IsSmi()) {
    EmitSmiMathAbs(instr);
  } else {  // Tagged case.
    // Smis are handled inline; heap numbers take the deferred slow path.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    EmitSmiMathAbs(instr);
    __ bind(deferred->exit());
  }
}
3387 |
|
3388 |
|
3389 |
// Math.floor of a double into a 32-bit integer result. Deoptimizes on NaN,
// on results outside int32 range, and (when required) on -0.
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize if minus zero.
      // Move the raw bit pattern into output_reg; subtracting 1 sets the
      // overflow flag only for 0x8000000000000000, i.e. the bits of -0.0.
      __ movq(output_reg, input_reg);
      __ subq(output_reg, Immediate(1));
      DeoptimizeIf(overflow, instr->environment());
    }
    __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
    __ cvttsd2si(output_reg, xmm_scratch);
    // cvttsd2si signals failure (NaN or out of int32 range) with 0x80000000.
    __ cmpl(output_reg, Immediate(0x80000000));
    DeoptimizeIf(equal, instr->environment());
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr->environment());  // Parity set means NaN.
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      // Input compares equal to zero: inspect the sign bit to tell -0 apart.
      __ movmskpd(output_reg, input_reg);
      __ testq(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr->environment());
      __ Set(output_reg, 0);
      __ jmp(&done);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, input_reg);
    // Overflow is signalled with minint.
    __ cmpl(output_reg, Immediate(0x80000000));
    DeoptimizeIf(equal, instr->environment());
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    // Truncation rounded towards zero (up); subtract one unless exact.
    __ cvttsd2si(output_reg, input_reg);
    __ Cvtlsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ subl(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr->environment());

    __ bind(&done);
  }
}
3446 |
|
3447 |
|
3448 |
// Math.round of a double to a 32-bit integer, i.e. floor(input + 0.5).
// input_reg may alias the value's source, so it is saved/restored around
// the branch that mutates it.
void LCodeGen::DoMathRound(LMathRound* instr) {
  const XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  static int64_t one_half = V8_INT64_C(0x3FE0000000000000);  // 0.5
  static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5

  Label done, round_to_zero, below_one_half, do_not_compensate, restore;
  __ movq(kScratchRegister, one_half, RelocInfo::NONE64);
  __ movq(xmm_scratch, kScratchRegister);
  __ ucomisd(xmm_scratch, input_reg);
  // NaN input falls through here (unordered is not 'above') and deopts on
  // the conversion-overflow check below.
  __ j(above, &below_one_half);

  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
  __ addsd(xmm_scratch, input_reg);
  __ cvttsd2si(output_reg, xmm_scratch);
  // Overflow is signalled with minint.
  __ cmpl(output_reg, Immediate(0x80000000));
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(equal, instr->environment());
  __ jmp(&done);

  __ bind(&below_one_half);
  __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64);
  __ movq(xmm_scratch, kScratchRegister);
  __ ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero);

  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ movq(kScratchRegister, input_reg);  // Back up input_reg.
  __ subsd(input_reg, xmm_scratch);
  __ cvttsd2si(output_reg, input_reg);
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmpl(output_reg, Immediate(0x80000000));
  __ RecordComment("D2I conversion overflow");
  DeoptimizeIf(equal, instr->environment());

  // If the truncation was not exact, it rounded up; subtract one.
  __ Cvtlsi2sd(xmm_scratch, output_reg);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(equal, &restore, Label::kNear);
  __ subl(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ bind(&restore);
  __ movq(input_reg, kScratchRegister);  // Restore input_reg.
  __ jmp(&done);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Any input with the sign bit set in this range would round to -0;
    // deoptimize so the caller can observe the -0 result.
    __ movq(output_reg, input_reg);
    __ testq(output_reg, output_reg);
    __ RecordComment("Minus zero");
    DeoptimizeIf(negative, instr->environment());
  }
  __ Set(output_reg, 0);
  __ bind(&done);
}
3507 |
|
3508 |
|
3509 |
// Math.sqrt: a single in-place sqrtsd on the shared input/result register.
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
  XMMRegister input = ToDoubleRegister(instr->value());
  ASSERT(ToDoubleRegister(instr->result()).is(input));
  __ sqrtsd(input, input);
}
3514 |
|
3515 |
|
3516 |
// Math.pow(x, 0.5), in place on the shared input/result register. This is
// sqrt(x) except that pow(-Infinity, 0.5) == +Infinity (where sqrt yields
// NaN), so -Infinity is special-cased first.
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity. According to IEEE-754, double-precision
  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64);
  __ movq(xmm_scratch, kScratchRegister);
  __ ucomisd(xmm_scratch, input_reg);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal. However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);  // 0 - (-Infinity) == +Infinity.
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
3546 |
|
3547 |
|
3548 |
// Math.pow via MathPowStub, with the stub variant chosen by the exponent's
// representation. The instruction is marked as a call, so the fixed register
// assignments (base in xmm2, exponent in rdx or xmm1, result in xmm3) follow
// the stub's calling convention and are only asserted here.
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.

  Register exponent = rdx;
  ASSERT(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(exponent));
  ASSERT(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    // A tagged exponent must be a smi or a heap number; deoptimize otherwise.
    Label no_deopt;
    __ JumpIfSmi(exponent, &no_deopt);
    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
    DeoptimizeIf(not_equal, instr->environment());
    __ bind(&no_deopt);
    MathPowStub stub(MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    ASSERT(exponent_type.IsDouble());
    MathPowStub stub(MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
3581 |
|
3582 |
|
3583 |
// Inline pseudo-random number generation: two 16-bit multiply-with-carry
// style steps whose 2x32-bit state lives in the native context's random-seed
// ByteArray. Produces a double in [0, 1) in the result register.
void LCodeGen::DoRandom(LRandom* instr) {
  // Assert that register size is twice the size of each seed.
  static const int kSeedSize = sizeof(uint32_t);
  STATIC_ASSERT(kPointerSize == 2 * kSeedSize);

  // Load native context
  Register global_object = ToRegister(instr->global_object());
  Register native_context = global_object;
  __ movq(native_context, FieldOperand(
          global_object, GlobalObject::kNativeContextOffset));

  // Load state (FixedArray of the native context's random seeds)
  static const int kRandomSeedOffset =
      FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
  Register state = native_context;
  __ movq(state, FieldOperand(native_context, kRandomSeedOffset));

  // Load state[0].
  Register state0 = ToRegister(instr->scratch());
  __ movl(state0, FieldOperand(state, ByteArray::kHeaderSize));
  // Load state[1].
  Register state1 = ToRegister(instr->scratch2());
  __ movl(state1, FieldOperand(state, ByteArray::kHeaderSize + kSeedSize));

  // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
  Register scratch3 = ToRegister(instr->scratch3());
  __ movzxwl(scratch3, state0);
  __ imull(scratch3, scratch3, Immediate(18273));
  __ shrl(state0, Immediate(16));
  __ addl(state0, scratch3);
  // Save state[0].
  __ movl(FieldOperand(state, ByteArray::kHeaderSize), state0);

  // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
  __ movzxwl(scratch3, state1);
  __ imull(scratch3, scratch3, Immediate(36969));
  __ shrl(state1, Immediate(16));
  __ addl(state1, scratch3);
  // Save state[1].
  __ movl(FieldOperand(state, ByteArray::kHeaderSize + kSeedSize), state1);

  // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
  Register random = state0;
  __ shll(random, Immediate(14));
  __ andl(state1, Immediate(0x3FFFF));
  __ addl(random, state1);

  // Convert 32 random bits in rax to 0.(32 random bits) in a double
  // by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
  XMMRegister result = ToDoubleRegister(instr->result());
  XMMRegister scratch4 = double_scratch0();
  __ movq(scratch3, V8_INT64_C(0x4130000000000000),
          RelocInfo::NONE64);  // 1.0 x 2^20 as double
  __ movq(scratch4, scratch3);
  // xorps splices the random bits into the mantissa of 1.0 x 2^20; the
  // subtraction then removes the implicit leading part.
  __ movd(result, random);
  __ xorps(result, scratch4);
  __ subsd(result, scratch4);
}
3642 |
|
3643 |
|
3644 |
// Math.exp is delegated entirely to the shared MathExpGenerator; this
// instruction only wires up its register arguments.
void LCodeGen::DoMathExp(LMathExp* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());
  XMMRegister output = ToDoubleRegister(instr->result());
  XMMRegister double_temp = double_scratch0();
  Register int_temp1 = ToRegister(instr->temp1());
  Register int_temp2 = ToRegister(instr->temp2());

  MathExpGenerator::EmitMathExp(
      masm(), value, output, double_temp, int_temp1, int_temp2);
}
3653 |
|
3654 |
|
3655 |
// Math.log, computed in place in the shared input/result register:
//   x > 0        -> ln(x) via x87 fyl2x (ln2 * log2(x)),
//   x == +/-0    -> -Infinity,
//   x < 0 or NaN -> canonical NaN.
void LCodeGen::DoMathLog(LMathLog* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  Label positive, done, zero;
  __ xorps(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(above, &positive, Label::kNear);
  // BUG FIX: ucomisd against NaN yields "unordered", which sets ZF (and CF),
  // so branching on 'equal' here would wrongly send NaN inputs to the
  // -Infinity path. 'not_carry' is set only for an ordered compare with
  // input == +/-0, so NaN falls through to the canonical-NaN path below.
  __ j(not_carry, &zero, Label::kNear);
  // Negative or NaN input: the result is the canonical NaN.
  ExternalReference nan =
      ExternalReference::address_of_canonical_non_hole_nan();
  Operand nan_operand = masm()->ExternalOperand(nan);
  __ movsd(input_reg, nan_operand);
  __ jmp(&done, Label::kNear);
  // log(+/-0) == -Infinity.
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  Operand ninf_operand = masm()->ExternalOperand(ninf);
  __ movsd(input_reg, ninf_operand);
  __ jmp(&done, Label::kNear);
  // Positive input: ln(x) = ln(2) * log2(x); fyl2x computes
  // st(1) * log2(st(0)). Round-trip through the stack to move the value
  // between the SSE and x87 register files.
  __ bind(&positive);
  __ fldln2();
  __ subq(rsp, Immediate(kDoubleSize));
  __ movsd(Operand(rsp, 0), input_reg);
  __ fld_d(Operand(rsp, 0));
  __ fyl2x();
  __ fstp_d(Operand(rsp, 0));
  __ movsd(input_reg, Operand(rsp, 0));
  __ addq(rsp, Immediate(kDoubleSize));
  __ bind(&done);
}
3686 |
|
3687 |
|
3688 |
// Math.tan through the transcendental cache stub; the UNTAGGED variant
// takes and returns the double in xmm1.
void LCodeGen::DoMathTan(LMathTan* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub tan_stub(TranscendentalCache::TAN,
                                   TranscendentalCacheStub::UNTAGGED);
  CallCode(tan_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
3694 |
|
3695 |
|
3696 |
// Math.cos through the transcendental cache stub; the UNTAGGED variant
// takes and returns the double in xmm1.
void LCodeGen::DoMathCos(LMathCos* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub cos_stub(TranscendentalCache::COS,
                                   TranscendentalCacheStub::UNTAGGED);
  CallCode(cos_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
3702 |
|
3703 |
|
3704 |
// Math.sin through the transcendental cache stub; the UNTAGGED variant
// takes and returns the double in xmm1.
void LCodeGen::DoMathSin(LMathSin* instr) {
  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
  TranscendentalCacheStub sin_stub(TranscendentalCache::SIN,
                                   TranscendentalCacheStub::UNTAGGED);
  CallCode(sin_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
3710 |
|
3711 |
|
3712 |
// Invokes the function held in rdi. When the target is statically known,
// CallKnownFunction handles the call (including context restoration);
// otherwise the generic InvokeFunction path is used with a lazy-deopt
// safepoint recorded at the call.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(rdi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
    // Restore the context register after the call.
    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(),
                      instr,
                      CALL_AS_METHOD,
                      RDI_CONTAINS_TARGET);
  }
}
3732 |
|
3733 |
|
3734 |
// Call of a property loaded by key: the keyed call IC expects the key in
// rcx and leaves the result in rax.
void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->result()).is(rax));

  Handle<Code> ic =
      isolate()->stub_cache()->ComputeKeyedCallInitialize(instr->arity());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // Restore the context register after the call.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
3744 |
|
3745 |
|
3746 |
// Named call through a call IC; the IC expects the property name in rcx
// and leaves the result in rax.
void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));

  RelocInfo::Mode reloc_mode = RelocInfo::CODE_TARGET;
  Handle<Code> call_ic =
      isolate()->stub_cache()->ComputeCallInitialize(instr->arity(),
                                                     reloc_mode);
  __ Move(rcx, instr->name());
  CallCode(call_ic, reloc_mode, instr);
  // Restore the context register after the call.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
3757 |
|
3758 |
|
3759 |
// Generic function call: target in rdi, result in rax.
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->function()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  CallFunctionStub call_stub(instr->arity(), NO_CALL_FUNCTION_FLAGS);
  CallCode(call_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  // Restore the context register after the call.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
3768 |
|
3769 |
|
3770 |
// Call of a global property: a context-dependent call IC
// (CODE_TARGET_CONTEXT) with the property name in rcx, result in rax.
void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  RelocInfo::Mode reloc_mode = RelocInfo::CODE_TARGET_CONTEXT;
  Handle<Code> call_ic =
      isolate()->stub_cache()->ComputeCallInitialize(instr->arity(),
                                                     reloc_mode);
  __ Move(rcx, instr->name());
  CallCode(call_ic, reloc_mode, instr);
  // Restore the context register after the call.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
3780 |
|
3781 |
|
3782 |
// Direct call to a statically known global function. RDI_UNINITIALIZED
// tells CallKnownFunction that rdi does not yet hold the target and must
// be loaded by the helper itself.
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(rax));
  CallKnownFunction(instr->hydrogen()->target(),
                    instr->hydrogen()->formal_parameter_count(),
                    instr->arity(),
                    instr,
                    CALL_AS_FUNCTION,
                    RDI_UNINITIALIZED);
}
3791 |
|
3792 |
|
3793 |
// 'new' invocation: constructor in rdi, argument count in rax, result in
// rax.
void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->constructor()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Set(rax, instr->arity());
  // Optimized code carries no cell for construct-type feedback, so pass
  // undefined in rbx instead.
  Handle<Object> no_feedback_cell(isolate()->factory()->undefined_value());
  __ Move(rbx, no_feedback_cell);
  CallConstructStub construct_stub(NO_CALL_FUNCTION_FLAGS);
  CallCode(construct_stub.GetCode(isolate()),
           RelocInfo::CONSTRUCT_CALL,
           instr);
}
3804 |
|
3805 |
|
3806 |
// Calls one of the specialized Array constructor stubs, selected by the
// statically known elements kind and the call arity. Constructor is in
// rdi, argument count goes in rax, the type-feedback cell in rbx.
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  ASSERT(ToRegister(instr->constructor()).is(rdi));
  ASSERT(ToRegister(instr->result()).is(rax));

  __ Set(rax, instr->arity());
  __ Move(rbx, instr->hydrogen()->property_cell());
  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;
  ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // new Array(n) with a non-zero length creates holes, so look at the
      // first (and only) argument on the stack and dispatch to the holey
      // variant of the stub unless the length is zero.
      __ movq(rcx, Operand(rsp, 0));
      __ testq(rcx, rcx);
      __ j(zero, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                              override_mode);
      CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
      __ jmp(&done);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
    CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
  }
}
3848 |
|
3849 |
|
3850 |
// Forwards a runtime call; save_doubles tells CallRuntime whether XMM
// registers must be preserved across the call.
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
3853 |
|
3854 |
|
3855 |
// Stores the entry address of |code_object| (its body, past the Code
// header) into the function's code-entry field.
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
  Register function = ToRegister(instr->function());
  Register entry = ToRegister(instr->code_object());
  __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
  __ movq(FieldOperand(function, JSFunction::kCodeEntryOffset), entry);
}
3861 |
|
3862 |
|
3863 |
// result = base + constant offset: a pointer to an object nested inside an
// enclosing allocation.
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register dst = ToRegister(instr->result());
  Register base_reg = ToRegister(instr->base_object());
  __ lea(dst, Operand(base_reg, instr->offset()));
}
3868 |
|
3869 |
|
3870 |
// Stores a named field. Handles external-memory backed fields, representation
// checks for tracked field types (smi / heap object / double), an optional
// map transition, out-of-object (properties backing store) fields, and the
// write barrier.
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    // Field lives outside the heap (e.g. behind an ExternalReference); no
    // write barrier is ever needed for it.
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    Register value = ToRegister(instr->value());
    if (instr->object()->IsConstantOperand()) {
      ASSERT(value.is(rax));
      ASSERT(!access.representation().IsSpecialization());
      LConstantOperand* object = LConstantOperand::cast(instr->object());
      __ store_rax(ToExternalReference(object));
    } else {
      Register object = ToRegister(instr->object());
      __ Store(MemOperand(object, offset), value, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  Handle<Map> transition = instr->transition();

  if (FLAG_track_fields && representation.IsSmi()) {
    // A smi field can only accept a smi constant; anything else deopts
    // unconditionally.
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (!IsSmiConstant(operand_value)) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    }
  } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
    // A heap-object field must not receive a smi: constants are checked
    // statically, register values get a dynamic smi check.
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      if (IsInteger32Constant(operand_value)) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      if (!instr->hydrogen()->value()->type().IsHeapObject()) {
        Register value = ToRegister(instr->value());
        Condition cc = masm()->CheckSmi(value);
        DeoptimizeIf(cc, instr->environment());
      }
    }
  } else if (FLAG_track_double_fields && representation.IsDouble()) {
    // Raw double field: store the value directly, no barrier, no transition.
    ASSERT(transition.is_null());
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(object, offset), value);
    return;
  }

  if (!transition.is_null()) {
    // Install the new map before the store.
    if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
      __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
    } else {
      Register temp = ToRegister(instr->temp());
      __ Move(kScratchRegister, transition);
      __ movq(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
      // Update the write barrier for the map field.
      __ RecordWriteField(object,
                          HeapObject::kMapOffset,
                          kScratchRegister,
                          temp,
                          kSaveFPRegs,
                          OMIT_REMEMBERED_SET,
                          OMIT_SMI_CHECK);
    }
  }

  // Do the store.
  // The barrier's smi check can be skipped when the value is statically
  // known to be a heap object.
  SmiCheck check_needed =
      instr->hydrogen()->value()->IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Register write_register = object;
  if (!access.IsInobject()) {
    // Out-of-object field: write into the properties backing store.
    write_register = ToRegister(instr->temp());
    __ movq(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      __ Store(FieldOperand(write_register, offset), value, representation);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
      __ Move(FieldOperand(write_register, offset), handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    __ Store(FieldOperand(write_register, offset), value, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        check_needed);
  }
}
3980 |
|
3981 |
|
3982 |
// Generic named store through the StoreIC: receiver in rdx, value in rax,
// property name in rcx. Strict mode selects the strict IC variant.
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->value()).is(rax));

  __ Move(rcx, instr->hydrogen()->name());
  bool is_strict = (instr->strict_mode_flag() == kStrictMode);
  Handle<Code> ic = is_strict
      ? isolate()->builtins()->StoreIC_Initialize_Strict()
      : isolate()->builtins()->StoreIC_Initialize();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
3992 |
|
3993 |
|
3994 |
// Reacts to a failed bounds check: normally a deoptimization on |cc|.
// Under --debug-code, a check that was statically proven unnecessary is
// still emitted as an int3 trap so a violated assumption is caught.
void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
    Label ok;
    __ j(NegateCondition(cc), &ok, Label::kNear);
    __ int3();
    __ bind(&ok);
  } else {
    DeoptimizeIf(cc, check->environment());
  }
}
4004 |
|
4005 |
|
4006 |
// Emits an array bounds check (length vs index) and fails via ApplyCheckIf
// when the index is out of range. Length and index may each be a register,
// stack operand or constant, and may be smi- or int32-represented.
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  if (instr->hydrogen()->skip_check()) return;

  if (instr->length()->IsRegister()) {
    Register reg = ToRegister(instr->length());
    if (!instr->hydrogen()->length()->representation().IsSmi()) {
      // int32 values must arrive zero-extended so the 64-bit compares
      // below are valid.
      __ AssertZeroExtended(reg);
    }
    if (instr->index()->IsConstantOperand()) {
      int32_t constant_index =
          ToInteger32(LConstantOperand::cast(instr->index()));
      if (instr->hydrogen()->length()->representation().IsSmi()) {
        __ Cmp(reg, Smi::FromInt(constant_index));
      } else {
        __ cmpq(reg, Immediate(constant_index));
      }
    } else {
      Register reg2 = ToRegister(instr->index());
      if (!instr->hydrogen()->index()->representation().IsSmi()) {
        __ AssertZeroExtended(reg2);
      }
      __ cmpq(reg, reg2);
    }
  } else {
    Operand length = ToOperand(instr->length());
    if (instr->index()->IsConstantOperand()) {
      int32_t constant_index =
          ToInteger32(LConstantOperand::cast(instr->index()));
      if (instr->hydrogen()->length()->representation().IsSmi()) {
        __ Cmp(length, Smi::FromInt(constant_index));
      } else {
        __ cmpq(length, Immediate(constant_index));
      }
    } else {
      __ cmpq(length, ToRegister(instr->index()));
    }
  }
  // The compare above was length - index: fail when length < index, or
  // when length <= index if index == length is not allowed.
  Condition condition =
      instr->hydrogen()->allow_equality() ? below : below_equal;
  ApplyCheckIf(condition, instr);
}
4047 |
|
4048 |
|
4049 |
// Stores into an external (typed) array, dispatching on the element kind
// for the store width and any needed conversion.
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyedFastElement instructions force
    // the input representation for the key to be an integer, the input
    // gets replaced during bound check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      elements_kind,
      0,
      instr->additional_index()));

  if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
    // Narrow the double value to float32 before storing.
    XMMRegister value(ToDoubleRegister(instr->value()));
    __ cvtsd2ss(value, value);
    __ movss(operand, value);
  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    // Integer kinds: pick the store width matching the element size.
    Register value(ToRegister(instr->value()));
    switch (elements_kind) {
      case EXTERNAL_PIXEL_ELEMENTS:
      case EXTERNAL_BYTE_ELEMENTS:
      case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
        __ movb(operand, value);
        break;
      case EXTERNAL_SHORT_ELEMENTS:
      case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
        __ movw(operand, value);
        break;
      case EXTERNAL_INT_ELEMENTS:
      case EXTERNAL_UNSIGNED_INT_ELEMENTS:
        __ movl(operand, value);
        break;
      case EXTERNAL_FLOAT_ELEMENTS:
      case EXTERNAL_DOUBLE_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case NON_STRICT_ARGUMENTS_ELEMENTS:
        // Float/double kinds were handled above; the remaining kinds never
        // reach this code path.
        UNREACHABLE();
        break;
    }
  }
}
4109 |
|
4110 |
|
4111 |
// Stores a double value into a FixedDoubleArray backing store, canonicalizing
// NaNs when required so that the hole NaN pattern is never written as data.
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  XMMRegister value = ToDoubleRegister(instr->value());
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyedFastElement instructions force
    // the input representation for the key to be an integer, the
    // input gets replaced during bound check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }

  if (instr->NeedsCanonicalization()) {
    Label have_value;

    // ucomisd of a value against itself sets PF only when the value is NaN
    // (unordered compare).  parity_odd == PF clear, so a non-NaN value jumps
    // straight to the store; a NaN falls through and is replaced below.
    __ ucomisd(value, value);
    __ j(parity_odd, &have_value);  // Not NaN: no canonicalization needed.

    // Replace the NaN with the canonical (non-hole) NaN bit pattern.
    __ Set(kScratchRegister, BitCast<uint64_t>(
        FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
    __ movq(value, kScratchRegister);

    __ bind(&have_value);
  }

  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      key,
      FAST_DOUBLE_ELEMENTS,
      FixedDoubleArray::kHeaderSize - kHeapObjectTag,
      instr->additional_index());

  __ movsd(double_store_operand, value);
}
4150 |
|
4151 |
|
4152 |
// Stores a tagged value (register or constant) into a FixedArray backing
// store, emitting a write barrier when the stored value may be a heap object.
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  Register elements = ToRegister(instr->elements());
  LOperand* key = instr->key();
  if (!key->IsConstantOperand()) {
    Register key_reg = ToRegister(key);
    // Even though the HLoad/StoreKeyedFastElement instructions force
    // the input representation for the key to be an integer, the
    // input gets replaced during bound check elimination with the index
    // argument to the bounds check, which can be tagged, so that case
    // must be handled here, too.
    if (instr->hydrogen()->IsDehoisted()) {
      // Sign extend key because it could be a 32 bit negative value
      // and the dehoisted address computation happens in 64 bits
      __ movsxlq(key_reg, key_reg);
    }
  }

  Operand operand =
      BuildFastArrayOperand(instr->elements(),
                            key,
                            FAST_ELEMENTS,
                            FixedArray::kHeaderSize - kHeapObjectTag,
                            instr->additional_index());
  if (instr->value()->IsRegister()) {
    __ movq(operand, ToRegister(instr->value()));
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsInteger32Constant(operand_value)) {
      // Integer constants are stored as smis.
      Smi* smi_value = Smi::FromInt(ToInteger32(operand_value));
      __ Move(operand, smi_value);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      __ Move(operand, handle_value);
    }
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // The write barrier path requires both value and key in registers (a
    // constant value never needs a barrier; a constant key is excluded by
    // the assert below because key_reg is clobbered with the slot address).
    ASSERT(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    ASSERT(!instr->key()->IsConstantOperand());
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    Register key_reg(ToRegister(key));
    __ lea(key_reg, operand);
    __ RecordWrite(elements,
                   key_reg,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed);
  }
}
4206 |
|
4207 |
|
4208 |
// Dispatches a keyed store to the appropriate specialized emitter based on
// the storage target: external (typed) array, double backing store, or a
// plain FixedArray of tagged values.
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
  if (instr->is_external()) {
    DoStoreKeyedExternalArray(instr);
    return;
  }
  if (instr->hydrogen()->value()->representation().IsDouble()) {
    DoStoreKeyedFixedDoubleArray(instr);
    return;
  }
  DoStoreKeyedFixedArray(instr);
}
4217 |
|
4218 |
|
4219 |
// Emits a call to the generic keyed-store IC stub.  The IC's calling
// convention fixes object/key/value in rdx/rcx/rax, which the register
// allocator must already have honored.
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(rdx));
  ASSERT(ToRegister(instr->key()).is(rcx));
  ASSERT(ToRegister(instr->value()).is(rax));

  // Select the strict- or sloppy-mode variant of the store IC.
  Handle<Code> ic;
  if (instr->strict_mode_flag() == kStrictMode) {
    ic = isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
  } else {
    ic = isolate()->builtins()->KeyedStoreIC_Initialize();
  }
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
4229 |
|
4230 |
|
4231 |
// Transitions an object's elements kind from from_kind to to_kind.  Simple
// map-change transitions just rewrite the map word inline; everything else
// goes through the TransitionElementsKindStub with a safepoint.
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  // Nothing to do if the object is not currently in from_map.
  Label not_applicable;
  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable);
  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    // Fast case: only the map word changes; store the new map and emit the
    // write barrier for the map field.
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
    __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
    // Write barrier.
    ASSERT_NE(instr->temp(), NULL);
    __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                        ToRegister(instr->temp()), kDontSaveFPRegs);
  } else {
    // Slow case: call the transition stub, which expects the object in rax
    // and the target map in rbx, and record a safepoint for the call.
    PushSafepointRegistersScope scope(this);
    if (!object_reg.is(rax)) {
      __ movq(rax, object_reg);
    }
    __ Move(rbx, to_map);
    TransitionElementsKindStub stub(from_kind, to_kind);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  }
  __ bind(&not_applicable);
}
4263 |
|
4264 |
|
4265 |
// Deoptimizes if the object (a JSArray) is directly followed by an
// AllocationMemento; otherwise falls through.
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register array_reg = ToRegister(instr->object());
  Register scratch = ToRegister(instr->temp());
  Label memento_absent;
  __ TestJSArrayForAllocationMemento(array_reg, scratch, &memento_absent);
  // The test macro leaves the flags such that 'equal' means a memento
  // was found right behind the array.
  DeoptimizeIf(equal, instr->environment());
  __ bind(&memento_absent);
}
4273 |
|
4274 |
|
4275 |
// Implements string concatenation by pushing both tagged operands and
// calling the StringAddStub.
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  EmitPushTaggedOperand(instr->left());
  EmitPushTaggedOperand(instr->right());
  StringAddStub add_stub(instr->hydrogen()->flags());
  CallCode(add_stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
}
4281 |
|
4282 |
|
4283 |
// Loads the character code at a given index of a string.  The fast path is
// handled inline by StringCharLoadGenerator; complex string shapes bail out
// to the deferred runtime call below.
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  // Deferred slow path: delegates to DoDeferredStringCharCodeAt.
  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharCodeAt(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}
4306 |
|
4307 |
|
4308 |
// Deferred slow path for DoStringCharCodeAt: calls the StringCharCodeAt
// runtime function and untags the smi result.
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Push(Smi::FromInt(const_index));
  } else {
    // Tag the runtime index register in place before pushing it.
    Register index = ToRegister(instr->index());
    __ Integer32ToSmi(index, index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2, instr);
  // The runtime call returns a smi in rax; untag it and publish it through
  // the safepoint slot of the result register.
  __ AssertSmi(rax);
  __ SmiToInteger32(rax, rax);
  __ StoreToSafepointRegisterSlot(result, rax);
}
4335 |
|
4336 |
|
4337 |
// Converts a character code to a one-character string.  One-byte codes are
// looked up in the single-character string cache; cache misses and codes
// above kMaxOneByteCharCode go through the deferred runtime call.
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  // Deferred slow path: delegates to DoDeferredStringCharFromCode.
  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  ASSERT(!char_code.is(result));

  // Codes above the one-byte range cannot be served from the cache.
  __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
  __ j(above, deferred->entry());
  // Sign-extend so the 64-bit scaled index computation below is correct.
  __ movsxlq(char_code, char_code);
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ movq(result, FieldOperand(result,
                               char_code, times_pointer_size,
                               FixedArray::kHeaderSize));
  // An undefined cache entry means the string is not cached yet: take the
  // slow path.
  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}
4369 |
|
4370 |
|
4371 |
// Deferred slow path for DoStringCharFromCode: calls the CharFromCode
// runtime function with the smi-tagged character code.
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Set(result, 0);

  PushSafepointRegistersScope scope(this);
  // The runtime call expects a smi argument.
  __ Integer32ToSmi(char_code, char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr);
  __ StoreToSafepointRegisterSlot(result, rax);
}
4386 |
|
4387 |
|
4388 |
// Converts a signed 32-bit integer, living either in a register or in a
// stack slot, into a double in the result XMM register.
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* src = instr->value();
  LOperand* dst = instr->result();
  ASSERT(src->IsRegister() || src->IsStackSlot());
  ASSERT(dst->IsDoubleRegister());
  XMMRegister dst_reg = ToDoubleRegister(dst);
  if (src->IsRegister()) {
    __ Cvtlsi2sd(dst_reg, ToRegister(src));
  } else {
    __ Cvtlsi2sd(dst_reg, ToOperand(src));
  }
}
4399 |
|
4400 |
|
4401 |
// Tags a 32-bit integer as a smi, deoptimizing on overflow unless range
// analysis proved the value fits in smi range.
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* output = instr->result();
  __ Integer32ToSmi(ToRegister(output), ToRegister(input));
  // The overflow flag consumed here is set by Integer32ToSmi immediately
  // above; only check it when the value is not statically known to fit.
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange()) {
    DeoptimizeIf(overflow, instr->environment());
  }
}
4411 |
|
4412 |
|
4413 |
// Converts an unsigned 32-bit integer to a double.  LoadUint32 takes a
// scratch XMM register for the conversion.
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ LoadUint32(ToDoubleRegister(instr->result()),
                ToRegister(instr->value()),
                ToDoubleRegister(instr->temp()));
}
4422 |
|
4423 |
|
4424 |
// Tags an unsigned 32-bit integer as a smi, deoptimizing when the value has
// the top bit set (i.e. would be negative when reinterpreted as signed).
void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  LOperand* output = instr->result();
  if (!instr->hydrogen()->value()->HasRange() ||
      !instr->hydrogen()->value()->range()->IsInSmiRange() ||
      instr->hydrogen()->value()->range()->upper() == kMaxInt) {
    // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
    // interval, so we treat kMaxInt as a sentinel for this entire interval.
    // Test the sign bit: values >= 2^31 cannot be represented as smis here.
    __ testl(ToRegister(input), Immediate(0x80000000));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ Integer32ToSmi(ToRegister(output), ToRegister(input));
}
4438 |
|
4439 |
|
4440 |
// Tags a 32-bit integer as a smi in place.  On x64 every int32 fits in a
// smi, so no overflow handling is required here.
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  LOperand* value = instr->value();
  ASSERT(value->IsRegister() && value->Equals(instr->result()));
  Register value_reg = ToRegister(value);
  __ Integer32ToSmi(value_reg, value_reg);
}
4447 |
|
4448 |
|
4449 |
// Tags an unsigned 32-bit integer.  Values that fit in a smi are tagged
// inline; larger values go to the deferred path, which boxes them in a
// heap number.
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  // Deferred slow path: delegates to DoDeferredNumberTagU.
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagU(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
  // Unsigned compare: anything above Smi::kMaxValue needs a heap number.
  __ cmpl(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ Integer32ToSmi(reg, reg);
  __ bind(deferred->exit());
}
4472 |
|
4473 |
|
4474 |
// Deferred path for DoNumberTagU: allocates a heap number for a uint32 value
// that does not fit in a smi, falling back to the runtime allocator when
// inline allocation fails or is disabled.
void LCodeGen::DoDeferredNumberTagU(LNumberTagU* instr) {
  Label slow;
  Register reg = ToRegister(instr->value());
  // Pick any register other than 'reg' as an allocation scratch.
  Register tmp = reg.is(rax) ? rcx : rax;
  XMMRegister temp_xmm = ToDoubleRegister(instr->temp());

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  Label done;
  // Load value into temp_xmm which will be preserved across potential call to
  // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
  // XMM registers on x64).
  XMMRegister xmm_scratch = double_scratch0();
  __ LoadUint32(temp_xmm, reg, xmm_scratch);

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // Put a valid pointer value in the stack slot where the result
  // register is stored, as this register is in the pointer map, but contains an
  // integer value.
  __ StoreToSafepointRegisterSlot(reg, Immediate(0));

  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
  if (!reg.is(rax)) __ movq(reg, rax);

  // Done. Put the value in temp_xmm into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
  __ StoreToSafepointRegisterSlot(reg, reg);
}
4512 |
|
4513 |
|
4514 |
// Boxes a double value into a freshly allocated heap number.  Allocation
// failure (or disabled inline allocation) is handled by the deferred path.
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  // Deferred slow path: delegates to DoDeferredNumberTagD.
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, deferred->entry());
  } else {
    // Inline allocation disabled: always take the runtime path.
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  // Store the double payload into the (inline- or runtime-) allocated number.
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
4540 |
|
4541 |
|
4542 |
// Deferred path for DoNumberTagD: allocates a heap number via the runtime
// and leaves its address in the result register.
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Smi::FromInt(0));

  {
    PushSafepointRegistersScope scope(this);
    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
    // Ensure that value in rax survives popping registers.
    __ movq(kScratchRegister, rax);
  }
  // Move the result out of the scratch register only after the safepoint
  // scope has restored the saved registers.
  __ movq(reg, kScratchRegister);
}
4557 |
|
4558 |
|
4559 |
// Tags an integer as a smi in place; the hydrogen value is guaranteed not
// to overflow, so no check is emitted.
void LCodeGen::DoSmiTag(LSmiTag* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
  Register value_reg = ToRegister(instr->value());
  __ Integer32ToSmi(value_reg, value_reg);
}
4565 |
|
4566 |
|
4567 |
// Untags a smi in place.  When the input may not actually be a smi, a tag
// check is emitted and failure deoptimizes; otherwise only a debug assert.
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  ASSERT(instr->value()->Equals(instr->result()));
  Register reg = ToRegister(instr->value());
  if (instr->needs_check()) {
    Condition smi_cc = __ CheckSmi(reg);
    DeoptimizeIf(NegateCondition(smi_cc), instr->environment());
  } else {
    __ AssertSmi(reg);
  }
  __ SmiToInteger32(reg, reg);
}
4578 |
|
4579 |
|
4580 |
// Converts a tagged number in input_reg into a double in result_reg.
// Handles smis and heap numbers; optionally converts undefined to NaN and
// optionally deoptimizes on -0.  Deoptimizes on any other input.
// Clobbers kScratchRegister.
void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    // On x64 it is safe to load at heap number offset before evaluating the map
    // check, since all heap objects are at least two words long.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (can_convert_undefined_to_nan) {
      // Non-heap-number: try the undefined->NaN conversion below.
      __ j(not_equal, &convert);
    } else {
      DeoptimizeIf(not_equal, env);
    }

    if (deoptimize_on_minus_zero) {
      // The value equals 0.0 but has its sign bit set iff it is -0; check
      // the sign bit via movmskpd after an equality compare against +0.
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, result_reg);
      __ j(not_equal, &done, Label::kNear);
      __ movmskpd(kScratchRegister, result_reg);
      __ testq(kScratchRegister, Immediate(1));
      DeoptimizeIf(not_zero, env);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(not_equal, env);

      __ xorps(result_reg, result_reg);
      __ divsd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  // Smi to XMM conversion
  __ bind(&load_smi);
  __ SmiToInteger32(kScratchRegister, input_reg);
  __ Cvtlsi2sd(result_reg, kScratchRegister);
  __ bind(&done);
}
4638 |
|
4639 |
|
4640 |
// Deferred path for DoTaggedToI: converts a non-smi tagged value to an
// int32.  Truncating conversions also accept undefined/true/false oddballs;
// non-truncating conversions delegate to the TaggedToI macro and deoptimize
// on failure.
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
    __ j(not_equal, &check_bools, Label::kNear);
    __ Set(input_reg, 0);
    __ jmp(done);

    __ bind(&check_bools);
    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
    __ j(not_equal, &check_false, Label::kNear);
    __ Set(input_reg, 1);
    __ jmp(done);

    __ bind(&check_false);
    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
    __ RecordComment("Deferred TaggedToI: cannot truncate");
    DeoptimizeIf(not_equal, instr->environment());
    __ Set(input_reg, 0);
    __ jmp(done);
  } else {
    // Exact conversion: TaggedToI branches to bailout on any lossy case
    // (including -0, depending on the minus-zero mode).
    Label bailout;
    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ TaggedToI(input_reg, input_reg, xmm_temp,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(done);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
  }
}
4684 |
|
4685 |
|
4686 |
// Converts a tagged value to an int32 in place.  Smis are untagged inline;
// anything else goes through the deferred path above.
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  // Deferred slow path: delegates to DoDeferredTaggedToI, jumping back to
  // 'done' (the deferred code's exit) on success.
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredTaggedToI(instr_, done());
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));
  Register input_reg = ToRegister(input);

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    // Statically known to be a smi: just untag.
    __ SmiToInteger32(input_reg, input_reg);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
    __ JumpIfNotSmi(input_reg, deferred->entry());
    __ SmiToInteger32(input_reg, input_reg);
    __ bind(deferred->exit());
  }
}
4713 |
|
4714 |
|
4715 |
// Converts a tagged value to a double by delegating to EmitNumberUntagD,
// choosing the untag mode from the hydrogen value's representation.
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  LOperand* result = instr->result();
  ASSERT(input->IsRegister());
  ASSERT(result->IsDoubleRegister());

  Register tagged_reg = ToRegister(input);
  XMMRegister untagged_reg = ToDoubleRegister(result);

  // A value known to be a smi skips the heap-number handling entirely.
  NumberUntagDMode mode;
  if (instr->hydrogen()->value()->representation().IsSmi()) {
    mode = NUMBER_CANDIDATE_IS_SMI;
  } else {
    mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
  }

  EmitNumberUntagD(tagged_reg, untagged_reg,
                   instr->hydrogen()->can_convert_undefined_to_nan(),
                   instr->hydrogen()->deoptimize_on_minus_zero(),
                   instr->environment(),
                   mode);
}
4734 |
|
4735 |
|
4736 |
// Converts a double to an int32.  Truncating conversions use the dedicated
// truncation macro; exact conversions bail out (and deoptimize) on any
// value that cannot be represented exactly.
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label bailout, done;
    XMMRegister xmm_scratch = double_scratch0();
    // DoubleToI jumps to bailout on lossy conversion (including -0,
    // depending on the minus-zero mode).
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

    __ jmp(&done, Label::kNear);
    __ bind(&bailout);
    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&done);
  }
}
4759 |
|
4760 |
|
4761 |
// Converts a double to a smi: exact double->int32 conversion followed by
// smi tagging, deoptimizing on lossy conversion or tag overflow.
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  Label bailout, done;
  XMMRegister xmm_scratch = double_scratch0();
  // Exact conversion only; any inexact case jumps to bailout.
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);

  __ jmp(&done, Label::kNear);
  __ bind(&bailout);
  DeoptimizeIf(no_condition, instr->environment());
  __ bind(&done);

  // The overflow flag consumed below is set by Integer32ToSmi.
  __ Integer32ToSmi(result_reg, result_reg);
  DeoptimizeIf(overflow, instr->environment());
}
4783 |
|
4784 |
|
4785 |
// Deoptimizes when the value does not carry a smi tag.
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  Condition is_smi = masm()->CheckSmi(ToRegister(instr->value()));
  DeoptimizeIf(NegateCondition(is_smi), instr->environment());
}
4790 |
|
4791 |
|
4792 |
// Deoptimizes when the value IS a smi.  Skipped entirely when hydrogen has
// already proven the value to be a heap object.
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (instr->hydrogen()->value()->IsHeapObject()) return;
  Condition is_smi = masm()->CheckSmi(ToRegister(instr->value()));
  DeoptimizeIf(is_smi, instr->environment());
}
4799 |
|
4800 |
|
4801 |
// Deoptimizes unless the object's instance type satisfies the check: either
// membership in a contiguous [first, last] interval, or a mask-and-tag test
// on the instance type byte.  Clobbers kScratchRegister.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());

  __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
            Immediate(static_cast<int8_t>(first)));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr->environment());
    } else {
      // Reuses the flags of the cmpb above: below 'first' fails.
      DeoptimizeIf(below, instr->environment());
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(static_cast<int8_t>(last)));
        DeoptimizeIf(above, instr->environment());
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (IsPowerOf2(mask)) {
      // Single-bit mask: a testb suffices; the expected tag is either 0 or
      // the mask bit itself.
      ASSERT(tag == 0 || IsPowerOf2(tag));
      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
               Immediate(mask));
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
    } else {
      // General case: load, mask, then compare against the tag.
      __ movzxbl(kScratchRegister,
                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
      __ andb(kScratchRegister, Immediate(mask));
      __ cmpb(kScratchRegister, Immediate(tag));
      DeoptimizeIf(not_equal, instr->environment());
    }
  }
}
4845 |
|
4846 |
|
4847 |
// Deoptimizes unless the register holds exactly the expected object.
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register value_reg = ToRegister(instr->value());
  __ Cmp(value_reg, instr->hydrogen()->object().handle());
  DeoptimizeIf(not_equal, instr->environment());
}
4852 |
|
4853 |
|
4854 |
// Deferred path for DoCheckMaps: attempts to migrate the instance via the
// runtime.  Deoptimizes if migration failed (the runtime returned a smi —
// the smi-tag test runs inside the safepoint scope, the deopt after it).
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    CallRuntimeFromDeferred(Runtime::kMigrateInstance, 1, instr);
    __ testq(rax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr->environment());
}
4863 |
|
4864 |
|
4865 |
// Deoptimizes unless the object's map is in the instruction's map set.  When
// a migration target exists, a failed check first tries instance migration
// (deferred) and then re-runs the map checks.
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  // Deferred path whose exit loops back to the map checks (SetExit), so the
  // checks are retried after a successful migration.
  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      SetExit(check_maps());
    }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->CanOmitMapChecks()) return;

  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->has_migration_target()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  // Compare against every map but the last; any match short-circuits to
  // success.
  UniqueSet<Map> map_set = instr->hydrogen()->map_set();
  Label success;
  for (int i = 0; i < map_set.size() - 1; i++) {
    Handle<Map> map = map_set.at(i).handle();
    __ CompareMap(reg, map, &success);
    __ j(equal, &success);
  }

  // The last map decides: mismatch either retries via migration or deopts.
  Handle<Map> map = map_set.at(map_set.size() - 1).handle();
  __ CompareMap(reg, map, &success);
  if (instr->hydrogen()->has_migration_target()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr->environment());
  }

  __ bind(&success);
}
4913 |
|
4914 |
|
4915 |
// Clamps a double into the uint8 range [0, 255], producing an integer
// result via the ClampDoubleToUint8 macro.
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  __ ClampDoubleToUint8(ToDoubleRegister(instr->unclamped()),
                        double_scratch0(),
                        ToRegister(instr->result()));
}
4921 |
|
4922 |
|
4923 |
// Clamps an integer into the uint8 range [0, 255] in place.
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register reg = ToRegister(instr->result());
  __ ClampUint8(reg);
}
4928 |
|
4929 |
|
4930 |
// Clamps a tagged value into the uint8 range in place.  Smis are untagged
// and clamped; heap numbers are clamped as doubles; undefined becomes 0;
// anything else deoptimizes.
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  ASSERT(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ Cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr->environment());
  __ movq(input_reg, Immediate(0));
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiToInteger32(input_reg, input_reg);
  __ ClampUint8(input_reg);

  __ bind(&done);
}
4964 |
|
4965 |
|
4966 |
// Emits an inline allocation of instr->size() bytes, with a deferred
// runtime fallback when the inline allocation fails.  The tagged result
// pointer ends up in instr->result(); temp is clobbered as scratch.
void LCodeGen::DoAllocate(LAllocate* instr) {
  // Deferred code object: jumps here fall back to DoDeferredAllocate,
  // which performs the allocation through the runtime.
  class DeferredAllocate V8_FINAL : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredAllocate(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred =
      new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  // The three target spaces are mutually exclusive (asserted below);
  // new space is the default when no pretenure flag is set.
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
  }

  // The size is either a compile-time constant or held in a register.
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  // Optionally fill the new object with one-pointer filler maps, looping
  // from the last word down to the first (temp counts words, decremented
  // to zero).
  if (instr->hydrogen()->MustPrefillWithFiller()) {
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ movl(temp, Immediate((size / kPointerSize) - 1));
    } else {
      // Reuse the size register as the loop counter: bytes -> words - 1.
      temp = ToRegister(instr->size());
      __ sar(temp, Immediate(kPointerSizeLog2));
      __ decl(temp);
    }
    Label loop;
    __ bind(&loop);
    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
            isolate()->factory()->one_pointer_filler_map());
    __ decl(temp);
    __ j(not_zero, &loop);
  }
}
5026 |
|
5027 |
|
5028 |
// Deferred (slow) path for LAllocate: performs the allocation through the
// runtime, with all registers saved around the call.  The runtime result
// (in rax) is written back into the safepoint slot of the result register.
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Smi::FromInt(0));

  PushSafepointRegistersScope scope(this);
  // Push the allocation size (as a smi) as the single runtime argument.
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ Integer32ToSmi(size, size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    __ Push(Smi::FromInt(size));
  }

  // Dispatch to the runtime function matching the target space; the three
  // space choices are mutually exclusive (asserted below).
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldPointerSpace, 1, instr);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    CallRuntimeFromDeferred(Runtime::kAllocateInOldDataSpace, 1, instr);
  } else {
    CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
  }
  // The allocated object is returned in rax; store it where the safepoint
  // mechanism will restore it into the result register.
  __ StoreToSafepointRegisterSlot(result, rax);
}
5059 |
|
5060 |
|
5061 |
// Transitions the object (expected in rax) to fast properties by calling
// the Runtime::kToFastProperties function with the object as its argument.
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  ASSERT(ToRegister(instr->value()).is(rax));
  __ push(rax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
5066 |
|
5067 |
|
5068 |
// Materializes a regexp literal: fetches the boilerplate from the literals
// array (creating it through the runtime on first use), then allocates a
// fresh JSRegExp object and copies the boilerplate's fields into it.
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  Label materialized;
  // Registers will be used as follows:
  // rcx = literals array.
  // rbx = regexp literal.
  // rax = regexp literal clone.
  int literal_offset =
      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
  __ Move(rcx, instr->hydrogen()->literals());
  __ movq(rbx, FieldOperand(rcx, literal_offset));
  // An undefined slot means the boilerplate has not been created yet.
  __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &materialized, Label::kNear);

  // Create regexp literal using runtime function.
  // Result will be in rax.
  __ push(rcx);
  __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
  __ Push(instr->hydrogen()->pattern());
  __ Push(instr->hydrogen()->flags());
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ movq(rbx, rax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  // Try an inline new-space allocation first; fall back to the runtime.
  __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  // Preserve the boilerplate (rbx) across the runtime call.
  __ push(rbx);
  __ Push(Smi::FromInt(size));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(rbx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll copy loop once for better throughput).
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ movq(rdx, FieldOperand(rbx, i));
    __ movq(rcx, FieldOperand(rbx, i + kPointerSize));
    __ movq(FieldOperand(rax, i), rdx);
    __ movq(FieldOperand(rax, i + kPointerSize), rcx);
  }
  // Copy the trailing word when the size is an odd number of pointers.
  if ((size % (2 * kPointerSize)) != 0) {
    __ movq(rdx, FieldOperand(rbx, size - kPointerSize));
    __ movq(FieldOperand(rax, size - kPointerSize), rdx);
  }
}
5116 |
|
5117 |
|
5118 |
// Creates a closure for a function literal.  Uses the FastNewClosureStub
// when the function is not pretenured and needs no literals cloning;
// otherwise falls back to Runtime::kNewClosure with the context, shared
// function info, and pretenure flag as arguments.
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  bool pretenure = instr->hydrogen()->pretenure();
  if (!pretenure && instr->hydrogen()->has_no_literals()) {
    FastNewClosureStub stub(instr->hydrogen()->language_mode(),
                            instr->hydrogen()->is_generator());
    // The stub expects the shared function info in rbx.
    __ Move(rbx, instr->hydrogen()->shared_info());
    CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
  } else {
    // Slow path: push context (rsi), shared info, and the pretenure flag.
    __ push(rsi);
    __ Push(instr->hydrogen()->shared_info());
    __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
                            Heap::kFalseValueRootIndex);
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}
5135 |
|
5136 |
|
5137 |
// Pushes the tagged operand and lets the runtime compute its type string.
void LCodeGen::DoTypeof(LTypeof* instr) {
  EmitPushTaggedOperand(instr->value());
  CallRuntime(Runtime::kTypeof, 1, instr);
}
5142 |
|
5143 |
|
5144 |
// Pushes a tagged operand on the stack, dispatching on its kind:
// constants are pushed as handles, register operands directly, and
// anything else as a memory operand.  Double registers are not tagged
// values and are rejected by the assert.
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  ASSERT(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    __ Push(ToHandle(LConstantOperand::cast(operand)));
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    __ push(ToOperand(operand));
  }
}
5154 |
|
5155 |
|
5156 |
// Branches on the result of a `typeof value == literal` comparison.
// EmitTypeofIs either jumps directly to the true/false labels or hands
// back a condition code for us to branch on here.
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register value = ToRegister(instr->value());
  Condition cond = EmitTypeofIs(instr->TrueLabel(chunk_),
                                instr->FalseLabel(chunk_),
                                value,
                                instr->type_literal());
  if (cond != no_condition) {
    EmitBranch(instr, cond);
  }
}
5166 |
|
5167 |
|
5168 |
// Emits the comparison for `typeof input == type_name`.  Cases that can be
// decided early jump straight to true_label/false_label; otherwise the
// flags are set and the returned condition tells the caller which branch
// to emit.  Returns no_condition when the type name matches nothing and an
// unconditional jump to false_label has already been emitted.
// NOTE(review): input is clobbered in several branches (CmpObjectType
// writes the map into it).
Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(heap()->number_string())) {
    // Smis and heap numbers are "number".
    __ JumpIfSmi(input, true_label);
    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);

    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->string_string())) {
    // Non-smi with a string instance type that is not undetectable.
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    __ j(above_equal, false_label);
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else if (type_name->Equals(heap()->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->boolean_string())) {
    // Only the true and false oddballs are "boolean".
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ j(equal, true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = equal;

  } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
    // Under --harmony-typeof, typeof null == "null".
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ j(equal, true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ movq(input, FieldOperand(input, HeapObject::kMapOffset));
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = not_zero;

  } else if (type_name->Equals(heap()->function_string())) {
    // Both JS functions and function proxies are "function".
    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
    __ JumpIfSmi(input, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(heap()->object_string())) {
    __ JumpIfSmi(input, false_label);
    if (!FLAG_harmony_typeof) {
      // Classic mode: typeof null == "object".
      __ CompareRoot(input, Heap::kNullValueRootIndex);
      __ j(equal, true_label);
    }
    // Non-callable spec objects within the instance-type range.
    __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
    __ j(above, false_label);
    // Check for undetectable objects => false.
    __ testb(FieldOperand(input, Map::kBitFieldOffset),
             Immediate(1 << Map::kIsUndetectable));
    final_branch_condition = zero;

  } else {
    // Unknown type name: never matches.
    __ jmp(false_label);
  }

  return final_branch_condition;
}
5242 |
|
5243 |
|
5244 |
// Branches on whether the current function was invoked as a constructor.
// EmitIsConstructCall sets the equal flag when the calling frame carries
// the CONSTRUCT marker.
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  EmitIsConstructCall(ToRegister(instr->temp()));
  EmitBranch(instr, equal);
}
5250 |
|
5251 |
|
5252 |
// Sets the equal flag iff the calling frame is a construct frame.
// Walks up to the caller's frame (skipping an arguments-adaptor frame if
// present) and compares its marker word against StackFrame::CONSTRUCT.
// Clobbers temp.
void LCodeGen::EmitIsConstructCall(Register temp) {
  // Get the frame pointer for the calling frame.
  __ movq(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists.
  Label check_frame_marker;
  __ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &check_frame_marker, Label::kNear);
  __ movq(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
         Smi::FromInt(StackFrame::CONSTRUCT));
}
5268 |
|
5269 |
|
5270 |
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
5271 |
if (info()->IsStub()) return; |
5272 |
// Ensure that we have enough space after the previous lazy-bailout
|
5273 |
// instruction for patching the code here.
|
5274 |
int current_pc = masm()->pc_offset();
|
5275 |
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
|
5276 |
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
|
5277 |
__ Nop(padding_size); |
5278 |
} |
5279 |
} |
5280 |
|
5281 |
|
5282 |
// Records a lazy-deoptimization point: reserves patchable space, notes the
// current pc as the last lazy-deopt position, and registers the
// environment and safepoint deoptimization index.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  last_lazy_deopt_pc_ = masm()->pc_offset();
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
5290 |
|
5291 |
|
5292 |
// Emits an unconditional deoptimization with the bailout type requested by
// the hydrogen instruction.
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType bailout_type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && bailout_type == Deoptimizer::EAGER) {
    bailout_type = Deoptimizer::LAZY;
  }

  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
  DeoptimizeIf(no_condition, instr->environment(), bailout_type);
}
5305 |
|
5306 |
|
5307 |
// Intentionally empty: no code is generated for a dummy use.
void LCodeGen::DoDummyUse(LDummyUse* instr) {
}
5310 |
|
5311 |
|
5312 |
// Deferred (slow) path for LStackCheck: calls Runtime::kStackGuard with
// all registers saved, records a safepoint with lazy deopt support, and
// registers the deoptimization index for the environment.
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  // Reload the context from the frame before calling into the runtime.
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
5321 |
|
5322 |
|
5323 |
// Emits a stack-overflow check.  At function entry the check calls the
// StackCheck builtin inline; at a backwards branch it jumps to deferred
// code that calls the stack guard through the runtime.
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Deferred code object for the backwards-branch case; generates the
  // runtime stack-guard call via DoDeferredStackCheck.
  class DeferredStackCheck V8_FINAL : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredStackCheck(instr_);
    }
    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
   private:
    LStackCheck* instr_;
  };

  ASSERT(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(above_equal, &done, Label::kNear);
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(&done);
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
  } else {
    ASSERT(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    last_lazy_deopt_pc_ = masm()->pc_offset();
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}
5370 |
|
5371 |
|
5372 |
// Pseudo-instruction marking the on-stack-replacement entry point: it
// registers the environment for deoptimization (recording the assembler's
// pc offset) and emits the OSR prologue.
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  LEnvironment* env = instr->environment();
  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!env->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(env, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}
5385 |
|
5386 |
|
5387 |
// Prepares the enumerable (expected in rax) for a for-in loop.  Deoptimizes
// for undefined, null, smis, and proxies.  If the enum cache is valid the
// object's map is loaded into rax; otherwise the property names are fetched
// through the runtime (which must return a map, else deopt).
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  // undefined is not enumerable: deoptimize.
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(equal, instr->environment());

  // null is not enumerable: deoptimize.
  Register null_value = rdi;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ cmpq(rax, null_value);
  DeoptimizeIf(equal, instr->environment());

  // Smis are not enumerable: deoptimize.
  Condition cc = masm()->CheckSmi(rax);
  DeoptimizeIf(cc, instr->environment());

  // Proxies (the lowest spec-object instance types) deoptimize as well.
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
  DeoptimizeIf(below_equal, instr->environment());

  Label use_cache, call_runtime;
  __ CheckEnumCache(null_value, &call_runtime);

  // Fast path: use the enum cache; leave the map in rax.
  __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(rax);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime fast path must have returned a map (meta map check);
  // otherwise deoptimize.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kMetaMapRootIndex);
  DeoptimizeIf(not_equal, instr->environment());
  __ bind(&use_cache);
}
5419 |
|
5420 |
|
5421 |
// Loads the for-in enum cache array for the given map into the result
// register.  An enum length of zero yields the empty fixed array; a
// non-array (smi) result deoptimizes.
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  // No enumerable properties: use the canonical empty fixed array.
  __ Cmp(result, Smi::FromInt(0));
  __ j(not_equal, &load_cache);
  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
  __ jmp(&done);
  // Otherwise fetch the enum cache from the instance descriptors.
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ movq(result,
          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ movq(result,
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  // A smi here means there was no cache array: deoptimize.
  Condition cc = masm()->CheckSmi(result);
  DeoptimizeIf(cc, instr->environment());
}
5440 |
|
5441 |
|
5442 |
// Deoptimizes unless the object's map equals the expected map register.
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register expected_map = ToRegister(instr->map());
  __ cmpq(expected_map, FieldOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(not_equal, instr->environment());
}
5448 |
|
5449 |
|
5450 |
// Loads a property by its field index, overwriting the object register with
// the result.  A non-negative (untagged) index addresses an in-object
// field; a negative index addresses the out-of-object properties backing
// store.  Clobbers both the object and index registers.
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  Label out_of_object, done;
  __ SmiToInteger32(index, index);
  __ cmpl(index, Immediate(0));
  __ j(less, &out_of_object);
  // In-object field: load directly from the object's header area.
  __ movq(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // Out-of-object field: load from the properties fixed array.
  __ bind(&out_of_object);
  __ movq(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ negl(index);
  // Index is now equal to out of object property index plus 1.
  __ movq(object, FieldOperand(object,
                               index,
                               times_pointer_size,
                               FixedArray::kHeaderSize - kPointerSize));
  __ bind(&done);
}
5474 |
|
5475 |
|
5476 |
#undef __
|
5477 |
|
5478 |
} } // namespace v8::internal
|
5479 |
|
5480 |
#endif // V8_TARGET_ARCH_X64 |