// deps/v8/src/arm/code-stubs-arm.cc @ f230a1cf
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_ARM

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


void FastNewClosureStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r2 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry;
}


void ToNumberStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void NumberToStringStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
}


void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r3, r2, r1 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}


void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r3, r2, r1, r0 };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}


void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r2 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r1, r0 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
}


void LoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r1 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = NULL;
}


void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r2, r1, r0 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
}


void TransitionElementsKindStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0, r1 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  Address entry =
      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
}


void CompareNilICStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(CompareNilIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate));
}


void BinaryOpStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r1, r0 };
  descriptor->register_param_count_ = 2;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}


static void InitializeArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // register state
  // r0 -- number of arguments
  // r1 -- function
  // r2 -- type info cell with elements kind
  static Register registers[] = { r1, r2 };
  descriptor->register_param_count_ = 2;
  if (constant_stack_parameter_count != 0) {
    // stack param count needs (constructor pointer, and single argument)
    descriptor->stack_parameter_count_ = r0;
  }
  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->register_params_ = registers;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
}


static void InitializeInternalArrayConstructorDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor,
    int constant_stack_parameter_count) {
  // register state
  // r0 -- number of arguments
  // r1 -- constructor function
  static Register registers[] = { r1 };
  descriptor->register_param_count_ = 1;

  if (constant_stack_parameter_count != 0) {
    // stack param count needs (constructor pointer, and single argument)
    descriptor->stack_parameter_count_ = r0;
  }
  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
  descriptor->register_params_ = registers;
  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
  descriptor->deoptimization_handler_ =
      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
}
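

// The two helpers above are parameterized by constant_stack_parameter_count:
// 0 for the no-argument stubs, 1 for the single-argument stubs, and -1 for
// the N-argument stubs, whose count is only known at runtime -- hence
// stack_parameter_count_ is set to r0 whenever the constant count is
// non-zero.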


void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 0);
}


void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, 1);
}


void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeArrayConstructorDescriptor(isolate, descriptor, -1);
}


void ToBooleanStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0 };
  descriptor->register_param_count_ = 1;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ToBooleanIC_Miss);
  descriptor->SetMissHandler(
      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate));
}


void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0);
}


void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1);
}


void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1);
}


void StoreGlobalStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r1, r2, r0 };
  descriptor->register_param_count_ = 3;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
}


void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
    Isolate* isolate,
    CodeStubInterfaceDescriptor* descriptor) {
  static Register registers[] = { r0, r3, r1, r2 };
  descriptor->register_param_count_ = 4;
  descriptor->register_params_ = registers;
  descriptor->deoptimization_handler_ =
      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
}


#define __ ACCESS_MASM(masm)


static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


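// Illustrative note: a hydrogen stub's miss path is driven entirely by
// its interface descriptor. For ToBooleanStub, for example, the
// descriptor above names one register parameter (r0) and the
// kToBooleanIC_Miss handler, so the code below pushes r0 inside an
// internal frame and calls that external reference with argc == 1.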
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
  // Update the static counter each time a new code stub is generated.
  Isolate* isolate = masm->isolate();
  isolate->counters()->code_stubs()->Increment();

  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate);
  int param_count = descriptor->register_param_count_;
  {
    // Call the runtime system in a fresh internal frame.
    FrameScope scope(masm, StackFrame::INTERNAL);
    ASSERT(descriptor->register_param_count_ == 0 ||
           r0.is(descriptor->register_params_[param_count - 1]));
    // Push arguments.
    for (int i = 0; i < param_count; ++i) {
      __ push(descriptor->register_params_[i]);
    }
    ExternalReference miss = descriptor->miss_handler();
    __ CallExternalReference(miss, descriptor->register_param_count_);
  }

  __ Ret();
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Set up the object header.
  __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
  __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Set up the fixed slots, copy the global object from the previous context.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ mov(r1, Operand(Smi::FromInt(0)));
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
  __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}


void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: function.
  // [sp + kPointerSize]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));

  // Load the serialized scope info from the stack.
  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(r2, Heap::kBlockContextMapRootIndex);
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

  // If this block context is nested in the native context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the native context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(r3, &after_sentinel);
  if (FLAG_debug_code) {
    __ cmp(r3, Operand::Zero());
    __ Assert(eq, kExpected0AsASmiSentinel);
  }
  __ ldr(r3, GlobalObjectOperand());
  __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
  __ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots, copy the global object from the previous context.
  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
  __ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
  __ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
  __ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
  __ str(r2, ContextOperand(r0, Context::GLOBAL_OBJECT_INDEX));

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ str(r1, ContextOperand(r0, i + Context::MIN_CONTEXT_SLOTS));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}


// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
// scratch register. Destroys the source register. No GC occurs during this
// stub so you don't have to set up the frame.
class ConvertToDoubleStub : public PlatformCodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);
};
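
// For illustration: ARM core register codes are 4-bit values (r0 == 0
// ... r15 == 15), so MinorKey() packs the four register choices above
// into a unique 16-bit key. A hypothetical ConvertToDoubleStub(r2, r3,
// r7, r9) would encode as 2 + (3 << 4) + (7 << 8) + (9 << 12) == 0x9732.

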
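// Worked example for Generate() below: the Smi 1 untags to 1; the sign
// bit is clear, so the exponent word receives only the bias for 2^0
// (1023 << 20 == 0x3FF00000) and the mantissa word is 0 -- the IEEE bits
// of 1.0. For Smi -1, and_ captures the sign bit and rsb restores the
// magnitude, giving high word 0xBFF00000 and low word 0, i.e. -1.0.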
void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
  Register exponent = result1_;
  Register mantissa = result2_;

  Label not_special;
  __ SmiUntag(source_);
  // Move sign bit from source to destination. This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
  __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ cmp(source_, Operand(1));
  __ b(gt, &not_special);

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, Operand::Zero());
  __ Ret();

  __ bind(&not_special);
  __ clz(zeros_, source_);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here. Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
  // that fit in the ARM's constant field.
  int fudge = 0x400;
  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
  __ add(mantissa, mantissa, Operand(fudge));
  __ orr(exponent,
         exponent,
         Operand(mantissa, LSL, HeapNumber::kExponentShift));
  // Shift up the source chopping the top bit off.
  __ add(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ mov(source_, Operand(source_, LSL, zeros_));
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}


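// Worked example for the truncation below (illustrative): for the input
// 2^33 + 5, the biased exponent field holds 1023 + 33, so scratch becomes
// 32 (exponent - 1) and then 51 - 32 == 19 (52 - exponent). The low
// mantissa word (5 << 19) shifted right by 19 recovers 5; the high
// mantissa bits contribute (1 << 20) << 13, which overflows out of 32
// bits; and the result is 5 -- exactly (2^33 + 5) mod 2^32.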
void DoubleToIStub::Generate(MacroAssembler* masm) {
  Label out_of_range, only_low, negate, done;
  Register input_reg = source();
  Register result_reg = destination();

  int double_offset = offset();
  // Account for saved regs if input is sp.
  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;

  // Immediate values for this stub fit in instructions, so it's safe to use ip.
  Register scratch = ip;
  Register scratch_low =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
  Register scratch_high =
      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
  LowDwVfpRegister double_scratch = kScratchDoubleReg;

  __ Push(scratch_high, scratch_low);

  if (!skip_fastpath()) {
    // Load double input.
    __ vldr(double_scratch, MemOperand(input_reg, double_offset));
    __ vmov(scratch_low, scratch_high, double_scratch);

    // Do fast-path convert from double to int.
    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
    __ vmov(result_reg, double_scratch.low());

    // If result is not saturated (0x7fffffff or 0x80000000), we are done.
    __ sub(scratch, result_reg, Operand(1));
    __ cmp(scratch, Operand(0x7ffffffe));
    __ b(lt, &done);
  } else {
    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
    // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
    if (double_offset == 0) {
      __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
    } else {
      __ ldr(scratch_low, MemOperand(input_reg, double_offset));
      __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
    }
  }

  __ Ubfx(scratch, scratch_high,
          HeapNumber::kExponentShift, HeapNumber::kExponentBits);
  // Load scratch with exponent - 1. This is faster than loading
  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
  // If exponent is greater than or equal to 84, the 32 less significant
  // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
  // the result is 0.
  // Compare exponent with 84 (compare exponent - 1 with 83).
  __ cmp(scratch, Operand(83));
  __ b(ge, &out_of_range);

  // If we reach this code, 31 <= exponent <= 83.
  // So, we don't have to handle cases where 0 <= exponent <= 20 for
  // which we would need to shift right the high part of the mantissa.
  // Scratch contains exponent - 1.
  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
  __ rsb(scratch, scratch, Operand(51), SetCC);
  __ b(ls, &only_low);
  // 21 <= exponent <= 51, shift scratch_low and scratch_high
  // to generate the result.
  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
  // Scratch contains: 52 - exponent.
  // We need: exponent - 20.
  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
  __ rsb(scratch, scratch, Operand(32));
  __ Ubfx(result_reg, scratch_high,
          0, HeapNumber::kMantissaBitsInTopWord);
  // Set the implicit 1 before the mantissa part in scratch_high.
  __ orr(result_reg, result_reg,
         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
  __ b(&negate);

  __ bind(&out_of_range);
  __ mov(result_reg, Operand::Zero());
  __ b(&done);

  __ bind(&only_low);
  // 52 <= exponent <= 83, shift only scratch_low.
  // On entry, scratch contains: 52 - exponent.
  __ rsb(scratch, scratch, Operand::Zero());
  __ mov(result_reg, Operand(scratch_low, LSL, scratch));

  __ bind(&negate);
  // If input was positive, scratch_high ASR 31 equals 0 and
  // scratch_high LSR 31 equals zero.
  // New result = (result eor 0) + 0 = result.
  // If the input was negative, we have to negate the result.
  // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
  // New result = (result eor 0xffffffff) + 1 = 0 - result.
  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));

  __ bind(&done);

  __ Pop(scratch_high, scratch_low);
  __ Ret();
}


bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
  // These variants are compiled ahead of time. See next method.
  if (the_int_.is(r1) && the_heap_number_.is(r0) && scratch_.is(r2)) {
    return true;
  }
  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
  stub1.GetCode(isolate)->set_is_pregenerated(true);
  stub2.GetCode(isolate)->set_is_pregenerated(true);
}


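// Worked example for the stub below (illustrative): the int32 2^30
// becomes 1.0 * 2^30, i.e. an exponent word of (1023 + 30) << 20 ==
// 0x41D00000 and a mantissa word of 0. The special case 0x80000000
// (kMinInt) takes the max_negative_int path and is stored as sign bit
// plus exponent for 2^31: high word 0xC1E00000, low word 0 -- the bits
// of -2147483648.0.
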
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent. This test
  // has the neat side effect of setting the flags according to the sign.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ mov(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
  __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
  // We should be masking the implicit first digit of the mantissa away here,
  // but it just ends up combining harmlessly with the last digit of the
  // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(ip, Operand::Zero());
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}


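// Illustrative note on the NaN check used below: a NaN has all eleven
// exponent bits set and a non-zero mantissa. For a quiet NaN with high
// word 0x7FF80000, Sbfx on bits [20..30] extracts 0x7FF, which
// sign-extends to -1; +Infinity (high word 0x7FF00000) extracts the same
// exponent, but its mantissa bits are all zero, so the orr/SetCC sequence
// below reports "equal".
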
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);

  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
  // so we do the second best thing - test it ourselves.
  // They are both equal and they are not both Smis so both of them are not
  // Smis. If it's not a heap number, then return equal.
  if (cond == lt || cond == gt) {
    __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
    __ b(ge, slow);
  } else {
    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
    __ b(eq, &heap_number);
    // Comparing JS objects with <=, >= is complicated.
    if (cond != eq) {
      __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
      __ b(ge, slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false! See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
        __ cmp(r4, Operand(ODDBALL_TYPE));
        __ b(ne, &return_equal);
        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
        __ cmp(r0, r2);
        __ b(ne, &return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
          __ mov(r0, Operand(GREATER));
        } else {
          // undefined >= undefined should fail.
          __ mov(r0, Operand(LESS));
        }
        __ Ret();
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  // For less and greater we don't have to check for NaN since the result of
  // x < x is false regardless. For the others here is some code to check
  // for NaN.
  if (cond != lt && cond != gt) {
    __ bind(&heap_number);
    // It is a heap number, so return non-equal if it's NaN and equal if it's
    // not NaN.

    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
    __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
    // NaNs have all-one exponents so they sign extend to -1.
    __ cmp(r3, Operand(-1));
    __ b(ne, &return_equal);

    // Shift out flag and all exponent bits, retaining only mantissa.
    __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
    __ orr(r0, r3, Operand(r2), SetCC);
    // For equal we already have the right value in r0: Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN). For <= and >= we need to load r0 with the failing
    // value if it's a NaN.
    if (cond != eq) {
      // All-zero means Infinity means equal.
      __ Ret(eq);
      if (cond == le) {
        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
        __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
    __ Ret();
  }
  // No fall through here.

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  Label rhs_is_smi;
  __ JumpIfSmi(rhs, &rhs_is_smi);

  // Lhs is a Smi. Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed. Return non-equal.
    // If rhs is r0 then there is already a non zero value in it.
    if (!rhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs is a smi, rhs is a number.
  // Convert lhs to a double in d7.
  __ SmiToDouble(d7, lhs);
  // Load the double from rhs, tagged HeapNumber r0, to d6.
  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi. Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed. Return non-equal.
    // If lhs is r0 then there is already a non zero value in it.
    if (!lhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number. Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs is a smi, lhs is a heap number.
  // Load the double from lhs, tagged HeapNumber r1, to d7.
  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
  // Convert rhs to a double in d6.
  __ SmiToDouble(d6, rhs);
  // Fall through to both_loaded_as_doubles.
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JS object or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_SPEC_OBJECT_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero).
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for
  // internalized-internalized.
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ orr(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ b(eq, &return_not_equal);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ cmp(r2, r3);
  __ b(ne, slow);  // First was a heap number, second wasn't. Go slow case.

  // Both are heap numbers. Load them up then jump to the code we have
  // for that.
  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for internalized-to-internalized equality.
static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                     Register lhs,
                                                     Register rhs,
                                                     Label* possible_strings,
                                                     Label* not_both_strings) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // r2 is object type of rhs.
  Label object_test;
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);
  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, not_both_strings);
  __ tst(r3, Operand(kIsNotInternalizedMask));
  __ b(ne, possible_strings);

  // Both are internalized. We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(NOT_EQUAL));
  __ Ret();

  __ bind(&object_test);
  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
  __ b(lt, not_both_strings);
  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
  __ b(lt, not_both_strings);
  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ and_(r0, r2, Operand(r3));
  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ Ret();
}


static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                         Register input,
                                         Register scratch,
                                         CompareIC::State expected,
                                         Label* fail) {
  Label ok;
  if (expected == CompareIC::SMI) {
    __ JumpIfNotSmi(input, fail);
  } else if (expected == CompareIC::NUMBER) {
    __ JumpIfSmi(input, &ok);
    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                DONT_DO_SMI_CHECK);
  }
  // We could be strict about internalized/non-internalized here, but as long
  // as hydrogen doesn't care, the stub doesn't have to care either.
  __ bind(&ok);
}


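// Illustrative note on the Smi fast path below: Smis are 31-bit values
// stored shifted left by one (tag bit 0), so ASR 1 untags in place. For
// lhs == Smi(3) (raw 6) and rhs == Smi(5) (raw 10), the sub produces
// 3 - 5 == -2, a negative result, which callers interpret as LESS.
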
// On entry r1 and r0 are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
  Register lhs = r1;
  Register rhs = r0;
  Condition cc = GetCondition();

  Label miss;
  ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
  ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);

  Label slow;  // Call builtin.
  Label not_smis, both_loaded_as_doubles, lhs_not_nan;

  Label not_two_smis, smi_done;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &not_two_smis);
  __ mov(r1, Operand(r1, ASR, 1));
  __ sub(r0, r1, Operand(r0, ASR, 1));
  __ Ret();
  __ bind(&not_two_smis);

  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Handle the case where the objects are identical. Either returns the answer
  // or goes to slow. Only falls through if the objects were not identical.
  EmitIdenticalObjectComparison(masm, &slow, cc);

  // If either is a Smi (we know that not both are), then they can only
  // be strictly equal if the other is a HeapNumber.
  STATIC_ASSERT(kSmiTag == 0);
  ASSERT_EQ(0, Smi::FromInt(0));
  __ and_(r2, lhs, Operand(rhs));
  __ JumpIfNotSmi(r2, &not_smis);
  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
  // 1) Return the answer.
  // 2) Go to slow.
  // 3) Fall through to both_loaded_as_doubles.
  // 4) Jump to lhs_not_nan.
  // In cases 3 and 4 we have found out we were dealing with a number-number
  // comparison. If VFP3 is supported the double values of the numbers have
  // been loaded into d7 and d6. Otherwise, the double values have been loaded
  // into r0, r1, r2, and r3.
  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());

  __ bind(&both_loaded_as_doubles);
  // The arguments have been converted to doubles and stored in d6 and d7, if
  // VFP3 is supported, or in r0, r1, r2, and r3.
  Isolate* isolate = masm->isolate();
  __ bind(&lhs_not_nan);
  Label no_nan;
  // ARMv7 VFP3 instructions to implement double precision comparison.
  __ VFPCompareAndSetFlags(d7, d6);
  Label nan;
  __ b(vs, &nan);
  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
  __ mov(r0, Operand(LESS), LeaveCC, lt);
  __ mov(r0, Operand(GREATER), LeaveCC, gt);
  __ Ret();

  __ bind(&nan);
  // If one of the sides was a NaN then the v flag is set. Load r0 with
  // whatever it takes to make the comparison fail, since comparisons with NaN
  // always fail.
  if (cc == lt || cc == le) {
    __ mov(r0, Operand(GREATER));
  } else {
    __ mov(r0, Operand(LESS));
  }
  __ Ret();

  __ bind(&not_smis);
  // At this point we know we are dealing with two different objects,
  // and neither of them is a Smi. The objects are in rhs_ and lhs_.
  if (strict()) {
    // This returns non-equal for some object types, or falls through if it
    // was not lucky.
    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
  }

  Label check_for_internalized_strings;
  Label flat_string_check;
  // Check for heap-number-heap-number comparison. Can jump to slow case,
  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
  // that case. If the inputs are not doubles then jumps to
  // check_for_internalized_strings.
  // In this case r2 will contain the type of rhs_. Never falls through.
  EmitCheckForTwoHeapNumbers(masm,
                             lhs,
                             rhs,
                             &both_loaded_as_doubles,
                             &check_for_internalized_strings,
                             &flat_string_check);

  __ bind(&check_for_internalized_strings);
  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
  // internalized strings.
  if (cc == eq && !strict()) {
    // Returns an answer for two internalized strings or two detectable
    // objects. Otherwise jumps to string case or not both strings case.
    // Assumes that r2 is the type of rhs_ on entry.
    EmitCheckForInternalizedStringsOrObjects(
        masm, lhs, rhs, &flat_string_check, &slow);
  }

  // Check for both being sequential ASCII strings, and inline if that is the
  // case.
  __ bind(&flat_string_check);

  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);

  __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
  if (cc == eq) {
    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                     lhs,
                                                     rhs,
                                                     r2,
                                                     r3,
                                                     r4);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                       lhs,
                                                       rhs,
                                                       r2,
                                                       r3,
                                                       r4,
                                                       r5);
  }
  // Never falls through to here.

  __ bind(&slow);

  __ Push(lhs, rhs);
  // Figure out which native to call and setup the arguments.
  Builtins::JavaScript native;
  if (cc == eq) {
    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
  } else {
    native = Builtins::COMPARE;
    int ncr;  // NaN compare result.
    if (cc == lt || cc == le) {
      ncr = GREATER;
    } else {
      ASSERT(cc == gt || cc == ge);  // Remaining cases.
      ncr = LESS;
    }
    __ mov(r0, Operand(Smi::FromInt(ncr)));
    __ push(r0);
  }

  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ InvokeBuiltin(native, JUMP_FUNCTION);

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  // restore them.
  __ stm(db_w, sp, kCallerSaved | lr.bit());

  const Register scratch = r1;

  if (save_doubles_ == kSaveFPRegs) {
    __ SaveFPRegs(sp, scratch);
  }
  const int argument_count = 1;
  const int fp_argument_count = 0;

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
  __ mov(r0, Operand(ExternalReference::isolate_address(masm->isolate())));
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  if (save_doubles_ == kSaveFPRegs) {
    __ RestoreFPRegs(sp, scratch);
  }
  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
}


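// Worked example of the cache hash below (illustrative; assumes
// kCacheSize == 512 in this revision): for the input 1.0 the two words
// are low == 0x00000000 and high == 0x3FF00000, so
//   h = 0x00000000 ^ 0x3FF00000   == 0x3FF00000
//   h ^= h >> 16                  -> 0x3FF03FF0
//   h ^= h >> 8                   -> 0x3FCFCFCF
//   h &= 511                      -> 463
// Each element is 12 bytes (two uint32 inputs plus a result pointer, as
// the DEBUG block below checks), so the matching entry lives at
// base + 463 * 12, which the code computes as (h + 2 * h) * 4.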
1201 |
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
1202 |
// Untagged case: double input in d2, double result goes
|
1203 |
// into d2.
|
1204 |
// Tagged case: tagged input on top of stack and in r0,
|
1205 |
// tagged result (heap number) goes into r0.
|
1206 |
|
1207 |
Label input_not_smi; |
1208 |
Label loaded; |
1209 |
Label calculate; |
1210 |
Label invalid_cache; |
1211 |
const Register scratch0 = r9;
|
1212 |
Register scratch1 = no_reg; // will be r4
|
1213 |
const Register cache_entry = r0;
|
1214 |
const bool tagged = (argument_type_ == TAGGED); |
1215 |
|
1216 |
if (tagged) {
|
1217 |
// Argument is a number and is on stack and in r0.
|
1218 |
// Load argument and check if it is a smi.
|
1219 |
__ JumpIfNotSmi(r0, &input_not_smi); |
1220 |
|
1221 |
// Input is a smi. Convert to double and load the low and high words
|
1222 |
// of the double into r2, r3.
|
1223 |
__ SmiToDouble(d7, r0); |
1224 |
__ vmov(r2, r3, d7); |
1225 |
__ b(&loaded); |
1226 |
|
1227 |
__ bind(&input_not_smi); |
1228 |
// Check if input is a HeapNumber.
|
1229 |
__ CheckMap(r0, |
1230 |
r1, |
1231 |
Heap::kHeapNumberMapRootIndex, |
1232 |
&calculate, |
1233 |
DONT_DO_SMI_CHECK); |
1234 |
// Input is a HeapNumber. Load it to a double register and store the
|
1235 |
// low and high words into r2, r3.
|
1236 |
__ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
1237 |
__ vmov(r2, r3, d0); |
1238 |
} else {
|
1239 |
// Input is untagged double in d2. Output goes to d2.
|
1240 |
__ vmov(r2, r3, d2); |
1241 |
} |
1242 |
__ bind(&loaded); |
1243 |
// r2 = low 32 bits of double value
|
1244 |
// r3 = high 32 bits of double value
|
1245 |
// Compute hash (the shifts are arithmetic):
|
1246 |
// h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
|
1247 |
__ eor(r1, r2, Operand(r3)); |
1248 |
__ eor(r1, r1, Operand(r1, ASR, 16));
|
1249 |
__ eor(r1, r1, Operand(r1, ASR, 8));
|
1250 |
ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
1251 |
__ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
|
1252 |
|
1253 |
// r2 = low 32 bits of double value.
|
1254 |
// r3 = high 32 bits of double value.
|
1255 |
// r1 = TranscendentalCache::hash(double value).
|
1256 |
Isolate* isolate = masm->isolate(); |
1257 |
ExternalReference cache_array = |
1258 |
ExternalReference::transcendental_cache_array_address(isolate); |
1259 |
__ mov(cache_entry, Operand(cache_array)); |
1260 |
// cache_entry points to cache array.
|
1261 |
int cache_array_index
|
1262 |
= type_ * sizeof(isolate->transcendental_cache()->caches_[0]); |
1263 |
__ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); |
1264 |
// r0 points to the cache for the type type_.
|
1265 |
// If NULL, the cache hasn't been initialized yet, so go through runtime.
|
1266 |
__ cmp(cache_entry, Operand::Zero()); |
1267 |
__ b(eq, &invalid_cache); |
1268 |
|
1269 |
#ifdef DEBUG
|
1270 |
// Check that the layout of cache elements match expectations.
|
1271 |
{ TranscendentalCache::SubCache::Element test_elem[2];
|
1272 |
char* elem_start = reinterpret_cast<char*>(&test_elem[0]); |
1273 |
char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); |
1274 |
char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); |
1275 |
char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
1276 |
char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
1277 |
CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer. |
1278 |
CHECK_EQ(0, elem_in0 - elem_start);
|
1279 |
CHECK_EQ(kIntSize, elem_in1 - elem_start); |
1280 |
CHECK_EQ(2 * kIntSize, elem_out - elem_start);
|
1281 |
} |
1282 |
#endif
|
1283 |
|
1284 |
// Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
|
1285 |
__ add(r1, r1, Operand(r1, LSL, 1));
|
1286 |
__ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
|
1287 |
// Check if cache matches: Double value is stored in uint32_t[2] array.
|
1288 |
__ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); |
1289 |
__ cmp(r2, r4); |
1290 |
__ cmp(r3, r5, eq); |
1291 |
__ b(ne, &calculate); |
1292 |

  scratch1 = r4;  // Start of scratch1 range.

  // Cache hit. Load result, cleanup and return.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(
      counters->transcendental_cache_hit(), 1, scratch0, scratch1);
  if (tagged) {
    // Pop input value from stack and load result into r0.
    __ pop();
    __ mov(r0, Operand(r6));
  } else {
    // Load result into d2.
    __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
  }
  __ Ret();

  __ bind(&calculate);
  __ IncrementCounter(
      counters->transcendental_cache_miss(), 1, scratch0, scratch1);
  if (tagged) {
    __ bind(&invalid_cache);
    ExternalReference runtime_function =
        ExternalReference(RuntimeFunction(), masm->isolate());
    __ TailCallExternalReference(runtime_function, 1, 1);
  } else {
    Label no_update;
    Label skip_cache;

    // Call C function to calculate the result and update the cache.
    // r0: precalculated cache entry address.
    // r2 and r3: parts of the double value.
    // Store r0, r2 and r3 on stack for later before calling C function.
    __ Push(r3, r2, cache_entry);
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(d2);

    // Try to update the cache. If we cannot allocate a
    // heap number, we return the result without updating.
    __ Pop(r3, r2, cache_entry);
    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r6, scratch0, scratch1, r5, &no_update);
    __ vstr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
    __ stm(ia, cache_entry, r2.bit() | r3.bit() | r6.bit());
    __ Ret();

    __ bind(&invalid_cache);
    // The cache is invalid. Call runtime which will recreate the cache.
    __ LoadRoot(r5, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(r0, scratch0, scratch1, r5, &skip_cache);
    __ vstr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(r0);
      __ CallRuntime(RuntimeFunction(), 1);
    }
    __ vldr(d2, FieldMemOperand(r0, HeapNumber::kValueOffset));
    __ Ret();

    __ bind(&skip_cache);
    // Call C function to calculate the result and answer directly
    // without updating the cache.
    GenerateCallCFunction(masm, scratch0);
    __ GetCFunctionDoubleResult(d2);
    __ bind(&no_update);

    // We return the value in d2 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);

      // Allocate an aligned object larger than a HeapNumber.
      ASSERT(4 * kPointerSize >= HeapNumber::kSize);
      __ mov(scratch0, Operand(4 * kPointerSize));
      __ push(scratch0);
      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    }
    __ Ret();
  }
}


void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
                                                    Register scratch) {
  Isolate* isolate = masm->isolate();

  __ push(lr);
  __ PrepareCallCFunction(0, 1, scratch);
  if (masm->use_eabi_hardfloat()) {
    __ vmov(d0, d2);
  } else {
    __ vmov(r0, r1, d2);
  }
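  // With the EABI hard-float variant the double argument is passed in d0;
  // under soft-float it travels in the core register pair r0:r1. Either
  // way the C callee observes a single double parameter.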
  AllowExternalCallThatCantCauseGC scope(masm);
  switch (type_) {
    case TranscendentalCache::SIN:
      __ CallCFunction(ExternalReference::math_sin_double_function(isolate),
                       0, 1);
      break;
    case TranscendentalCache::COS:
      __ CallCFunction(ExternalReference::math_cos_double_function(isolate),
                       0, 1);
      break;
    case TranscendentalCache::TAN:
      __ CallCFunction(ExternalReference::math_tan_double_function(isolate),
                       0, 1);
      break;
    case TranscendentalCache::LOG:
      __ CallCFunction(ExternalReference::math_log_double_function(isolate),
                       0, 1);
      break;
    default:
      UNIMPLEMENTED();
      break;
  }
  __ pop(lr);
}


Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
  switch (type_) {
    // Add more cases when necessary.
    case TranscendentalCache::SIN: return Runtime::kMath_sin;
    case TranscendentalCache::COS: return Runtime::kMath_cos;
    case TranscendentalCache::TAN: return Runtime::kMath_tan;
    case TranscendentalCache::LOG: return Runtime::kMath_log;
    default:
      UNIMPLEMENTED();
      return Runtime::kAbort;
  }
}


void MathPowStub::Generate(MacroAssembler* masm) {
  const Register base = r1;
  const Register exponent = r2;
  const Register heapnumbermap = r5;
  const Register heapnumber = r0;
  const DwVfpRegister double_base = d1;
  const DwVfpRegister double_exponent = d2;
  const DwVfpRegister double_result = d3;
  const DwVfpRegister double_scratch = d0;
  const SwVfpRegister single_scratch = s0;
  const Register scratch = r9;
  const Register scratch2 = r4;

  Label call_runtime, done, int_exponent;
  if (exponent_type_ == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack to double registers.
    __ ldr(base, MemOperand(sp, 1 * kPointerSize));
    __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));

    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);

    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
    __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);

    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent);

    __ bind(&base_is_smi);
    __ vmov(single_scratch, scratch);
    __ vcvt_f64_s32(double_base, single_scratch);
    __ bind(&unpack_exponent);

    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
    __ cmp(scratch, heapnumbermap);
    __ b(ne, &call_runtime);
    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    // Base is already in double_base.
    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);

    __ vldr(double_exponent,
            FieldMemOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label int_exponent_convert;
    // Detect integer exponents stored as double.
    __ vcvt_u32_f64(single_scratch, double_exponent);
    // We do not check for NaN or Infinity here because comparing numbers on
    // ARM correctly distinguishes NaNs. We end up calling the built-in.
    __ vcvt_f64_u32(double_scratch, single_scratch);
    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
    __ b(eq, &int_exponent_convert);
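    // The double -> uint32 -> double round trip is exact only for integral
    // values in unsigned range, so comparing the converted result with the
    // original exponent detects "integer stored as a double" without any
    // explicit NaN or range checks.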

    if (exponent_type_ == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label not_plus_half;

      // Test for 0.5.
      __ vmov(double_scratch, 0.5, scratch);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &not_plus_half);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY, scratch);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vneg(double_result, double_scratch, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vsqrt(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&not_plus_half);
      __ vmov(double_scratch, -0.5, scratch);
      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
      __ b(ne, &call_runtime);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      __ vmov(double_scratch, -V8_INFINITY, scratch);
      __ VFPCompareAndSetFlags(double_base, double_scratch);
      __ vmov(double_result, kDoubleRegZero, eq);
      __ b(eq, &done);

      // Add +0 to convert -0 to +0.
      __ vadd(double_scratch, double_base, kDoubleRegZero);
      __ vmov(double_result, 1.0, scratch);
      __ vsqrt(double_scratch, double_scratch);
      __ vdiv(double_result, double_result, double_scratch);
      __ jmp(&done);
    }

    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ SetCallCDoubleArguments(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()),
          0, 2);
    }
    __ pop(lr);
    __ GetCFunctionDoubleResult(double_result);
    __ jmp(&done);

    __ bind(&int_exponent_convert);
    __ vcvt_u32_f64(single_scratch, double_exponent);
    __ vmov(scratch, single_scratch);
  }

  // Calculate power with integer exponent.
  __ bind(&int_exponent);

  // Get two copies of exponent in the registers scratch and exponent.
  if (exponent_type_ == INTEGER) {
    __ mov(scratch, exponent);
  } else {
    // Exponent has previously been stored into scratch as untagged integer.
    __ mov(exponent, scratch);
  }
  __ vmov(double_scratch, double_base);  // Back up base.
  __ vmov(double_result, 1.0, scratch2);

  // Get absolute value of exponent.
  __ cmp(scratch, Operand::Zero());
  __ mov(scratch2, Operand::Zero(), LeaveCC, mi);
  __ sub(scratch, scratch2, scratch, LeaveCC, mi);

  Label while_true;
  __ bind(&while_true);
  __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
  __ vmul(double_result, double_result, double_scratch, cs);
  __ vmul(double_scratch, double_scratch, double_scratch, ne);
  __ b(ne, &while_true);
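  // This is binary exponentiation (square-and-multiply): each iteration
  // shifts the lowest exponent bit into the carry flag, multiplies the
  // result by the current power of the base when that bit was set (cs),
  // and squares the base while any exponent bits remain (ne). For an
  // exponent of 13 (0b1101) the result accumulates base^1 * base^4 * base^8.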

  __ cmp(exponent, Operand::Zero());
  __ b(ge, &done);
  __ vmov(double_scratch, 1.0, scratch);
  __ vdiv(double_result, double_scratch, double_result);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ VFPCompareAndSetFlags(double_result, 0.0);
  __ b(ne, &done);
  // double_exponent may not contain the exponent value if the input was a
  // smi. We set it with exponent value before bailing out.
  __ vmov(single_scratch, exponent);
  __ vcvt_f64_s32(double_exponent, single_scratch);

  // Returning or bailing out.
  Counters* counters = masm->isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in r0.
    __ bind(&done);
    __ AllocateHeapNumber(
        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
    __ vstr(double_result,
            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
    ASSERT(heapnumber.is(r0));
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret(2);
  } else {
    __ push(lr);
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(0, 2, scratch);
      __ SetCallCDoubleArguments(double_base, double_exponent);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()),
          0, 2);
    }
    __ pop(lr);
    __ GetCFunctionDoubleResult(double_result);

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
    __ Ret();
  }
}


bool CEntryStub::NeedsImmovableCode() {
  return true;
}


bool CEntryStub::IsPregenerated(Isolate* isolate) {
  return (!save_doubles_ || isolate->fp_stubs_generated()) &&
          result_size_ == 1;
}


void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
  CEntryStub::GenerateAheadOfTime(isolate);
  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
  RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
  BinaryOpStub::GenerateAheadOfTime(isolate);
}


void CodeStub::GenerateFPStubs(Isolate* isolate) {
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub save_doubles(1, mode);
  StoreBufferOverflowStub stub(mode);
  // These stubs might already be in the snapshot, detect that and don't
  // regenerate, which would lead to code stub initialization state being
  // messed up.
  Code* save_doubles_code;
  if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
    save_doubles_code = *save_doubles.GetCode(isolate);
  }
  Code* store_buffer_overflow_code;
  if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
    store_buffer_overflow_code = *stub.GetCode(isolate);
  }
  save_doubles_code->set_is_pregenerated(true);
  store_buffer_overflow_code->set_is_pregenerated(true);
  isolate->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
  CEntryStub stub(1, kDontSaveFPRegs);
  Handle<Code> code = stub.GetCode(isolate);
  code->set_is_pregenerated(true);
}


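// A failure pointer keeps the two-bit failure tag in bits 0-1 and the
// failure type in the next two bits. The STATIC_ASSERTs below pin both
// fields at 3 (0b11), so an out-of-memory failure is precisely a value
// whose low nibble is 0xf.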
static void JumpIfOOM(MacroAssembler* masm,
                      Register value,
                      Register scratch,
                      Label* oom_label) {
  STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3);
  STATIC_ASSERT(kFailureTag == 3);
  __ and_(scratch, value, Operand(0xf));
  __ cmp(scratch, Operand(0xf));
  __ b(eq, oom_label);
}


void CEntryStub::GenerateCore(MacroAssembler* masm,
                              Label* throw_normal_exception,
                              Label* throw_termination_exception,
                              Label* throw_out_of_memory_exception,
                              bool do_gc,
                              bool always_allocate) {
  // r0: result parameter for PerformGC, if any
  // r4: number of arguments including receiver  (C callee-saved)
  // r5: pointer to builtin function  (C callee-saved)
  // r6: pointer to the first argument (C callee-saved)
  Isolate* isolate = masm->isolate();

  if (do_gc) {
    // Passing r0.
    __ PrepareCallCFunction(2, 0, r1);
    __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
    __ CallCFunction(ExternalReference::perform_gc_function(isolate),
                     2, 0);
  }

  ExternalReference scope_depth =
      ExternalReference::heap_always_allocate_scope_depth(isolate);
  if (always_allocate) {
    __ mov(r0, Operand(scope_depth));
    __ ldr(r1, MemOperand(r0));
    __ add(r1, r1, Operand(1));
    __ str(r1, MemOperand(r0));
  }

  // Call C built-in.
  // r0 = argc, r1 = argv
  __ mov(r0, Operand(r4));
  __ mov(r1, Operand(r6));

#if V8_HOST_ARCH_ARM
  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
  int frame_alignment_mask = frame_alignment - 1;
  if (FLAG_debug_code) {
    if (frame_alignment > kPointerSize) {
      Label alignment_as_expected;
      ASSERT(IsPowerOf2(frame_alignment));
      __ tst(sp, Operand(frame_alignment_mask));
      __ b(eq, &alignment_as_expected);
      // Don't use Check here, as it will call Runtime_Abort re-entering here.
      __ stop("Unexpected alignment");
      __ bind(&alignment_as_expected);
    }
  }
#endif

  __ mov(r2, Operand(ExternalReference::isolate_address(isolate)));

  // To let the GC traverse the return address of the exit frames, we need to
  // know where the return address is. The CEntryStub is unmovable, so
  // we can store the address on the stack to be able to find it again and
  // we never have to restore it, because it will not change.
  // Compute the return address in lr to return to after the jump below. Pc is
  // already at '+ 8' from the current instruction but return is after three
  // instructions so add another 4 to pc to get the return address.
  {
    // Prevent literal pool emission before return address.
    Assembler::BlockConstPoolScope block_const_pool(masm);
    masm->add(lr, pc, Operand(4));
    __ str(lr, MemOperand(sp, 0));
    masm->Jump(r5);
  }
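  // Reading pc on ARM yields the address of the current instruction plus 8.
  // The add, the str and the Jump occupy 12 bytes, so lr = pc + 4 lands
  // exactly on the instruction that follows the Jump.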

  __ VFPEnsureFPSCRState(r2);

  if (always_allocate) {
    // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
    // though (contain the result).
    __ mov(r2, Operand(scope_depth));
    __ ldr(r3, MemOperand(r2));
    __ sub(r3, r3, Operand(1));
    __ str(r3, MemOperand(r2));
  }

  // Check for failure result.
  Label failure_returned;
  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
  // Lower 2 bits of r2 are 0 iff r0 has failure tag.
  __ add(r2, r0, Operand(1));
  __ tst(r2, Operand(kFailureTagMask));
  __ b(eq, &failure_returned);
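  // Failure objects carry kFailureTag (0b11) in their low two bits, so
  // adding 1 clears exactly those bits when the tag is present; the tst
  // then branches on "low bits now zero", i.e. "r0 was a failure".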

  // Exit C frame and return.
  // r0:r1: result
  // sp: stack pointer
  // fp: frame pointer
  // Callee-saved register r4 still holds argc.
  __ LeaveExitFrame(save_doubles_, r4, true);
  __ mov(pc, lr);

  // Check if we should retry or throw an exception.
  Label retry;
  __ bind(&failure_returned);
  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
  __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
  __ b(eq, &retry);

  // Special handling of out of memory exceptions.
  JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);

  // Retrieve the pending exception.
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ ldr(r0, MemOperand(ip));

  // See if we just retrieved an OOM exception.
  JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);

  // Clear the pending exception.
  __ mov(r3, Operand(isolate->factory()->the_hole_value()));
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ str(r3, MemOperand(ip));

  // Special handling of termination exceptions, which are uncatchable
  // by JavaScript code.
  __ cmp(r0, Operand(isolate->factory()->termination_exception()));
  __ b(eq, throw_termination_exception);

  // Handle normal exception.
  __ jmp(throw_normal_exception);

  __ bind(&retry);  // Pass last failure (r0) as parameter (r0) when retrying.
}


void CEntryStub::Generate(MacroAssembler* masm) {
  // Called from JavaScript; parameters are on stack as if calling JS function
  // r0: number of arguments including receiver
  // r1: pointer to builtin function
  // fp: frame pointer  (restored after C call)
  // sp: stack pointer  (restored as callee's sp after C call)
  // cp: current context  (C callee-saved)

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Result returned in r0 or r0+r1 by default.

  // NOTE: Invocations of builtins may return failure objects
  // instead of a proper result. The builtin entry handles
  // this by performing a garbage collection and retrying the
  // builtin once.

  // Compute the argv pointer in a callee-saved register.
  __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
  __ sub(r6, r6, Operand(kPointerSize));

  // Enter the exit frame that transitions from JavaScript to C++.
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterExitFrame(save_doubles_);

  // Set up argc and the builtin function in callee-saved registers.
  __ mov(r4, Operand(r0));
  __ mov(r5, Operand(r1));

  // r4: number of arguments (C callee-saved)
  // r5: pointer to builtin function (C callee-saved)
  // r6: pointer to first argument (C callee-saved)

  Label throw_normal_exception;
  Label throw_termination_exception;
  Label throw_out_of_memory_exception;

  // Call into the runtime system.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               false,
               false);

  // Do space-specific GC and retry runtime call.
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               false);

  // Do full GC and retry runtime call one final time.
  Failure* failure = Failure::InternalError();
  __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
  GenerateCore(masm,
               &throw_normal_exception,
               &throw_termination_exception,
               &throw_out_of_memory_exception,
               true,
               true);

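  // The three expansions above escalate the retry policy: a plain attempt,
  // a retry after a space-specific GC (do_gc), and a final attempt that
  // additionally forces allocations to succeed (always_allocate). Failures
  // surviving all three fall through to the throw labels below.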
  __ bind(&throw_out_of_memory_exception);
  // Set external caught exception to false.
  Isolate* isolate = masm->isolate();
  ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
                                    isolate);
  __ mov(r0, Operand(false, RelocInfo::NONE32));
  __ mov(r2, Operand(external_caught));
  __ str(r0, MemOperand(r2));

  // Set pending exception and r0 to out of memory exception.
  Label already_have_failure;
  JumpIfOOM(masm, r0, ip, &already_have_failure);
  Failure* out_of_memory = Failure::OutOfMemoryException(0x1);
  __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
  __ bind(&already_have_failure);
  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ str(r0, MemOperand(r2));
  // Fall through to the next label.

  __ bind(&throw_termination_exception);
  __ ThrowUncatchable(r0);

  __ bind(&throw_normal_exception);
  __ Throw(r0);
}


void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // [sp+0]: argv

  Label invoke, handler_entry, exit;

  ProfileEntryHookStub::MaybeCallEntryHook(masm);

  // Called from C, so do not pop argc and args on exit (preserve sp)
  // No need to save register-passed args
  // Save callee-saved registers (incl. cp and fp), sp, and lr
  __ stm(db_w, sp, kCalleeSaved | lr.bit());

  // Save callee-saved vfp registers.
  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
  // Set up the reserved register for 0.0.
  __ vmov(kDoubleRegZero, 0.0);
  __ VFPEnsureFPSCRState(r4);

  // Get address of argv, see stm above.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc

  // Set up argv in r4.
  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
  __ ldr(r4, MemOperand(sp, offset_to_argv));
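  // argv sits just above everything the two store-multiple instructions
  // pushed: kNumCalleeSaved core registers plus lr (hence the + 1), and
  // kNumDoubleCalleeSaved double registers of kDoubleSize bytes each.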

  // Push a frame with special values setup to mark it as an entry frame.
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  Isolate* isolate = masm->isolate();
  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
  __ mov(r8, Operand(Smi::FromInt(marker)));
  __ mov(r6, Operand(Smi::FromInt(marker)));
  __ mov(r5,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
  __ ldr(r5, MemOperand(r5));
  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
  __ Push(ip, r8, r6, r5);

  // Set up frame pointer for the frame to be pushed.
  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // If this is the outermost JS call, set js_entry_sp value.
  Label non_outermost_js;
  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ ldr(r6, MemOperand(r5));
  __ cmp(r6, Operand::Zero());
  __ b(ne, &non_outermost_js);
  __ str(fp, MemOperand(r5));
  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  Label cont;
  __ b(&cont);
  __ bind(&non_outermost_js);
  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
  __ bind(&cont);
  __ push(ip);

  // Jump to a faked try block that does the invoke, with a faked catch
  // block that sets the pending exception.
  __ jmp(&invoke);

  // Block literal pool emission whilst taking the position of the handler
  // entry. This avoids making the assumption that literal pools are always
  // emitted after an instruction is emitted, rather than before.
  {
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ bind(&handler_entry);
    handler_offset_ = handler_entry.pos();
    // Caught exception: Store result (exception) in the pending exception
    // field in the JSEnv and return a failure sentinel. Coming in here the
    // fp will be invalid because the PushTryHandler below sets it to 0 to
    // signal the existence of the JSEntry frame.
    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                         isolate)));
  }
  __ str(r0, MemOperand(ip));
  __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
  __ b(&exit);

  // Invoke: Link this frame into the handler chain. There's only one
  // handler block in this code object, so its index is 0.
  __ bind(&invoke);
  // Must preserve r0-r4, r5-r6 are available.
  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
  // If an exception not caught by another handler occurs, this handler
  // returns control to the code after the bl(&invoke) above, which
  // restores all kCalleeSaved registers (including cp and fp) to their
  // saved values before returning a failure to C.

  // Clear any pending exceptions.
  __ mov(r5, Operand(isolate->factory()->the_hole_value()));
  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ str(r5, MemOperand(ip));

  // Invoke the function by calling through JS entry trampoline builtin.
  // Notice that we cannot store a reference to the trampoline code directly in
  // this stub, because runtime stubs are not traversed when doing GC.

  // Expected registers by Builtins::JSEntryTrampoline
  // r0: code entry
  // r1: function
  // r2: receiver
  // r3: argc
  // r4: argv
  if (is_construct) {
    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                      isolate);
    __ mov(ip, Operand(construct_entry));
  } else {
    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
    __ mov(ip, Operand(entry));
  }
  __ ldr(ip, MemOperand(ip));  // deref address

  // Branch and link to JSEntryTrampoline. We don't use the double underscore
  // macro for the add instruction because we don't want the coverage tool
  // inserting instructions here after we read the pc. We block literal pool
  // emission for the same reason.
  {
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ mov(lr, Operand(pc));
    masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
  }
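  // Reading pc in the mov yields the address two instructions ahead, so lr
  // ends up addressing the instruction right after the add that performs
  // the jump -- exactly the return address a bl would have produced.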

  // Unlink this frame from the handler chain.
  __ PopTryHandler();

  __ bind(&exit);  // r0 holds result
  // Check if the current stack frame is marked as the outermost JS frame.
  Label non_outermost_js_2;
  __ pop(r5);
  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
  __ b(ne, &non_outermost_js_2);
  __ mov(r6, Operand::Zero());
  __ mov(r5, Operand(ExternalReference(js_entry_sp)));
  __ str(r6, MemOperand(r5));
  __ bind(&non_outermost_js_2);

  // Restore the top frame descriptors from the stack.
  __ pop(r3);
  __ mov(ip,
         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
  __ str(r3, MemOperand(ip));

  // Reset the stack to the callee saved registers.
  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));

  // Restore callee-saved registers and return.
#ifdef DEBUG
  if (FLAG_debug_code) {
    __ mov(lr, Operand(pc));
  }
#endif

  // Restore callee-saved vfp registers.
  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);

  __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}


// Uses registers r0 to r4.
// Expected input (depending on whether args are in registers or on the stack):
// * object: r0 or at sp + 1 * kPointerSize.
// * function: r1 or at sp.
//
// An inlined call site may have been generated before calling this stub.
// In this case the offset to the inline site to patch is passed on the stack,
// in the safepoint slot for register r4.
// (See LCodeGen::DoInstanceOfKnownGlobal)
void InstanceofStub::Generate(MacroAssembler* masm) {
  // Call site inlining and patching implies arguments in registers.
  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
  // ReturnTrueFalse is only implemented for inlined call sites.
  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());

  // Fixed register usage throughout the stub:
  const Register object = r0;  // Object (lhs).
  Register map = r3;  // Map of the object.
  const Register function = r1;  // Function (rhs).
  const Register prototype = r4;  // Prototype of the function.
  const Register inline_site = r9;
  const Register scratch = r2;

  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;

  Label slow, loop, is_instance, is_not_instance, not_js_object;

  if (!HasArgsInRegisters()) {
    __ ldr(object, MemOperand(sp, 1 * kPointerSize));
    __ ldr(function, MemOperand(sp, 0));
  }

  // Check that the left hand side is a JS object and load its map.
  __ JumpIfSmi(object, &not_js_object);
  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);

  // If there is a call site cache, don't look in the global cache, but do
  // the real lookup and update the call site cache.
  if (!HasCallSiteInlineCheck()) {
    Label miss;
    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
    __ b(ne, &miss);
    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
    __ b(ne, &miss);
    __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
    __ Ret(HasArgsInRegisters() ? 0 : 2);

    __ bind(&miss);
  }

  // Get the prototype of the function.
  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);

  // Check that the function prototype is a JS object.
  __ JumpIfSmi(prototype, &slow);
  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);

  // Update the global instanceof or call site inlined cache with the current
  // map and function. The cached answer will be set when it is known below.
  if (!HasCallSiteInlineCheck()) {
    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
  } else {
    ASSERT(HasArgsInRegisters());
    // Patch the (relocated) inlined map check.

    // The offset was stored in the r4 safepoint slot.
    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
    __ LoadFromSafepointRegisterSlot(scratch, r4);
    __ sub(inline_site, lr, scratch);
    // Get the map location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ ldr(scratch, MemOperand(scratch));
    __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
  }

  // Register mapping: r3 is object map and r4 is function prototype.
  // Get prototype of object into r2.
  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));

  // We don't need map any more. Use it as a scratch register.
  Register scratch2 = map;
  map = no_reg;

  // Loop through the prototype chain looking for the function prototype.
  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
  __ bind(&loop);
  __ cmp(scratch, Operand(prototype));
  __ b(eq, &is_instance);
  __ cmp(scratch, scratch2);
  __ b(eq, &is_not_instance);
  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
  __ jmp(&loop);
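  // This loop implements the JavaScript semantics of "object instanceof
  // function": follow the object's prototype chain (map -> prototype, then
  // that object's map -> prototype, and so on) until either
  // function.prototype is found (instance) or null ends the chain (not an
  // instance).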

  __ bind(&is_instance);
  if (!HasCallSiteInlineCheck()) {
    __ mov(r0, Operand(Smi::FromInt(0)));
    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Patch the call site to return true.
    __ LoadRoot(r0, Heap::kTrueValueRootIndex);
    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
    // Get the boolean result location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ str(r0, MemOperand(scratch));

    if (!ReturnTrueFalseObject()) {
      __ mov(r0, Operand(Smi::FromInt(0)));
    }
  }
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&is_not_instance);
  if (!HasCallSiteInlineCheck()) {
    __ mov(r0, Operand(Smi::FromInt(1)));
    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
  } else {
    // Patch the call site to return false.
    __ LoadRoot(r0, Heap::kFalseValueRootIndex);
    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
    // Get the boolean result location in scratch and patch it.
    __ GetRelocatedValueLocation(inline_site, scratch);
    __ str(r0, MemOperand(scratch));

    if (!ReturnTrueFalseObject()) {
      __ mov(r0, Operand(Smi::FromInt(1)));
    }
  }
  __ Ret(HasArgsInRegisters() ? 0 : 2);
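  // Note the stub's return convention when no true/false object is
  // requested: Smi 0 signals "is an instance" and Smi 1 signals "is not",
  // which is also the encoding stored in the instanceof cache roots.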

  Label object_not_null, object_not_null_or_smi;
  __ bind(&not_js_object);
  // Before the null, smi and string value checks, check that the rhs is a
  // function; for a non-function rhs an exception needs to be thrown.
  __ JumpIfSmi(function, &slow);
  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  // Null is not an instance of anything.
  __ cmp(scratch, Operand(masm->isolate()->factory()->null_value()));
  __ b(ne, &object_not_null);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&object_not_null);
  // Smi values are not instances of anything.
  __ JumpIfNotSmi(object, &object_not_null_or_smi);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  __ bind(&object_not_null_or_smi);
  // String values are not instances of anything.
  __ IsObjectJSStringType(object, scratch, &slow);
  __ mov(r0, Operand(Smi::FromInt(1)));
  __ Ret(HasArgsInRegisters() ? 0 : 2);

  // Slow case: tail call the builtin.
  __ bind(&slow);
  if (!ReturnTrueFalseObject()) {
    if (HasArgsInRegisters()) {
      __ Push(r0, r1);
    }
    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
  } else {
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ Push(r0, r1);
      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
    }
    __ cmp(r0, Operand::Zero());
    __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
    __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
    __ Ret(HasArgsInRegisters() ? 0 : 2);
  }
}


void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver;
  if (kind() == Code::KEYED_LOAD_IC) {
    // ----------- S t a t e -------------
    //  -- lr    : return address
    //  -- r0    : key
    //  -- r1    : receiver
    // -----------------------------------
    __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string()));
    __ b(ne, &miss);
    receiver = r1;
  } else {
    ASSERT(kind() == Code::LOAD_IC);
    // ----------- S t a t e -------------
    //  -- r2    : name
    //  -- lr    : return address
    //  -- r0    : receiver
    //  -- sp[0] : receiver
    // -----------------------------------
    receiver = r0;
  }

  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
  __ bind(&miss);
  StubCompiler::TailCallBuiltin(
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}


void StringLengthStub::Generate(MacroAssembler* masm) {
  Label miss;
  Register receiver;
  if (kind() == Code::KEYED_LOAD_IC) {
    // ----------- S t a t e -------------
    //  -- lr    : return address
    //  -- r0    : key
    //  -- r1    : receiver
    // -----------------------------------
    __ cmp(r0, Operand(masm->isolate()->factory()->length_string()));
    __ b(ne, &miss);
    receiver = r1;
  } else {
    ASSERT(kind() == Code::LOAD_IC);
    // ----------- S t a t e -------------
    //  -- r2    : name
    //  -- lr    : return address
    //  -- r0    : receiver
    //  -- sp[0] : receiver
    // -----------------------------------
    receiver = r0;
  }

  StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);

  __ bind(&miss);
  StubCompiler::TailCallBuiltin(
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}


void StoreArrayLengthStub::Generate(MacroAssembler* masm) {
  // This accepts as a receiver anything JSArray::SetElementsLength accepts
  // (currently anything except for external arrays which means anything with
  // elements of FixedArray type). Value must be a number, but only smis are
  // accepted as the most common case.
  Label miss;

  Register receiver;
  Register value;
  if (kind() == Code::KEYED_STORE_IC) {
    // ----------- S t a t e -------------
    //  -- lr    : return address
    //  -- r0    : value
    //  -- r1    : key
    //  -- r2    : receiver
    // -----------------------------------
    __ cmp(r1, Operand(masm->isolate()->factory()->length_string()));
    __ b(ne, &miss);
    receiver = r2;
    value = r0;
  } else {
    ASSERT(kind() == Code::STORE_IC);
    // ----------- S t a t e -------------
    //  -- lr    : return address
    //  -- r0    : value
    //  -- r1    : receiver
    //  -- r2    : key
    // -----------------------------------
    receiver = r1;
    value = r0;
  }
  Register scratch = r3;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &miss);

  // Check that the object is a JS array.
  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
  __ b(ne, &miss);

  // Check that elements are FixedArray.
  // We rely on StoreIC_ArrayLength below to deal with all types of
  // fast elements (including COW).
  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset));
  __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE);
  __ b(ne, &miss);

  // Check that the array has fast properties, otherwise the length
  // property might have been redefined.
  __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset));
  __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset));
  __ CompareRoot(scratch, Heap::kHashTableMapRootIndex);
  __ b(eq, &miss);

  // Check that value is a smi.
  __ JumpIfNotSmi(value, &miss);

  // Prepare tail call to StoreIC_ArrayLength.
  __ Push(receiver, value);

  ExternalReference ref =
      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate());
  __ TailCallExternalReference(ref, 2, 1);

  __ bind(&miss);

  StubCompiler::TailCallBuiltin(
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
}


Register InstanceofStub::left() { return r0; }


Register InstanceofStub::right() { return r1; }


void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The displacement is the offset of the last parameter (if any)
  // relative to the frame pointer.
  const int kDisplacement =
      StandardFrameConstants::kCallerSPOffset - kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(r1, &slow);

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register r0. Use unsigned comparison to get negative
  // check for free.
  __ cmp(r1, r0);
  __ b(hs, &slow);

  // Read the argument from the stack and return it.
  __ sub(r3, r0, r1);
  __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmp(r1, r0);
  __ b(cs, &slow);

  // Read the argument from the adaptor frame and return it.
  __ sub(r3, r0, r1);
  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3));
  __ ldr(r0, MemOperand(r3, kDisplacement));
  __ Jump(lr);

  // Slow case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ push(r1);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
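// The "negative check for free" mentioned above works because a negative
// smi, reinterpreted as an unsigned word, is a very large value: a single
// unsigned comparison against the argument count therefore rejects both
// out-of-range and negative indices with one branch.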


void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
  // sp[0] : number of parameters
  // sp[4] : receiver displacement
  // sp[8] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &runtime);

  // Patch the arguments.length and the parameters pointer in the current
  // frame.
  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ str(r2, MemOperand(sp, 0 * kPointerSize));
  __ add(r3, r3, Operand(r2, LSL, 1));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ str(r3, MemOperand(sp, 1 * kPointerSize));

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}


void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
  // Stack layout:
  //  sp[0] : number of parameters (tagged)
  //  sp[4] : address of receiver argument
  //  sp[8] : function
  // Registers used over whole function:
  //  r6 : allocated object (tagged)
  //  r9 : mapped parameter count (tagged)

  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
  // r1 = parameter count (tagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  Label adaptor_frame, try_allocate;
  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ mov(r2, r1);
  __ b(&try_allocate);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ add(r3, r3, Operand(r2, LSL, 1));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ str(r3, MemOperand(sp, 1 * kPointerSize));

  // r1 = parameter count (tagged)
  // r2 = argument count (tagged)
  // Compute the mapped parameter count = min(r1, r2) in r1.
  __ cmp(r1, Operand(r2));
  __ mov(r1, Operand(r2), LeaveCC, gt);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  // If there are no mapped parameters, we do not need the parameter_map.
  __ cmp(r1, Operand(Smi::FromInt(0)));
  __ mov(r9, Operand::Zero(), LeaveCC, eq);
  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);

  // 2. Backing store.
  __ add(r9, r9, Operand(r2, LSL, 1));
  __ add(r9, r9, Operand(FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));

  // Do the allocation of all three objects in one go.
  __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
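  // r1 and r2 hold smis, i.e. values already shifted left by one, so
  // "Operand(rN, LSL, 1)" scales a tagged count straight to a byte size of
  // count * kPointerSize without untagging first.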

  // r0 = address of new object(s) (tagged)
  // r2 = argument count (tagged)
  // Get the arguments boilerplate from the current native context into r4.
  const int kNormalOffset =
      Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
  const int kAliasedOffset =
      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);

  __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
  __ cmp(r1, Operand::Zero());
  __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);

  // r0 = address of new object (tagged)
  // r1 = mapped parameter count (tagged)
  // r2 = argument count (tagged)
  // r4 = address of boilerplate object (tagged)
  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ ldr(r3, FieldMemOperand(r4, i));
    __ str(r3, FieldMemOperand(r0, i));
  }

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
  const int kCalleeOffset = JSObject::kHeaderSize +
      Heap::kArgumentsCalleeIndex * kPointerSize;
  __ str(r3, FieldMemOperand(r0, kCalleeOffset));

  // Use the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  const int kLengthOffset = JSObject::kHeaderSize +
      Heap::kArgumentsLengthIndex * kPointerSize;
  __ str(r2, FieldMemOperand(r0, kLengthOffset));

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, r4 will point there, otherwise
  // it will point to the backing store.
  __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));

  // r0 = address of new object (tagged)
  // r1 = mapped parameter count (tagged)
  // r2 = argument count (tagged)
  // r4 = address of parameter map or backing store (tagged)
  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ cmp(r1, Operand(Smi::FromInt(0)));
  // Move backing store address to r3, because it is
  // expected there when filling in the unmapped arguments.
  __ mov(r3, r4, LeaveCC, eq);
  __ b(eq, &skip_parameter_map);

  __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
  __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
  __ add(r6, r1, Operand(Smi::FromInt(2)));
  __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
  __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
  __ add(r6, r4, Operand(r1, LSL, 1));
  __ add(r6, r6, Operand(kParameterMapHeaderSize));
  __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameters thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left.
  Label parameters_loop, parameters_test;
  __ mov(r6, r1);
  __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
  __ sub(r9, r9, Operand(r1));
  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
  __ add(r3, r4, Operand(r6, LSL, 1));
  __ add(r3, r3, Operand(kParameterMapHeaderSize));

  // r6 = loop variable (tagged)
  // r1 = mapping index (tagged)
  // r3 = address of backing store (tagged)
  // r4 = address of parameter map (tagged), which is also the address of new
  //      object + Heap::kArgumentsObjectSize (tagged)
  // r0 = temporary scratch (a.o., for address calculation)
  // r5 = the hole value
  __ jmp(&parameters_test);

  __ bind(&parameters_loop);
  __ sub(r6, r6, Operand(Smi::FromInt(1)));
  __ mov(r0, Operand(r6, LSL, 1));
  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
  __ str(r9, MemOperand(r4, r0));
  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
  __ str(r5, MemOperand(r3, r0));
  __ add(r9, r9, Operand(Smi::FromInt(1)));
  __ bind(&parameters_test);
  __ cmp(r6, Operand(Smi::FromInt(0)));
  __ b(ne, &parameters_loop);

  // Restore r0 = new object (tagged)
  __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));

  __ bind(&skip_parameter_map);
  // r0 = address of new object (tagged)
  // r2 = argument count (tagged)
  // r3 = address of backing store (tagged)
  // r5 = scratch
  // Copy arguments header and remaining slots (if there are any).
  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
  __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
  __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));

  Label arguments_loop, arguments_test;
  __ mov(r9, r1);
  __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
  __ sub(r4, r4, Operand(r9, LSL, 1));
  __ jmp(&arguments_test);

  __ bind(&arguments_loop);
  __ sub(r4, r4, Operand(kPointerSize));
  __ ldr(r6, MemOperand(r4, 0));
  __ add(r5, r3, Operand(r9, LSL, 1));
  __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
  __ add(r9, r9, Operand(Smi::FromInt(1)));

  __ bind(&arguments_test);
  __ cmp(r9, Operand(r2));
  __ b(lt, &arguments_loop);

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  // r0 = address of new object (tagged)
  // r2 = argument count (tagged)
  __ bind(&runtime);
  __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}


void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // sp[0] : number of parameters
  // sp[4] : receiver displacement
  // sp[8] : function
  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(eq, &adaptor_frame);

  // Get the length from the frame.
  __ ldr(r1, MemOperand(sp, 0));
  __ b(&try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ str(r1, MemOperand(sp, 0));
  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
  __ str(r3, MemOperand(sp, 1 * kPointerSize));

  // Try the new space allocation. Start out with computing the size
  // of the arguments object and the elements array in words.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ SmiUntag(r1, SetCC);
  __ b(eq, &add_arguments_object);
  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
  __ bind(&add_arguments_object);
  __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));

  // Do the allocation of both objects in one go.
  __ Allocate(r1, r0, r2, r3, &runtime,
              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

  // Get the arguments boilerplate from the current native context.
  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
  __ ldr(r4, MemOperand(r4, Context::SlotOffset(
      Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));

  // Copy the JS object part.
  __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
                                 Heap::kArgumentsLengthIndex * kPointerSize));

  // If there are no actual arguments, we're done.
  Label done;
  __ cmp(r1, Operand::Zero());
  __ b(eq, &done);

  // Get the parameters pointer from the stack.
  __ ldr(r2, MemOperand(sp, 1 * kPointerSize));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
  __ SmiUntag(r1);

  // Copy the fixed array slots.
  Label loop;
  // Set up r4 to point to the first array slot.
  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ bind(&loop);
  // Pre-decrement r2 with kPointerSize on each iteration.
  // Pre-decrement in order to skip receiver.
  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex));
  // Post-increment r4 with kPointerSize on each iteration.
  __ str(r3, MemOperand(r4, kPointerSize, PostIndex));
  __ sub(r1, r1, Operand(1));
  __ cmp(r1, Operand::Zero());
  __ b(ne, &loop);

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}


void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Jump straight to the runtime if native RegExp is not selected at compile
  // time, or if RegExp entry in generated code has been turned off by a
  // runtime switch or at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  sp[0]: last_match_info (expected JSArray)
  //  sp[4]: previous index
  //  sp[8]: subject string
  //  sp[12]: JSRegExp object

  const int kLastMatchInfoOffset = 0 * kPointerSize;
  const int kPreviousIndexOffset = 1 * kPointerSize;
  const int kSubjectOffset = 2 * kPointerSize;
  const int kJSRegExpOffset = 3 * kPointerSize;

  Label runtime;
  // Allocation of registers for this function. These are in callee save
  // registers and will be preserved by the call to the native RegExp code, as
  // this code is called using the normal C calling convention. When calling
  // directly from generated code the native RegExp code will not do a GC and
  // therefore the contents of these registers are safe to use after the call.
  Register subject = r4;
  Register regexp_data = r5;
  Register last_match_info_elements = no_reg;  // will be r6;

  // Ensure that a RegExp stack is allocated.
  Isolate* isolate = masm->isolate();
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate);
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate);
  __ mov(r0, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r0, MemOperand(r0, 0));
  __ cmp(r0, Operand::Zero());
  __ b(eq, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
  __ JumpIfSmi(r0, &runtime);
  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
  __ b(ne, &runtime);

  // Check that the RegExp has been compiled (data contains a fixed array).
  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    __ SmiTst(regexp_data);
    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected);
    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
  }

  // regexp_data: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
  __ b(ne, &runtime);

  // regexp_data: RegExp data (FixedArray)
  // Check that the number of captures fits in the static offsets vector
  // buffer.
  __ ldr(r2,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Check (number_of_captures + 1) * 2 <= offsets vector size
  // Or          number_of_captures * 2 <= offsets vector size - 2
  // Multiplying by 2 comes for free since r2 is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
  __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
  __ b(hi, &runtime);

  // Reset offset for possibly sliced string.
  __ mov(r9, Operand::Zero());
  __ ldr(subject, MemOperand(sp, kSubjectOffset));
  __ JumpIfSmi(subject, &runtime);
  __ mov(r3, subject);  // Make a copy of the original subject string.
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  // subject: subject string
  // r3: subject string
  // r0: subject string instance type
  // regexp_data: RegExp data (FixedArray)
  // Handle subject string according to its encoding and representation:
  // (1) Sequential string?  If yes, go to (5).
  // (2) Anything but sequential or cons?  If yes, go to (6).
  // (3) Cons string.  If the string is flat, replace subject with first
  //     string.  Otherwise bail out.
  // (4) Is subject external?  If yes, go to (7).
  // (5) Sequential string.  Load regexp code according to encoding.
  // (E) Carry on.
  /// [...]

  // Deferred code at the end of the stub:
  // (6) Not a long external string?  If yes, go to (8).
  // (7) External string.  Make it, offset-wise, look like a sequential string.
  //     Go to (5).
  // (8) Short external string or not a string?  If yes, bail out to runtime.
  // (9) Sliced string.  Replace subject with parent.  Go to (4).

  Label seq_string /* 5 */, external_string /* 7 */,
      check_underlying /* 4 */, not_seq_nor_cons /* 6 */,
      not_long_external /* 8 */;

  // (1) Sequential string?  If yes, go to (5).
  __ and_(r1,
          r0,
          Operand(kIsNotStringMask |
                  kStringRepresentationMask |
                  kShortExternalStringMask),
          SetCC);
  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
  __ b(eq, &seq_string);  // Go to (5).

  // (2) Anything but sequential or cons?  If yes, go to (6).
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmp(r1, Operand(kExternalStringTag));
  __ b(ge, &not_seq_nor_cons);  // Go to (6).

  // (3) Cons string.  Check that it's flat.
  // Replace subject with first string and reload instance type.
  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
  __ CompareRoot(r0, Heap::kempty_stringRootIndex);
  __ b(ne, &runtime);
  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));

  // (4) Is subject external?  If yes, go to (7).
  __ bind(&check_underlying);
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r0, Operand(kStringRepresentationMask));
  // The underlying external string is never a short external string.
  STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength);
  STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength);
  __ b(ne, &external_string);  // Go to (7).

  // (5) Sequential string.  Load regexp code according to encoding.
  __ bind(&seq_string);
  // subject: sequential subject string (or look-alike, external string)
  // r3: original subject string
  // Load previous index and check range before r3 is overwritten.  We have to
  // use r3 instead of subject here because subject might have been only made
  // to look like a sequential string when it actually is an external string.
  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset));
  __ JumpIfNotSmi(r1, &runtime);
  __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset));
  __ cmp(r3, Operand(r1));
  __ b(ls, &runtime);
  __ SmiUntag(r1);

  STATIC_ASSERT(4 == kOneByteStringTag);
  STATIC_ASSERT(kTwoByteStringTag == 0);
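  // Per the asserts above the one-byte tag is 4, so shifting the masked
  // encoding bit right by two leaves 1 for one-byte (ASCII) and 0 for
  // two-byte strings; SetCC lets the conditional loads below pick the
  // matching code object.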
  __ and_(r0, r0, Operand(kStringEncodingMask));
  __ mov(r3, Operand(r0, ASR, 2), SetCC);
  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);

  // (E) Carry on.  String handling is done.
  // r6: irregexp code
  // Check that the irregexp code has been generated for the actual string
  // encoding.  If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(r6, &runtime);

  // r1: previous index
  // r3: encoding of subject string (1 if ASCII, 0 if two-byte)
  // r6: code
  // subject: Subject string
  // regexp_data: RegExp data (FixedArray)
  // All checks done. Now push arguments for native regexp code.
  __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, r0, r2);

  // Isolates: note we add an additional parameter here (isolate pointer).
  const int kRegExpExecuteArguments = 9;
  const int kParameterRegisters = 4;
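  // Under the ARM calling convention the first four arguments travel in
  // r0-r3; the remaining five are written to the stack slots reserved below.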
  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);

  // Stack pointer now points to cell where return address is to be written.
  // Arguments are before that on the stack or in registers.

  // Argument 9 (sp[20]): Pass current isolate address.
  __ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
  __ str(r0, MemOperand(sp, 5 * kPointerSize));

  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
  __ mov(r0, Operand(1));
  __ str(r0, MemOperand(sp, 4 * kPointerSize));

  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
  __ mov(r0, Operand(address_of_regexp_stack_memory_address));
  __ ldr(r0, MemOperand(r0, 0));
  __ mov(r2, Operand(address_of_regexp_stack_memory_size));
  __ ldr(r2, MemOperand(r2, 0));
  __ add(r0, r0, Operand(r2));
  __ str(r0, MemOperand(sp, 3 * kPointerSize));

  // Argument 6: Set the number of capture registers to zero to force global
  // regexps to behave as non-global. This does not affect non-global regexps.
  __ mov(r0, Operand::Zero());
  __ str(r0, MemOperand(sp, 2 * kPointerSize));

  // Argument 5 (sp[4]): static offsets vector buffer.
  __ mov(r0,
         Operand(ExternalReference::address_of_static_offsets_vector(isolate)));
  __ str(r0, MemOperand(sp, 1 * kPointerSize));

  // For arguments 4 and 3 get string length, calculate start of string data
  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
  __ add(r8, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
  __ eor(r3, r3, Operand(1));
  // Load the length from the original subject string from the previous stack
  // frame.  Therefore we have to use fp, which points exactly to two pointer
  // sizes below the previous sp.  (Because creating a new stack frame pushes
  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
  // If slice offset is not 0, load the length from the original sliced string.
  // Argument 4, r3: End of string data
  // Argument 3, r2: Start of string data
  // Prepare start and end index of the input.
  __ add(r9, r8, Operand(r9, LSL, r3));
  __ add(r2, r9, Operand(r1, LSL, r3));

  __ ldr(r8, FieldMemOperand(subject, String::kLengthOffset));
  __ SmiUntag(r8);
  __ add(r3, r9, Operand(r8, LSL, r3));

  // Argument 2 (r1): Previous index.
  // Already there.

  // Argument 1 (r0): Subject string.
  __ mov(r0, subject);

  // Locate the code entry and call it.
  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
  DirectCEntryStub stub;
  stub.GenerateCall(masm, r6);

  __ LeaveExitFrame(false, no_reg, true);

  last_match_info_elements = r6;

  // r0: result
  // subject: subject string (callee saved)
  // regexp_data: RegExp data (callee saved)
  // last_match_info_elements: Last match info elements (callee saved)
  // Check the result.
  Label success;
  __ cmp(r0, Operand(1));
  // We expect exactly one result since we force the called regexp to behave
  // as non-global.
  __ b(eq, &success);
  Label failure;
  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE));
  __ b(eq, &failure);
  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
  // If not exception it can only be retry.  Handle that in the runtime system.
  __ b(ne, &runtime);
  // The result must now be exception.  If there is no pending exception
  // already, a stack overflow (on the backtrack stack) was detected in RegExp
  // code but the exception has not been created yet.  Handle that in the
  // runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  __ mov(r1, Operand(isolate->factory()->the_hole_value()));
  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
                                       isolate)));
  __ ldr(r0, MemOperand(r2, 0));
  __ cmp(r0, r1);
  __ b(eq, &runtime);

  __ str(r1, MemOperand(r2, 0));  // Clear pending exception.

  // Check if the exception is a termination.  If so, throw as uncatchable.
  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);

  Label termination_exception;
  __ b(eq, &termination_exception);

  __ Throw(r0);

  __ bind(&termination_exception);
  __ ThrowUncatchable(r0);

  __ bind(&failure);
  // For failure and exception return null.
  __ mov(r0, Operand(masm->isolate()->factory()->null_value()));
  __ add(sp, sp, Operand(4 * kPointerSize));
  __ Ret();

  // Process the result from the native regexp code.
  __ bind(&success);
  __ ldr(r1,
         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  // Multiplying by 2 comes for free since r1 is smi-tagged.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(r1, r1, Operand(2));  // r1 was a smi.

  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
  __ JumpIfSmi(r0, &runtime);
  __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE);
  __ b(ne, &runtime);
  // Check that the JSArray is in fast case.
  __ ldr(last_match_info_elements,
         FieldMemOperand(r0, JSArray::kElementsOffset));
  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
  __ b(ne, &runtime);
  // Check that the last match info has space for the capture registers and
  // the additional information.
  __ ldr(r0,
         FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
  __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead));
  __ cmp(r2, Operand::SmiUntag(r0));
  __ b(gt, &runtime);

  // r1: number of capture registers
  // r4: subject string
  // Store the capture count.
  __ SmiTag(r2, r1);
  __ str(r2, FieldMemOperand(last_match_info_elements,
                             RegExpImpl::kLastCaptureCountOffset));
  // Store last subject and last input.
  __ str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastSubjectOffset));
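  // RecordWriteField clobbers its value and scratch registers, so subject is
  // saved in r2 across each write barrier below.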
  __ mov(r2, subject);
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastSubjectOffset,
                      subject,
                      r3,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs);
  __ mov(subject, r2);
  __ str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastInputOffset));
  __ RecordWriteField(last_match_info_elements,
                      RegExpImpl::kLastInputOffset,
                      subject,
                      r3,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  ExternalReference address_of_static_offsets_vector =
      ExternalReference::address_of_static_offsets_vector(isolate);
  __ mov(r2, Operand(address_of_static_offsets_vector));

  // r1: number of capture registers
  // r2: offsets vector
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ add(r0,
         last_match_info_elements,
         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
  __ bind(&next_capture);
  __ sub(r1, r1, Operand(1), SetCC);
  __ b(mi, &done);
  // Read the value from the static offsets vector buffer.
  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex));
  // Store the smi value in the last match info.
  __ SmiTag(r3);
  __ str(r3, MemOperand(r0, kPointerSize, PostIndex));
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
  __ add(sp, sp, Operand(4 * kPointerSize));
  __ Ret();

  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);

  // Deferred code for string handling.
  // (6) Not a long external string?  If yes, go to (8).
  __ bind(&not_seq_nor_cons);
  // Compare flags are still set.
  __ b(gt, &not_long_external);  // Go to (8).

  // (7) External string.  Make it, offset-wise, look like a sequential string.
  __ bind(&external_string);
  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(r0, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  __ ldr(subject,
         FieldMemOperand(subject, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ sub(subject,
         subject,
         Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ jmp(&seq_string);  // Go to (5).

  // (8) Short external string or not a string?  If yes, bail out to runtime.
  __ bind(&not_long_external);
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
  __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
  __ b(ne, &runtime);

  // (9) Sliced string.  Replace subject with parent.  Go to (4).
  // Load offset into r9 and replace subject string with parent.
  __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
  __ SmiUntag(r9);
  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
  __ jmp(&check_underlying);  // Go to (4).
#endif  // V8_INTERPRETED_REGEXP
}


void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  const int kMaxInlineLength = 100;
  Label slowcase;
  Label done;
  Factory* factory = masm->isolate()->factory();

  __ ldr(r1, MemOperand(sp, kPointerSize * 2));
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  __ JumpIfNotSmi(r1, &slowcase);
  __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
  __ b(hi, &slowcase);
  // Smi-tagging is equivalent to multiplying by 2.
  // Allocate RegExpResult followed by FixedArray with size in r2.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  // Size of JSArray with two in-object properties and the header of a
  // FixedArray.
  int objects_size =
      (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize;
  __ SmiUntag(r5, r1);
  __ add(r2, r5, Operand(objects_size));
  __ Allocate(
      r2,  // In: Size, in words.
      r0,  // Out: Start of allocation (tagged).
      r3,  // Scratch register.
      r4,  // Scratch register.
      &slowcase,
      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
  // r0: Start of allocated area, object-tagged.
  // r1: Number of elements in array, as smi.
  // r5: Number of elements, untagged.

  // Set JSArray map to global.regexp_result_map().
  // Set empty properties FixedArray.
  // Set elements to point to FixedArray allocated right after the JSArray.
  // Interleave operations for better latency.
  __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
  __ add(r3, r0, Operand(JSRegExpResult::kSize));
  __ mov(r4, Operand(factory->empty_fixed_array()));
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kNativeContextOffset));
  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX));
  __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Set input, index and length fields from arguments.
  __ ldr(r1, MemOperand(sp, kPointerSize * 0));
  __ ldr(r2, MemOperand(sp, kPointerSize * 1));
  __ ldr(r6, MemOperand(sp, kPointerSize * 2));
  __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
  __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
  __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));

  // Fill out the elements FixedArray.
  // r0: JSArray, tagged.
  // r3: FixedArray, tagged.
  // r5: Number of elements in array, untagged.

  // Set map.
  __ mov(r2, Operand(factory->fixed_array_map()));
  __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
  // Set FixedArray length.
  __ SmiTag(r6, r5);
  __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
  // Fill contents of fixed-array with undefined.
  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // Fill fixed array elements with undefined.
  // r0: JSArray, tagged.
  // r2: undefined.
  // r3: Start of elements in FixedArray.
  // r5: Number of elements to fill.
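  // The branch at the top of the loop tests flags set by the cmp on entry
  // and, on later iterations, by the SetCC sub inside the loop, so a zero or
  // negative count exits before any store.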
  Label loop;
  __ cmp(r5, Operand::Zero());
  __ bind(&loop);
  __ b(le, &done);  // Jump if r5 is negative or zero.
  __ sub(r5, r5, Operand(1), SetCC);
  __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
  __ jmp(&loop);

  __ bind(&done);
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}


static void GenerateRecordCallTarget(MacroAssembler* masm) {
  // Cache the called function in a global property cell.  Cache states
  // are uninitialized, monomorphic (indicated by a JSFunction), and
  // megamorphic.
  // r0 : number of arguments to the construct function
  // r1 : the function to call
  // r2 : cache cell for call target
  Label initialize, done, miss, megamorphic, not_array_function;

  ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
            masm->isolate()->heap()->undefined_value());
  ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
            masm->isolate()->heap()->the_hole_value());

  // Load the cache state into r3.
  __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));

  // A monomorphic cache hit or an already megamorphic state: invoke the
  // function without changing the state.
  __ cmp(r3, r1);
  __ b(eq, &done);

  // If we came here, we need to see if we are the array function.
  // If we didn't have a matching function, and we didn't find the megamorphic
  // sentinel, then we have in the cell either some other function or an
  // AllocationSite.  Do a map check on the object in r3.
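  // HeapObject::kMapOffset is 0, so this loads the map of the object held in
  // the cell.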
  __ ldr(r5, FieldMemOperand(r3, 0));
  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
  __ b(ne, &miss);

  // Make sure the function is the Array() function.
  __ LoadArrayFunction(r3);
  __ cmp(r1, r3);
  __ b(ne, &megamorphic);
  __ jmp(&done);

  __ bind(&miss);

  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
  // megamorphic.
  __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
  __ b(eq, &initialize);
  // MegamorphicSentinel is an immortal immovable object (undefined) so no
  // write-barrier is needed.
  __ bind(&megamorphic);
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
  __ jmp(&done);

  // An uninitialized cache is patched with the function or sentinel to
  // indicate the ElementsKind if function is the Array constructor.
  __ bind(&initialize);
  // Make sure the function is the Array() function.
  __ LoadArrayFunction(r3);
  __ cmp(r1, r3);
  __ b(ne, &not_array_function);

  // The target function is the Array constructor: create an AllocationSite
  // if we don't already have one, and store it in the cell.
  {
    FrameScope scope(masm, StackFrame::INTERNAL);

    // Arguments register must be smi-tagged to call out.
    __ SmiTag(r0);
    __ push(r0);
    __ push(r1);
    __ push(r2);

    CreateAllocationSiteStub create_stub;
    __ CallStub(&create_stub);

    __ pop(r2);
    __ pop(r1);
    __ pop(r0);
    __ SmiUntag(r0);
  }
  __ b(&done);

  __ bind(&not_array_function);
  __ str(r1, FieldMemOperand(r2, Cell::kValueOffset));
  // No need for a write barrier here - cells are rescanned.

  __ bind(&done);
}


void CallFunctionStub::Generate(MacroAssembler* masm) {
  // r1 : the function to call
  // r2 : cache cell for call target
  Label slow, non_function;

  // The receiver might implicitly be the global object. This is
  // indicated by passing the hole as the receiver to the call
  // function stub.
  if (ReceiverMightBeImplicit()) {
    Label call;
    // Get the receiver from the stack.
    // function, receiver [, arguments]
    __ ldr(r4, MemOperand(sp, argc_ * kPointerSize));
    // Call as function is indicated with the hole.
    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
    __ b(ne, &call);
    // Patch the receiver on the stack with the global receiver object.
    __ ldr(r3,
           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
    __ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset));
    __ str(r3, MemOperand(sp, argc_ * kPointerSize));
    __ bind(&call);
  }

  // Check that the function is really a JavaScript function.
  // r1: pushed function (to be verified)
  __ JumpIfSmi(r1, &non_function);
  // Get the map of the function object.
  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Fast-case: Invoke the function now.
  // r1: pushed function
  ParameterCount actual(argc_);

  if (ReceiverMightBeImplicit()) {
    Label call_as_function;
    __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
    __ b(eq, &call_as_function);
    __ InvokeFunction(r1,
                      actual,
                      JUMP_FUNCTION,
                      NullCallWrapper(),
                      CALL_AS_METHOD);
    __ bind(&call_as_function);
  }
  __ InvokeFunction(r1,
                    actual,
                    JUMP_FUNCTION,
                    NullCallWrapper(),
                    CALL_AS_FUNCTION);

  // Slow-case: Non-function called.
  __ bind(&slow);
  if (RecordCallTarget()) {
    // If there is a call target cache, mark it megamorphic in the
    // non-function case.  MegamorphicSentinel is an immortal immovable
    // object (undefined) so no write barrier is needed.
    ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
              masm->isolate()->heap()->undefined_value());
    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
    __ str(ip, FieldMemOperand(r2, Cell::kValueOffset));
  }
  // Check for function proxy.
  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
  __ b(ne, &non_function);
  __ push(r1);  // put proxy as additional argument
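  // The argument count in r0 includes the proxy pushed just above.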
  __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32));
  __ mov(r2, Operand::Zero());
  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY);
  __ SetCallKind(r5, CALL_AS_METHOD);
  {
    Handle<Code> adaptor =
        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
    __ Jump(adaptor, RelocInfo::CODE_TARGET);
  }

  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
  // of the original receiver from the call site).
  __ bind(&non_function);
  __ str(r1, MemOperand(sp, argc_ * kPointerSize));
  __ mov(r0, Operand(argc_));  // Set up the number of arguments.
  __ mov(r2, Operand::Zero());
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
  __ SetCallKind(r5, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}


void CallConstructStub::Generate(MacroAssembler* masm) {
  // r0 : number of arguments
  // r1 : the function to call
  // r2 : cache cell for call target
  Label slow, non_function_call;

  // Check that the function is not a smi.
  __ JumpIfSmi(r1, &non_function_call);
  // Check that the function is a JSFunction.
  __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
  __ b(ne, &slow);

  if (RecordCallTarget()) {
    GenerateRecordCallTarget(masm);
  }

  // Jump to the function-specific construct stub.
  Register jmp_reg = r3;
  __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(jmp_reg, FieldMemOperand(jmp_reg,
                                  SharedFunctionInfo::kConstructStubOffset));
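  // Writing the code entry point directly into pc performs the jump.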
  __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

  // r0: number of arguments
  // r1: called object
  // r3: object type
  Label do_call;
  __ bind(&slow);
  __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
  __ b(ne, &non_function_call);
  __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
  __ jmp(&do_call);

  __ bind(&non_function_call);
  __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
  __ bind(&do_call);
  // Set expected number of arguments to zero (not changing r0).
  __ mov(r2, Operand::Zero());
  __ SetCallKind(r5, CALL_AS_METHOD);
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET);
}


// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
  Label flat_string;
  Label ascii_string;
  Label got_char_code;
  Label sliced_string;

  // If the receiver is a smi trigger the non-string case.
  __ JumpIfSmi(object_, receiver_not_string_);

  // Fetch the instance type of the receiver into result register.
  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  // If the receiver is not a string trigger the non-string case.
  __ tst(result_, Operand(kIsNotStringMask));
  __ b(ne, receiver_not_string_);

  // If the index is non-smi trigger the non-smi case.
  __ JumpIfNotSmi(index_, &index_not_smi_);
  __ bind(&got_smi_index_);

  // Check for index out of range.
  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
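  // The unsigned "ls" comparison also catches negative smi indices: viewed
  // as unsigned values they compare above any valid string length.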
  __ cmp(ip, Operand(index_));
  __ b(ls, index_out_of_range_);

  __ SmiUntag(index_);

  StringCharLoadGenerator::Generate(masm,
                                    object_,
                                    index_,
                                    result_,
                                    &call_runtime_);

  __ SmiTag(result_);
  __ bind(&exit_);
}


void StringCharCodeAtGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);

  // Index is not a smi.
  __ bind(&index_not_smi_);
  // If index is a heap number, try converting it to an integer.
  __ CheckMap(index_,
              result_,
              Heap::kHeapNumberMapRootIndex,
              index_not_number_,
              DONT_DO_SMI_CHECK);
  call_helper.BeforeCall(masm);
  __ push(object_);
  __ push(index_);  // Consumed by runtime conversion function.
  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
  } else {
    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
    // NumberToSmi discards numbers that are not exact integers.
    __ CallRuntime(Runtime::kNumberToSmi, 1);
  }
  // Save the conversion result before the pop instructions below
  // have a chance to overwrite it.
  __ Move(index_, r0);
  __ pop(object_);
  // Reload the instance type.
  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
  call_helper.AfterCall(masm);
  // If index is still not a smi, it must be out of range.
  __ JumpIfNotSmi(index_, index_out_of_range_);
  // Otherwise, return to the fast path.
  __ jmp(&got_smi_index_);

  // Call runtime. We get here when the receiver is a string and the
  // index is a number, but the code for getting the actual character
  // is too complex (e.g., when the string needs to be flattened).
  __ bind(&call_runtime_);
  call_helper.BeforeCall(masm);
  __ SmiTag(index_);
  __ Push(object_, index_);
  __ CallRuntime(Runtime::kStringCharCodeAt, 2);
  __ Move(result_, r0);
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}


// -------------------------------------------------------------------------
// StringCharFromCodeGenerator

void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
  // Fast case of Heap::LookupSingleCharacterStringFromCode.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiShiftSize == 0);
  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
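  // A single tst checks two things at once: the smi tag bit and any payload
  // bits above the one-byte character range; either one falls to slow case.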
  __ tst(code_,
         Operand(kSmiTagMask |
                 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
  __ b(ne, &slow_case_);

  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
  // At this point code register contains smi tagged ASCII char code.
  __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
  __ b(eq, &slow_case_);
  __ bind(&exit_);
}


void StringCharFromCodeGenerator::GenerateSlow(
    MacroAssembler* masm,
    const RuntimeCallHelper& call_helper) {
  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);

  __ bind(&slow_case_);
  call_helper.BeforeCall(masm);
  __ push(code_);
  __ CallRuntime(Runtime::kCharFromCode, 1);
  __ Move(result_, r0);
  call_helper.AfterCall(masm);
  __ jmp(&exit_);

  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}


void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
                                          Register dest,
                                          Register src,
                                          Register count,
                                          Register scratch,
                                          bool ascii) {
  Label loop;
  Label done;
  // This loop just copies one character at a time, as it is only used for
  // very short strings.
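  // For two-byte strings the count is doubled to a byte count; the add (and
  // the cmp in the ASCII path) also sets the flags for the zero check below.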
  if (!ascii) {
    __ add(count, count, Operand(count), SetCC);
  } else {
    __ cmp(count, Operand::Zero());
  }
  __ b(eq, &done);

  __ bind(&loop);
  __ ldrb(scratch, MemOperand(src, 1, PostIndex));
  // Perform sub between load and dependent store to get the load time to
  // complete.
  __ sub(count, count, Operand(1), SetCC);
  __ strb(scratch, MemOperand(dest, 1, PostIndex));
  // gt means characters remain; falling through ends the last iteration.
  __ b(gt, &loop);

  __ bind(&done);
}


enum CopyCharactersFlags {
  COPY_ASCII = 1,
  DEST_ALWAYS_ALIGNED = 2
};


void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
                                              Register dest,
                                              Register src,
                                              Register count,
                                              Register scratch1,
                                              Register scratch2,
                                              Register scratch3,
                                              Register scratch4,
                                              int flags) {
  bool ascii = (flags & COPY_ASCII) != 0;
  bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;

  if (dest_always_aligned && FLAG_debug_code) {
    // Check that destination is actually word aligned if the flag says
    // that it is.
    __ tst(dest, Operand(kPointerAlignmentMask));
    __ Check(eq, kDestinationOfCopyNotAligned);
  }

  const int kReadAlignment = 4;
  const int kReadAlignmentMask = kReadAlignment - 1;
  // Ensure that reading an entire aligned word containing the last character
  // of a string will not read outside the allocated area (because we pad up
  // to kObjectAlignment).
  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
  // Assumes word reads and writes are little endian.
  // Nothing to do for zero characters.
  Label done;
  if (!ascii) {
    __ add(count, count, Operand(count), SetCC);
  } else {
    __ cmp(count, Operand::Zero());
  }
  __ b(eq, &done);

  // Assume that you cannot read (or write) unaligned.
  Label byte_loop;
  // Must copy at least eight bytes, otherwise just do it one byte at a time.
  __ cmp(count, Operand(8));
  __ add(count, dest, Operand(count));
  Register limit = count;  // Copy until dest equals this.
  __ b(lt, &byte_loop);

  if (!dest_always_aligned) {
    // Align dest by byte copying. Copies between zero and three bytes.
    __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC);
    Label dest_aligned;
    __ b(eq, &dest_aligned);
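    // This copies 4 - (dest & 3) bytes: the first unconditionally, a second
    // if the misalignment is <= 2, and a third if it is exactly 1.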
    __ cmp(scratch4, Operand(2));
    __ ldrb(scratch1, MemOperand(src, 1, PostIndex));
    __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le);
    __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt);
    __ strb(scratch1, MemOperand(dest, 1, PostIndex));
    __ strb(scratch2, MemOperand(dest, 1, PostIndex), le);
    __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt);
    __ bind(&dest_aligned);
  }

  Label simple_loop;

  __ sub(scratch4, dest, Operand(src));
  __ and_(scratch4, scratch4, Operand(0x03), SetCC);
  __ b(eq, &simple_loop);
  // Shift register is number of bits in a source word that
  // must be combined with bits in the next source word in order
  // to create a destination word.

  // Complex loop for src/dst that are not aligned the same way.
  {
    Label loop;
    __ mov(scratch4, Operand(scratch4, LSL, 3));
    Register left_shift = scratch4;
    __ and_(src, src, Operand(~3));  // Round down to load previous word.
    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
    // Store the "shift" most significant bits of scratch in the least
    // significant bits (i.e., shift down by (32-shift)).
    __ rsb(scratch2, left_shift, Operand(32));
    Register right_shift = scratch2;
    __ mov(scratch1, Operand(scratch1, LSR, right_shift));

    __ bind(&loop);
    __ ldr(scratch3, MemOperand(src, 4, PostIndex));
    __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
    __ str(scratch1, MemOperand(dest, 4, PostIndex));
    __ mov(scratch1, Operand(scratch3, LSR, right_shift));
    // Loop if four or more bytes left to copy.
    __ sub(scratch3, limit, Operand(dest));
    __ sub(scratch3, scratch3, Operand(4), SetCC);
    __ b(ge, &loop);
  }
  // There is now between zero and three bytes left to copy (negative that
  // number is in scratch3), and between one and three bytes already read into
  // scratch1 (eight times that number in scratch4). We may have read past
  // the end of the string, but because objects are aligned, we have not read
  // past the end of the object.
  // Find the minimum of remaining characters to move and preloaded characters
  // and write those as bytes.
  __ add(scratch3, scratch3, Operand(4), SetCC);
  __ b(eq, &done);
  __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
  // Move minimum of bytes read and bytes left to copy to scratch3.
  __ mov(scratch3, Operand(scratch4, LSR, 3), LeaveCC, lt);
  // Between one and three (value in scratch3) characters already read into
  // scratch ready to write.
  __ cmp(scratch3, Operand(2));
  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge);
  __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt);
  // Copy any remaining bytes.
  __ b(&byte_loop);

  // Simple loop.
  // Copy words from src to dst, until less than four bytes left.
  // Both src and dest are word aligned.
  __ bind(&simple_loop);
  {
    Label loop;
    __ bind(&loop);
    __ ldr(scratch1, MemOperand(src, 4, PostIndex));
    __ sub(scratch3, limit, Operand(dest));
    __ str(scratch1, MemOperand(dest, 4, PostIndex));
    // Compare to 8, not 4, because we do the subtraction before increasing
    // dest.
    __ cmp(scratch3, Operand(8));
    __ b(ge, &loop);
  }

  // Copy bytes from src to dst until dst hits limit.
  __ bind(&byte_loop);
  __ cmp(dest, Operand(limit));
  __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt);
  __ b(ge, &done);
  __ strb(scratch1, MemOperand(dest, 1, PostIndex));
  __ b(&byte_loop);

  __ bind(&done);
}


void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm,
                                                        Register c1,
                                                        Register c2,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4,
                                                        Register scratch5,
                                                        Label* not_found) {
  // Register scratch3 is the general scratch register in this function.
  Register scratch = scratch3;

  // Make sure that both characters are not digits, as such strings have a
  // different hash algorithm. Don't try to look for these in the string
  // table.
  Label not_array_index;
  __ sub(scratch, c1, Operand(static_cast<int>('0')));
  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));
  __ b(hi, &not_array_index);
  __ sub(scratch, c2, Operand(static_cast<int>('0')));
  __ cmp(scratch, Operand(static_cast<int>('9' - '0')));

  // If check failed combine both characters into single halfword.
  // This is required by the contract of the method: code at the
  // not_found branch expects this combination in the c1 register.
  __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls);
  __ b(ls, not_found);

  __ bind(&not_array_index);
  // Calculate the two character string hash.
  Register hash = scratch1;
  StringHelper::GenerateHashInit(masm, hash, c1);
  StringHelper::GenerateHashAddCharacter(masm, hash, c2);
  StringHelper::GenerateHashGetHash(masm, hash);

  // Collect the two characters in a register.
  Register chars = c1;
  __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte));

  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:  hash of two character string.

  // Load string table.
  // Load address of first element of the string table.
  Register string_table = c2;
  __ LoadRoot(string_table, Heap::kStringTableRootIndex);

  Register undefined = scratch4;
  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  // Calculate capacity mask from the string table capacity.
  Register mask = scratch2;
  __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset));
  __ mov(mask, Operand(mask, ASR, 1));
  __ sub(mask, mask, Operand(1));

  // Calculate untagged address of the first element of the string table.
  Register first_string_table_element = string_table;
  __ add(first_string_table_element, string_table,
         Operand(StringTable::kElementsStartOffset - kHeapObjectTag));

  // Registers:
  // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
  // hash:  hash of two character string
  // mask:  capacity mask
  // first_string_table_element: address of the first element of
  //                             the string table
  // undefined: the undefined object
  // scratch: -

  // Perform a number of probes in the string table.
  const int kProbes = 4;
  Label found_in_string_table;
  Label next_probe[kProbes];
  Register candidate = scratch5;  // Scratch register contains candidate.
  for (int i = 0; i < kProbes; i++) {
    // Calculate entry in string table.
    if (i > 0) {
      __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i)));
    } else {
      __ mov(candidate, hash);
    }

    __ and_(candidate, candidate, Operand(mask));

    // Load the entry from the string table.
    STATIC_ASSERT(StringTable::kEntrySize == 1);
    __ ldr(candidate,
           MemOperand(first_string_table_element,
                      candidate,
                      LSL,
                      kPointerSizeLog2));

    // If entry is undefined no string with this hash can be found.
    Label is_string;
    __ CompareObjectType(candidate, scratch, scratch, ODDBALL_TYPE);
    __ b(ne, &is_string);

    __ cmp(undefined, candidate);
    __ b(eq, not_found);
    // Must be the hole (deleted entry).
    if (FLAG_debug_code) {
      __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
      __ cmp(ip, candidate);
      __ Assert(eq, kOddballInStringTableIsNotUndefinedOrTheHole);
    }
    __ jmp(&next_probe[i]);

    __ bind(&is_string);

    // Check that the candidate is a non-external ASCII string.  The instance
    // type is still in the scratch register from the CompareObjectType
    // operation.
    __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &next_probe[i]);

    // If length is not 2 the string is not a candidate.
    __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset));
    __ cmp(scratch, Operand(Smi::FromInt(2)));
    __ b(ne, &next_probe[i]);

    // Check if the two characters match.
    // Assumes that word load is little endian.
    __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
    __ cmp(chars, scratch);
    __ b(eq, &found_in_string_table);
    __ bind(&next_probe[i]);
  }

  // No matching 2 character string found by probing.
  __ jmp(not_found);

  // Scratch register contains result when we fall through to here.
  Register result = candidate;
  __ bind(&found_in_string_table);
  __ Move(r0, result);
}


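// The three hash helpers below implement V8's seeded variant of the classic
// one-at-a-time string hash, split into init / add-character / finalize.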
void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character) {
  // hash = character + (character << 10);
  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
  // Untag smi seed and add the character.
  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
  // hash += hash << 10;
  __ add(hash, hash, Operand(hash, LSL, 10));
  // hash ^= hash >> 6;
  __ eor(hash, hash, Operand(hash, LSR, 6));
}


void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                            Register hash,
                                            Register character) {
  // hash += character;
  __ add(hash, hash, Operand(character));
  // hash += hash << 10;
  __ add(hash, hash, Operand(hash, LSL, 10));
  // hash ^= hash >> 6;
  __ eor(hash, hash, Operand(hash, LSR, 6));
}


void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                       Register hash) {
  // hash += hash << 3;
  __ add(hash, hash, Operand(hash, LSL, 3));
  // hash ^= hash >> 11;
  __ eor(hash, hash, Operand(hash, LSR, 11));
  // hash += hash << 15;
  __ add(hash, hash, Operand(hash, LSL, 15));

  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);

  // if (hash == 0) hash = 27;
  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
}


void SubStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  lr: return address
  //  sp[0]: to
  //  sp[4]: from
  //  sp[8]: string

  // This stub is called from the native-call %_SubString(...), so
  // nothing can be assumed about the arguments. It is tested that:
  //  "string" is a sequential string,
  //  both "from" and "to" are smis, and
  //  0 <= from <= to <= string.length.
  // If any of these assumptions fail, we call the runtime system.

  const int kToOffset = 0 * kPointerSize;
  const int kFromOffset = 1 * kPointerSize;
  const int kStringOffset = 2 * kPointerSize;

  __ Ldrd(r2, r3, MemOperand(sp, kToOffset));
  STATIC_ASSERT(kFromOffset == kToOffset + 4);
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);

  // Arithmetic shift right by one un-smi-tags. In this case we rotate right
  // instead because we bail out on non-smi values: ROR and ASR are equivalent
  // for smis but they set the flags in a way that's easier to optimize.
  __ mov(r2, Operand(r2, ROR, 1), SetCC);
  __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
  // If either to or from had the smi tag bit set, then C is set now, and N
  // has the same value: we rotated by 1, so the bottom bit is now the top bit.
  // We want to bail out to runtime here if From is negative.  In that case,
  // the next instruction is not executed and we fall through to bailing out
  // to runtime.
  // Executed if both r2 and r3 are untagged integers.
  __ sub(r2, r2, Operand(r3), SetCC, cc);
  // One of the above un-smis or the above SUB could have set N==1.
  __ b(mi, &runtime);  // Either "from" or "to" is not a smi, or from > to.

  // Make sure first argument is a string.
  __ ldr(r0, MemOperand(sp, kStringOffset));
  // Do a JumpIfSmi, but fold its jump into the subsequent string test.
  __ SmiTst(r0);
  Condition is_string = masm->IsObjectStringType(r0, r1, ne);
  ASSERT(is_string == eq);
  __ b(NegateCondition(is_string), &runtime);

  Label single_char;
  __ cmp(r2, Operand(1));
  __ b(eq, &single_char);

  // Short-cut for the case of trivial substring.
  Label return_r0;
  // r0: original string
  // r2: result string length
  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset));
__ cmp(r2, Operand(r4, ASR, 1));
|
4052 |
// Return original string.
|
4053 |
__ b(eq, &return_r0); |
4054 |
// Longer than original string's length or negative: unsafe arguments.
|
4055 |
__ b(hi, &runtime); |
4056 |
// Shorter than original string's length: an actual substring.
|
4057 |
|
4058 |
// Deal with different string types: update the index if necessary
|
4059 |
// and put the underlying string into r5.
|
4060 |
// r0: original string
|
4061 |
// r1: instance type
|
4062 |
// r2: length
|
4063 |
// r3: from index (untagged)
|
4064 |
Label underlying_unpacked, sliced_string, seq_or_external_string; |
4065 |
// If the string is not indirect, it can only be sequential or external.
|
4066 |
STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); |
4067 |
STATIC_ASSERT(kIsIndirectStringMask != 0);
|
4068 |
__ tst(r1, Operand(kIsIndirectStringMask)); |
4069 |
__ b(eq, &seq_or_external_string); |
4070 |
|
4071 |
__ tst(r1, Operand(kSlicedNotConsMask)); |
4072 |
__ b(ne, &sliced_string); |
4073 |
// Cons string. Check whether it is flat, then fetch first part.
|
4074 |
__ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset)); |
4075 |
__ CompareRoot(r5, Heap::kempty_stringRootIndex); |
4076 |
__ b(ne, &runtime); |
4077 |
__ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); |
4078 |
// Update instance type.
|
4079 |
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); |
4080 |
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); |
4081 |
__ jmp(&underlying_unpacked); |
4082 |
|
4083 |
__ bind(&sliced_string); |
4084 |
// Sliced string. Fetch parent and correct start index by offset.
|
4085 |
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
4086 |
__ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
4087 |
__ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index. |
4088 |
// Update instance type.
|
4089 |
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); |
4090 |
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); |
4091 |
__ jmp(&underlying_unpacked); |
4092 |
|
4093 |
__ bind(&seq_or_external_string); |
4094 |
// Sequential or external string. Just move string to the expected register.
|
4095 |
__ mov(r5, r0); |
4096 |
|
4097 |
__ bind(&underlying_unpacked); |
4098 |
|
4099 |
if (FLAG_string_slices) {
|
4100 |
Label copy_routine; |
4101 |
// r5: underlying subject string
|
4102 |
// r1: instance type of underlying subject string
|
4103 |
// r2: length
|
4104 |
// r3: adjusted start index (untagged)
|
4105 |
__ cmp(r2, Operand(SlicedString::kMinLength)); |
4106 |
// Short slice. Copy instead of slicing.
|
4107 |
__ b(lt, ©_routine); |
4108 |
// Allocate new sliced string. At this point we do not reload the instance
|
4109 |
// type including the string encoding because we simply rely on the info
|
4110 |
// provided by the original string. It does not matter if the original
|
4111 |
// string's encoding is wrong because we always have to recheck encoding of
|
4112 |
// the newly created string's parent anyways due to externalized strings.
|
4113 |
Label two_byte_slice, set_slice_header; |
4114 |
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
|
4115 |
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
|
4116 |
__ tst(r1, Operand(kStringEncodingMask)); |
4117 |
__ b(eq, &two_byte_slice); |
4118 |
__ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime); |
4119 |
__ jmp(&set_slice_header); |
4120 |
__ bind(&two_byte_slice); |
4121 |
__ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime); |
4122 |
__ bind(&set_slice_header); |
4123 |
__ mov(r3, Operand(r3, LSL, 1));
|
4124 |
__ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); |
4125 |
__ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); |
4126 |
__ jmp(&return_r0); |
4127 |
|
4128 |
__ bind(©_routine); |
4129 |
} |
4130 |
|
4131 |
// r5: underlying subject string
|
4132 |
// r1: instance type of underlying subject string
|
4133 |
// r2: length
|
4134 |
// r3: adjusted start index (untagged)
|
4135 |
Label two_byte_sequential, sequential_string, allocate_result; |
4136 |
STATIC_ASSERT(kExternalStringTag != 0);
|
4137 |
STATIC_ASSERT(kSeqStringTag == 0);
|
4138 |
__ tst(r1, Operand(kExternalStringTag)); |
4139 |
__ b(eq, &sequential_string); |
4140 |
|
4141 |
// Handle external string.
|
4142 |
// Rule out short external strings.
|
4143 |
STATIC_CHECK(kShortExternalStringTag != 0);
|
4144 |
__ tst(r1, Operand(kShortExternalStringTag)); |
4145 |
__ b(ne, &runtime); |
4146 |
__ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset)); |
4147 |
// r5 already points to the first character of underlying string.
|
4148 |
__ jmp(&allocate_result); |
4149 |
|
4150 |
__ bind(&sequential_string); |
4151 |
// Locate first character of underlying subject string.
|
4152 |
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); |
4153 |
__ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
4154 |
|
4155 |
  __ bind(&allocate_result);
  // Sequential ASCII string. Allocate the result.
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ tst(r1, Operand(kStringEncodingMask));
  __ b(eq, &two_byte_sequential);

  // Allocate and copy the resulting ASCII string.
  __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);

  // Locate first character of substring to copy.
  __ add(r5, r5, r3);
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));

  // r0: result string
  // r1: first character of result string
  // r2: result string length
  // r5: first character of substring to copy
  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r9,
                                           COPY_ASCII | DEST_ALWAYS_ALIGNED);
  __ jmp(&return_r0);

  // Allocate and copy the resulting two-byte string.
  __ bind(&two_byte_sequential);
  __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime);

  // Locate first character of substring to copy.
  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
  __ add(r5, r5, Operand(r3, LSL, 1));
  // Locate first character of result.
  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));

  // r0: result string.
  // r1: first character of result.
  // r2: result length.
  // r5: first character of substring to copy.
  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  StringHelper::GenerateCopyCharactersLong(
      masm, r1, r5, r2, r3, r4, r6, r9, DEST_ALWAYS_ALIGNED);

  __ bind(&return_r0);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4);
  __ Drop(3);
  __ Ret();

  // Just jump to runtime to create the substring.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);

  __ bind(&single_char);
  // r0: original string
  // r1: instance type
  // r2: length
  // r3: from index (untagged)
  __ SmiTag(r3, r3);
  StringCharAtGenerator generator(
      r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
  generator.GenerateFast(masm);
  __ Drop(3);
  __ Ret();
  generator.SkipSlow(masm, &runtime);
}


void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                                      Register left,
                                                      Register right,
                                                      Register scratch1,
                                                      Register scratch2,
                                                      Register scratch3) {
  Register length = scratch1;

  // Compare lengths.
  Label strings_not_equal, check_zero_length;
  __ ldr(length, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ cmp(length, scratch2);
  __ b(eq, &check_zero_length);
  __ bind(&strings_not_equal);
  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
  __ Ret();

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(length, Operand::Zero());
  __ b(ne, &compare_chars);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();

  // Compare characters.
  __ bind(&compare_chars);
  GenerateAsciiCharsCompareLoop(masm,
                                left, right, length, scratch2, scratch3,
                                &strings_not_equal);

  // Characters are equal.
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ Ret();
}


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4) {
  Label result_not_equal, compare_lengths;
  // Find minimum length and length difference.
  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ sub(scratch3, scratch1, Operand(scratch2), SetCC);
  Register length_delta = scratch3;
  __ mov(scratch1, scratch2, LeaveCC, gt);
  Register min_length = scratch1;
  STATIC_ASSERT(kSmiTag == 0);
  __ cmp(min_length, Operand::Zero());
  __ b(eq, &compare_lengths);

  // Compare loop.
  GenerateAsciiCharsCompareLoop(masm,
                                left, right, min_length, scratch2, scratch4,
                                &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ bind(&compare_lengths);
  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
  // Use length_delta as result if it's zero.
  __ mov(r0, Operand(length_delta), SetCC);
  __ bind(&result_not_equal);
  // Conditionally update the result based either on length_delta or
  // the last comparison performed in the loop above.
  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
  __ Ret();
}


void StringCompareStub::GenerateAsciiCharsCompareLoop(
    MacroAssembler* masm,
    Register left,
    Register right,
    Register length,
    Register scratch1,
    Register scratch2,
    Label* chars_not_equal) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ add(scratch1, length,
         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  __ add(left, left, Operand(scratch1));
  __ add(right, right, Operand(scratch1));
  __ rsb(length, length, Operand::Zero());
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ ldrb(scratch1, MemOperand(left, index));
  __ ldrb(scratch2, MemOperand(right, index));
  __ cmp(scratch1, scratch2);
  __ b(ne, chars_not_equal);
  __ add(index, index, Operand(1), SetCC);
  __ b(ne, &loop);
}
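
// The loop above corresponds to the following plain C++ sketch (names are
// illustrative): with both base pointers advanced one-past-the-end, the
// negative index needs no separate bounds compare, because the add that
// increments it also sets the flags for the loop branch:
//
//   for (int index = -length; index != 0; index++) {
//     if (left_end[index] != right_end[index]) goto chars_not_equal;
//   }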


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  Counters* counters = masm->isolate()->counters();

  // Stack frame on entry.
  //  sp[0]: right string
  //  sp[4]: left string
  __ Ldrd(r0, r1, MemOperand(sp));  // Load right in r0, left in r1.

  Label not_same;
  __ cmp(r0, r1);
  __ b(ne, &not_same);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)));
  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&not_same);

  // Check that both objects are sequential ASCII strings.
  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);

  // Compare flat ASCII strings natively. Remove arguments from stack first.
  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void StringAddStub::Generate(MacroAssembler* masm) {
  Label call_runtime, call_builtin;
  Builtins::JavaScript builtin_id = Builtins::ADD;

  Counters* counters = masm->isolate()->counters();

  // Stack on entry:
  //  sp[0]: second argument (right).
  //  sp[4]: first argument (left).

  // Load the two arguments.
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument.
  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument.

  // Make sure that both arguments are strings if not known in advance.
  // Otherwise, at least one of the arguments is definitely a string,
  // and we convert the one that is not known to be a string.
  if ((flags_ & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT);
    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT);
    __ JumpIfEitherSmi(r0, r1, &call_runtime);
    // Load instance types.
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
    STATIC_ASSERT(kStringTag == 0);
    // If either is not a string, go to runtime.
    __ tst(r4, Operand(kIsNotStringMask));
    __ tst(r5, Operand(kIsNotStringMask), eq);
    __ b(ne, &call_runtime);
  } else if ((flags_ & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
    ASSERT((flags_ & STRING_ADD_CHECK_RIGHT) == 0);
    GenerateConvertArgument(
        masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
    builtin_id = Builtins::STRING_ADD_RIGHT;
  } else if ((flags_ & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
    ASSERT((flags_ & STRING_ADD_CHECK_LEFT) == 0);
    GenerateConvertArgument(
        masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
    builtin_id = Builtins::STRING_ADD_LEFT;
  }

  // Both arguments are strings.
  // r0: first string
  // r1: second string
  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  {
    Label strings_not_empty;
    // Check if either of the strings is empty. In that case return the other.
    __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
    __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
    STATIC_ASSERT(kSmiTag == 0);
    __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
    __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
    STATIC_ASSERT(kSmiTag == 0);
    // Else test if second string is empty.
    __ cmp(r3, Operand(Smi::FromInt(0)), ne);
    __ b(ne, &strings_not_empty);  // If either string was empty, return r0.

    __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
    __ add(sp, sp, Operand(2 * kPointerSize));
    __ Ret();

    __ bind(&strings_not_empty);
  }

  __ SmiUntag(r2);
  __ SmiUntag(r3);
  // Both strings are non-empty.
  // r0: first string
  // r1: second string
  // r2: length of first string
  // r3: length of second string
  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // Look at the length of the result of adding the two strings.
  Label string_add_flat_result, longer_than_two;
  // Adding two lengths can't overflow.
  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
  __ add(r6, r2, Operand(r3));
  // Use the string table when adding two one character strings, as it
  // helps later optimizations to return a string here.
  __ cmp(r6, Operand(2));
  __ b(ne, &longer_than_two);

  // Check that both strings are non-external ASCII strings.
  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r3,
                                                  &call_runtime);

  // Get the two characters forming the new string.
  __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
  __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));

  // Try to lookup two character string in string table. If it is not found
  // just allocate a new one.
  Label make_two_character_string;
  StringHelper::GenerateTwoCharacterStringTableProbe(
      masm, r2, r3, r6, r0, r4, r5, r9, &make_two_character_string);
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&make_two_character_string);
  // The resulting string has length 2, and the first characters of the two
  // strings are combined into a single halfword in the r2 register.
  // So we can fill the resulting string with a single halfword store
  // instruction (which assumes that the processor is in little-endian mode).
  __ mov(r6, Operand(2));
  __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
  __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();
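  // Worked example for the halfword store above (assuming little-endian
  // mode, as the comment notes, and the low byte holding the first
  // character): for "a" + "b", r2 holds 'a' | ('b' << 8) == 0x6261, and the
  // single strh writes the bytes 0x61, 0x62, i.e. "ab".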

  __ bind(&longer_than_two);
  // Check if resulting string will be flat.
  __ cmp(r6, Operand(ConsString::kMinLength));
  __ b(lt, &string_add_flat_result);
  // Handle exceptionally long strings in the runtime system.
  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
  ASSERT(IsPowerOf2(String::kMaxLength + 1));
  // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
  __ cmp(r6, Operand(String::kMaxLength + 1));
  __ b(hs, &call_runtime);

  // If result is not supposed to be flat, allocate a cons string object.
  // If both strings are ASCII the result is an ASCII cons string.
  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }
  Label non_ascii, allocated, ascii_data;
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(r4, Operand(kStringEncodingMask));
  __ tst(r5, Operand(kStringEncodingMask), ne);
  __ b(eq, &non_ascii);

  // Allocate an ASCII cons string.
  __ bind(&ascii_data);
  __ AllocateAsciiConsString(r3, r6, r4, r5, &call_runtime);
  __ bind(&allocated);
  // Fill the fields of the cons string.
  Label skip_write_barrier, after_writing;
  ExternalReference high_promotion_mode = ExternalReference::
      new_space_high_promotion_mode_active_address(masm->isolate());
  __ mov(r4, Operand(high_promotion_mode));
  __ ldr(r4, MemOperand(r4, 0));
  __ cmp(r4, Operand::Zero());
  __ b(eq, &skip_write_barrier);

  __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
  __ RecordWriteField(r3,
                      ConsString::kFirstOffset,
                      r0,
                      r4,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs);
  __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));
  __ RecordWriteField(r3,
                      ConsString::kSecondOffset,
                      r1,
                      r4,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs);
  __ jmp(&after_writing);

  __ bind(&skip_write_barrier);
  __ str(r0, FieldMemOperand(r3, ConsString::kFirstOffset));
  __ str(r1, FieldMemOperand(r3, ConsString::kSecondOffset));

  __ bind(&after_writing);

  __ mov(r0, Operand(r3));
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii);
  // At least one of the strings is two-byte. Check whether it happens
  // to contain only one-byte characters.
  // r4: first instance type.
  // r5: second instance type.
  __ tst(r4, Operand(kOneByteDataHintMask));
  __ tst(r5, Operand(kOneByteDataHintMask), ne);
  __ b(ne, &ascii_data);
  __ eor(r4, r4, Operand(r5));
  STATIC_ASSERT(kOneByteStringTag != 0 && kOneByteDataHintTag != 0);
  __ and_(r4, r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
  __ cmp(r4, Operand(kOneByteStringTag | kOneByteDataHintTag));
  __ b(eq, &ascii_data);

  // Allocate a two byte cons string.
  __ AllocateTwoByteConsString(r3, r6, r4, r5, &call_runtime);
  __ jmp(&allocated);

  // We cannot encounter sliced strings or cons strings here since:
  STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
  // Handle creating a flat result from either external or sequential strings.
  // Locate the first characters' locations.
  // r0: first string
  // r1: second string
  // r2: length of first string
  // r3: length of second string
  // r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
  // r6: sum of lengths.
  Label first_prepared, second_prepared;
  __ bind(&string_add_flat_result);
  if ((flags_ & STRING_ADD_CHECK_BOTH) != STRING_ADD_CHECK_BOTH) {
    __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
    __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
    __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
    __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
  }

  // Check whether both strings have the same encoding.
  __ eor(ip, r4, Operand(r5));
  ASSERT(__ ImmediateFitsAddrMode1Instruction(kStringEncodingMask));
  __ tst(ip, Operand(kStringEncodingMask));
  __ b(ne, &call_runtime);

  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r4, Operand(kStringRepresentationMask));
  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  __ add(r6,
         r0,
         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
         LeaveCC,
         eq);
  __ b(eq, &first_prepared);
  // External string: rule out short external string and load string resource.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(r4, Operand(kShortExternalStringMask));
  __ b(ne, &call_runtime);
  __ ldr(r6, FieldMemOperand(r0, ExternalString::kResourceDataOffset));
  __ bind(&first_prepared);

  STATIC_ASSERT(kSeqStringTag == 0);
  __ tst(r5, Operand(kStringRepresentationMask));
  STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
  __ add(r1,
         r1,
         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
         LeaveCC,
         eq);
  __ b(eq, &second_prepared);
  // External string: rule out short external string and load string resource.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(r5, Operand(kShortExternalStringMask));
  __ b(ne, &call_runtime);
  __ ldr(r1, FieldMemOperand(r1, ExternalString::kResourceDataOffset));
  __ bind(&second_prepared);

  Label non_ascii_string_add_flat_result;
  // r6: first character of first string
  // r1: first character of second string
  // r2: length of first string.
  // r3: length of second string.
  // Both strings have the same encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(r5, Operand(kStringEncodingMask));
  __ b(eq, &non_ascii_string_add_flat_result);

  __ add(r2, r2, Operand(r3));
  __ AllocateAsciiString(r0, r2, r4, r5, r9, &call_runtime);
  __ sub(r2, r2, Operand(r3));
  __ add(r5, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
  // r0: result string.
  // r6: first character of first string.
  // r1: first character of second string.
  // r2: length of first string.
  // r3: length of second string.
  // r5: first character of result.
  StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, true);
  // r5: next character of result.
  StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, true);
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  __ bind(&non_ascii_string_add_flat_result);
  __ add(r2, r2, Operand(r3));
  __ AllocateTwoByteString(r0, r2, r4, r5, r9, &call_runtime);
  __ sub(r2, r2, Operand(r3));
  __ add(r5, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  // r0: result string.
  // r6: first character of first string.
  // r1: first character of second string.
  // r2: length of first string.
  // r3: length of second string.
  // r5: first character of result.
  StringHelper::GenerateCopyCharacters(masm, r5, r6, r2, r4, false);
  // r5: next character of result.
  StringHelper::GenerateCopyCharacters(masm, r5, r1, r3, r4, false);
  __ IncrementCounter(counters->string_add_native(), 1, r2, r3);
  __ add(sp, sp, Operand(2 * kPointerSize));
  __ Ret();

  // Just jump to runtime to add the two strings.
  __ bind(&call_runtime);
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);

  if (call_builtin.is_linked()) {
    __ bind(&call_builtin);
    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
  }
}


void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ push(r0);
  __ push(r1);
}


void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm) {
  __ pop(r1);
  __ pop(r0);
}


void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
                                            int stack_offset,
                                            Register arg,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            Register scratch4,
                                            Label* slow) {
  // First check if the argument is already a string.
  Label not_string, done;
  __ JumpIfSmi(arg, &not_string);
  __ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
  __ b(lt, &done);

  // Check the number to string cache.
  __ bind(&not_string);
  // Puts the cached result into scratch1.
  __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
  __ mov(arg, scratch1);
  __ str(arg, MemOperand(sp, stack_offset));
  __ bind(&done);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMI);
  Label miss;
  __ orr(r2, r1, r0);
  __ JumpIfNotSmi(r2, &miss);

  if (GetCondition() == eq) {
    // For equality we do not care about the sign of the result.
    __ sub(r0, r0, r1, SetCC);
  } else {
    // Untag before subtracting to avoid handling overflow.
    __ SmiUntag(r1);
    __ sub(r0, r1, Operand::SmiUntag(r0));
  }
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}
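
// Note on the two paths above: a smi is its value shifted left by one bit,
// so in the equality path the tagged difference is zero exactly when the
// operands are equal and its sign is irrelevant. The ordered path untags
// first because subtracting tagged values can overflow (e.g. for the
// extreme smis 2^30 - 1 and -2^30), while the untagged difference always
// fits in 32 bits.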


void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::NUMBER);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;

  if (left_ == CompareIC::SMI) {
    __ JumpIfNotSmi(r1, &miss);
  }
  if (right_ == CompareIC::SMI) {
    __ JumpIfNotSmi(r0, &miss);
  }

  // Inline the double comparison and fall back to the general compare
  // stub if NaN is involved.
  // Load left and right operand.
  Label done, left, left_smi, right_smi;
  __ JumpIfSmi(r0, &right_smi);
  __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
              DONT_DO_SMI_CHECK);
  __ sub(r2, r0, Operand(kHeapObjectTag));
  __ vldr(d1, r2, HeapNumber::kValueOffset);
  __ b(&left);
  __ bind(&right_smi);
  __ SmiToDouble(d1, r0);

  __ bind(&left);
  __ JumpIfSmi(r1, &left_smi);
  __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
              DONT_DO_SMI_CHECK);
  __ sub(r2, r1, Operand(kHeapObjectTag));
  __ vldr(d0, r2, HeapNumber::kValueOffset);
  __ b(&done);
  __ bind(&left_smi);
  __ SmiToDouble(d0, r1);

  __ bind(&done);
  // Compare operands.
  __ VFPCompareAndSetFlags(d0, d1);

  // Don't base result on status bits when a NaN is involved.
  __ b(vs, &unordered);

  // Return a result of -1, 0, or 1, based on status bits.
  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
  __ mov(r0, Operand(LESS), LeaveCC, lt);
  __ mov(r0, Operand(GREATER), LeaveCC, gt);
  __ Ret();

  __ bind(&unordered);
  __ bind(&generic_stub);
  ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
                     CompareIC::GENERIC);
  __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
    __ b(ne, &miss);
    __ JumpIfSmi(r1, &unordered);
    __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
    __ b(ne, &maybe_undefined2);
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
    __ b(eq, &unordered);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;
  Register tmp1 = r2;
  Register tmp2 = r3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are internalized strings.
  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
  __ orr(tmp1, tmp1, Operand(tmp2));
  __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
  __ b(ne, &miss);

  // Internalized strings are compared by identity.
  __ cmp(left, right);
  // Make sure r0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(r0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::UNIQUE_NAME);
  ASSERT(GetCondition() == eq);
  Label miss;

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;
  Register tmp1 = r2;
  Register tmp2 = r3;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are unique names. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));

  __ JumpIfNotUniqueName(tmp1, &miss);
  __ JumpIfNotUniqueName(tmp2, &miss);

  // Unique names are compared by identity.
  __ cmp(left, right);
  // Make sure r0 is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(r0));
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRING);
  Label miss;

  bool equality = Token::IsEqualityOp(op_);

  // Registers containing left and right operands respectively.
  Register left = r1;
  Register right = r0;
  Register tmp1 = r2;
  Register tmp2 = r3;
  Register tmp3 = r4;
  Register tmp4 = r5;

  // Check that both operands are heap objects.
  __ JumpIfEitherSmi(left, right, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kNotStringTag != 0);
  __ orr(tmp3, tmp1, tmp2);
  __ tst(tmp3, Operand(kIsNotStringMask));
  __ b(ne, &miss);

  // Fast check for identical strings.
  __ cmp(left, right);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
  __ Ret(eq);

  // Handle not identical strings.

  // Check that both strings are internalized strings. If they are, we're done
  // because we already know they are not identical. We know they are both
  // strings.
  if (equality) {
    ASSERT(GetCondition() == eq);
    STATIC_ASSERT(kInternalizedTag == 0);
    __ orr(tmp3, tmp1, Operand(tmp2));
    __ tst(tmp3, Operand(kIsNotInternalizedMask));
    // Make sure r0 is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    ASSERT(right.is(r0));
    __ Ret(eq);
  }

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
      tmp1, tmp2, tmp3, tmp4, &runtime);

  // Compare flat ASCII strings. Returns when done.
  if (equality) {
    StringCompareStub::GenerateFlatAsciiStringEquals(
        masm, left, right, tmp1, tmp2, tmp3);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(
        masm, left, right, tmp1, tmp2, tmp3, tmp4);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ Push(left, right);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECT);
  Label miss;
  __ and_(r2, r1, Operand(r0));
  __ JumpIfSmi(r2, &miss);

  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
  __ b(ne, &miss);
  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
  __ b(ne, &miss);

  ASSERT(GetCondition() == eq);
  __ sub(r0, r0, Operand(r1));
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
  Label miss;
  __ and_(r2, r1, Operand(r0));
  __ JumpIfSmi(r2, &miss);
  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
  __ cmp(r2, Operand(known_map_));
  __ b(ne, &miss);
  __ cmp(r3, Operand(known_map_));
  __ b(ne, &miss);

  __ sub(r0, r0, Operand(r1));
  __ Ret();

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    ExternalReference miss =
        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());

    FrameScope scope(masm, StackFrame::INTERNAL);
    __ Push(r1, r0);
    __ push(lr);
    __ Push(r1, r0);
    __ mov(ip, Operand(Smi::FromInt(op_)));
    __ push(ip);
    __ CallExternalReference(miss, 3);
    // Compute the entry point of the rewritten stub.
    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
    // Restore registers.
    __ pop(lr);
    __ pop(r0);
    __ pop(r1);
  }

  __ Jump(r2);
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // Place the return address on the stack, making the call
  // GC safe. The RegExp backend also relies on this.
  __ str(lr, MemOperand(sp, 0));
  __ blx(ip);  // Call the C++ function.
  __ VFPEnsureFPSCRState(r2);
  __ ldr(pc, MemOperand(sp, 0));
}
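
// Storing lr to the stack before the call keeps the return address visible
// to the GC: if the code it points into is moved during the call, the
// collector can update the stack slot, and the "ldr pc" above returns
// through the (possibly updated) slot instead of a stale register value.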


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  intptr_t code =
      reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
  __ Move(ip, target);
  __ mov(lr, Operand(code, RelocInfo::CODE_TARGET));
  __ blx(lr);  // Call the stub.
}


void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register receiver,
                                                      Register properties,
                                                      Handle<Name> name,
                                                      Register scratch0) {
  ASSERT(name->IsUniqueName());
  // If names of slots in range from 1 to kProbes - 1 for the hash value are
  // not equal to the name and kProbes-th slot is not used (its name is the
  // undefined value), it guarantees the hash table doesn't contain the
  // property. It's true even if some slots represent deleted properties
  // (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // scratch0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = scratch0;
    // Capacity is smi 2^n.
    __ ldr(index, FieldMemOperand(properties, kCapacityOffset));
    __ sub(index, index, Operand(1));
    __ and_(index, index, Operand(
        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));

    // Scale the index by multiplying by the entry size.
    ASSERT(NameDictionary::kEntrySize == 3);
    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.

    Register entity_name = scratch0;
    // Having undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    Register tmp = properties;
    __ add(tmp, properties, Operand(index, LSL, 1));
    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));

    ASSERT(!tmp.is(entity_name));
    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
    __ cmp(entity_name, tmp);
    __ b(eq, done);

    // Load the hole ready for use below:
    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);

    // Stop if found the property.
    __ cmp(entity_name, Operand(Handle<Name>(name)));
    __ b(eq, miss);

    Label good;
    __ cmp(entity_name, tmp);
    __ b(eq, &good);

    // Check if the entry name is not a unique name.
    __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
    __ ldrb(entity_name,
            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
    __ JumpIfNotUniqueName(entity_name, miss);
    __ bind(&good);

    // Restore the properties.
    __ ldr(properties,
           FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  }

  const int spill_mask =
      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
       r2.bit() | r1.bit() | r0.bit());

  __ stm(db_w, sp, spill_mask);
  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ mov(r1, Operand(Handle<Name>(name)));
  NameDictionaryLookupStub stub(NEGATIVE_LOOKUP);
  __ CallStub(&stub);
  __ cmp(r0, Operand::Zero());
  __ ldm(ia_w, sp, spill_mask);

  __ b(eq, done);
  __ b(ne, miss);
}


// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                      Label* miss,
                                                      Label* done,
                                                      Register elements,
                                                      Register name,
                                                      Register scratch1,
                                                      Register scratch2) {
  ASSERT(!elements.is(scratch1));
  ASSERT(!elements.is(scratch2));
  ASSERT(!name.is(scratch1));
  ASSERT(!name.is(scratch2));

  __ AssertName(name);

  // Compute the capacity mask.
  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
  __ SmiUntag(scratch1);
  __ sub(scratch1, scratch1, Operand(1));

  // Generate an unrolled loop that performs a few probes before
  // giving up. Measurements done on Gmail indicate that 2 probes
  // cover ~93% of loads from dictionaries.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the following and instruction.
      ASSERT(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ add(scratch2, scratch2, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    }
    __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));

    // Scale the index by multiplying by the element size.
    ASSERT(NameDictionary::kEntrySize == 3);
    // scratch2 = scratch2 * 3.
    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    __ add(scratch2, elements, Operand(scratch2, LSL, 2));
    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
    __ cmp(name, Operand(ip));
    __ b(eq, done);
  }

  const int spill_mask =
      (lr.bit() | r6.bit() | r5.bit() | r4.bit() |
       r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
      ~(scratch1.bit() | scratch2.bit());

  __ stm(db_w, sp, spill_mask);
  if (name.is(r0)) {
    ASSERT(!elements.is(r1));
    __ Move(r1, name);
    __ Move(r0, elements);
  } else {
    __ Move(r0, elements);
    __ Move(r1, name);
  }
  NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ cmp(r0, Operand::Zero());
  __ mov(scratch2, Operand(r2));
  __ ldm(ia_w, sp, spill_mask);

  __ b(ne, done);
  __ b(eq, miss);
}


void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  // Registers:
  //  result: NameDictionary to probe
  //  r1: key
  //  dictionary: NameDictionary to probe.
  //  index: will hold an index of entry if lookup is successful.
  //         might alias with result_.
  // Returns:
  //  result_ is zero if lookup failed, non-zero otherwise.

  Register result = r0;
  Register dictionary = r0;
  Register key = r1;
  Register index = r2;
  Register mask = r3;
  Register hash = r4;
  Register undefined = r5;
  Register entry_key = r6;

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
  __ SmiUntag(mask);
  __ sub(mask, mask, Operand(1));

  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));

  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);

  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    // Capacity is smi 2^n.
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the following and instruction.
      ASSERT(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ add(index, hash, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    } else {
      __ mov(index, Operand(hash));
    }
    __ and_(index, mask, Operand(index, LSR, Name::kHashShift));

    // Scale the index by multiplying by the entry size.
    ASSERT(NameDictionary::kEntrySize == 3);
    __ add(index, index, Operand(index, LSL, 1));  // index *= 3.

    ASSERT_EQ(kSmiTagSize, 1);
    __ add(index, dictionary, Operand(index, LSL, 2));
    __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));

    // Having undefined at this place means the name is not contained.
    __ cmp(entry_key, Operand(undefined));
    __ b(eq, &not_in_dictionary);

    // Stop if found the property.
    __ cmp(entry_key, Operand(key));
    __ b(eq, &in_dictionary);

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // Check if the entry name is not a unique name.
      __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
      __ ldrb(entry_key,
              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ mov(result, Operand::Zero());
    __ Ret();
  }

  __ bind(&in_dictionary);
  __ mov(result, Operand(1));
  __ Ret();

  __ bind(&not_in_dictionary);
  __ mov(result, Operand::Zero());
  __ Ret();
}
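
// The probe loop above corresponds to this plain C++ sketch (illustrative
// only; "capacity" is the dictionary capacity, always a power of two):
//
//   int index = (hash + i + i * i) & (capacity - 1);  // quadratic probing
//   Object* candidate = entries[index * kEntrySize];  // kEntrySize == 3
//   if (candidate == undefined) -> not in dictionary
//   if (candidate == key)       -> found at this entry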


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};


#define REG(Name) { kRegister_ ## Name ## _Code }

static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(r6), REG(r4), REG(r3), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
  // Also used in KeyedStoreIC::GenerateGeneric.
  { REG(r3), REG(r4), REG(r5), EMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(r1), REG(r2), REG(r3), EMIT_REMEMBERED_SET },
  { REG(r3), REG(r2), REG(r1), EMIT_REMEMBERED_SET },
  // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(r2), REG(r1), REG(r3), EMIT_REMEMBERED_SET },
  { REG(r3), REG(r1), REG(r2), EMIT_REMEMBERED_SET },
  // KeyedStoreStubCompiler::GenerateStoreFastElement.
  { REG(r3), REG(r2), REG(r4), EMIT_REMEMBERED_SET },
  { REG(r2), REG(r3), REG(r4), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateMapChangeElementTransition
  // and ElementsTransitionGenerator::GenerateSmiToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(r2), REG(r3), REG(r9), EMIT_REMEMBERED_SET },
  { REG(r2), REG(r3), REG(r9), OMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(r6), REG(r2), REG(r0), EMIT_REMEMBERED_SET },
  { REG(r2), REG(r6), REG(r9), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate
  { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
  // FastNewClosureStub::Generate
  { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
  // StringAddStub::Generate
  { REG(r3), REG(r1), REG(r4), EMIT_REMEMBERED_SET },
  { REG(r3), REG(r0), REG(r4), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
};

#undef REG


bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    if (object_.is(entry->object) &&
        value_.is(entry->value) &&
        address_.is(entry->address) &&
        remembered_set_action_ == entry->action &&
        save_fp_regs_mode_ == kDontSaveFPRegs) {
      return true;
    }
  }
  return false;
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
    Isolate* isolate) {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode(isolate)->set_is_pregenerated(true);
  // Hydrogen code stubs need stub2 at snapshot time.
  StoreBufferOverflowStub stub2(kSaveFPRegs);
  stub2.GetCode(isolate)->set_is_pregenerated(true);
}


void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
  for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    RecordWriteStub stub(entry->object,
                         entry->value,
                         entry->address,
                         entry->action,
                         kDontSaveFPRegs);
    stub.GetCode(isolate)->set_is_pregenerated(true);
  }
}


bool CodeStub::CanUseFPRegisters() {
  return true;  // VFP2 is a base requirement for V8.
}


// Takes the input in 3 registers: address_, value_, and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call. We patch it back and
  // forth between a compare instruction (a nop in this position) and the
  // real branch when we start and stop incremental heap marking.
  // See RecordWriteStub::Patch for details.
  {
    // Block literal pool emission, as the position of these two instructions
    // is assumed by the patching code.
    Assembler::BlockConstPoolScope block_const_pool(masm);
    __ b(&skip_to_incremental_noncompacting);
    __ b(&skip_to_incremental_compacting);
  }

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
  PatchBranchIntoNop(masm, 0);
  PatchBranchIntoNop(masm, Assembler::kInstrSize);
}
5442 |
|
5443 |
|
5444 |
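// Generates the incremental-marking path of the stub. When a remembered-set
// update was requested, it is skipped for values that are not in new space
// (or when the object's page is already scan-on-scavenge); in either case
// the incremental marker is still consulted before returning.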
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     ne,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm, mode);
    regs_.Restore(masm);
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm, mode);
  regs_.Restore(masm);
  __ Ret();
}


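// Calls the C++ record-write function with (object, slot address, isolate)
// in r0-r2, shuffling registers first so the slot address is not clobbered
// when r0 happens to hold it. Caller-save registers are preserved around the
// call.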
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
  int argument_count = 3;
  __ PrepareCallCFunction(argument_count, regs_.scratch0());
  Register address =
      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
  ASSERT(!address.is(regs_.object()));
  ASSERT(!address.is(r0));
  __ Move(address, regs_.address());
  __ Move(r0, regs_.object());
  __ Move(r1, address);
  __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  if (mode == INCREMENTAL_COMPACTION) {
    __ CallCFunction(
        ExternalReference::incremental_evacuation_record_write_function(
            masm->isolate()),
        argument_count);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ CallCFunction(
        ExternalReference::incremental_marking_record_write_function(
            masm->isolate()),
        argument_count);
  }
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}


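// Decides whether the incremental marker must be informed. The page's write
// barrier counter is decremented first; when it underflows the marker is
// always informed. Otherwise the marker only needs to know about stores of a
// still-white value (or, while compacting, a value on an evacuation candidate
// page) into a black object; in every other case the stub returns or updates
// the remembered set, as selected by on_no_need.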
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
  __ ldr(regs_.scratch1(),
         MemOperand(regs_.scratch0(),
                    MemoryChunk::kWriteBarrierCounterOffset));
  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
  __ str(regs_.scratch1(),
         MemOperand(regs_.scratch0(),
                    MemoryChunk::kWriteBarrierCounterOffset));
  __ b(mi, &need_incremental);

  // Let's look at the color of the object:  If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     eq,
                     &ensure_not_white);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     eq,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.object(), regs_.address());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),    // Scratch.
                    regs_.address(),   // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


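// Stores an element into an array literal, dispatching on the literal's
// elements kind: heap-object stores into fast elements need a write barrier,
// smi stores do not, and double stores go through
// StoreNumberToDoubleElements. Anything that would require an elements
// transition falls back to the runtime.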
void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : element value to store
  //  -- r3    : element index as smi
  //  -- sp[0] : array literal index in function as smi
  //  -- sp[4] : array literal
  // clobbers r1, r2, r4
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  // Get array literal index, array literal and its map.
  __ ldr(r4, MemOperand(sp, 0 * kPointerSize));
  __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
  __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset));

  __ CheckFastElements(r2, r5, &double_elements);
  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
  __ JumpIfSmi(r0, &smi_element);
  __ CheckFastSmiElements(r2, r5, &fast_elements);

  // Store into the array literal requires an elements transition. Call into
  // the runtime.
  __ bind(&slow_elements);
  __ Push(r1, r3, r0);
  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset));
  __ Push(r5, r4);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(r0, MemOperand(r6, 0));
  // Update the write barrier for the array store.
  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
  // and value is Smi.
  __ bind(&smi_element);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3));
  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
  __ Ret();

  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);
  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements);
  __ Ret();
}


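// Re-enters the runtime via CEntryStub and then unwinds the stub failure
// frame: the caller's stack parameter count was recorded in the frame, so
// those parameters (plus one extra slot in JS_FUNCTION_STUB_MODE, presumably
// accounting for the receiver) are dropped before returning.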
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ ldr(r1, MemOperand(fp, parameter_count_offset));
  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
    __ add(r1, r1, Operand(1));
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
  __ add(sp, sp, r1);
  __ Ret();
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
    AllowStubCallsScope allow_stub_calls(masm, true);
    ProfileEntryHookStub stub;
    __ push(lr);
    __ CallStub(&stub);
    __ pop(lr);
  }
}


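// Note on kReturnAddressDistanceFromFunctionStart below: MaybeCallEntryHook
// above emits push(lr) plus what PredictableCodeSizeScope pins to a
// two-instruction call, so inside the stub lr appears to point
// 3 * kInstrSize past the start of the instrumented function.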
void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  // The entry hook is a "push lr" instruction, followed by a call.
  const int32_t kReturnAddressDistanceFromFunctionStart =
      3 * Assembler::kInstrSize;

  // This should contain all kCallerSaved registers.
  const RegList kSavedRegs =
      1 << 0 |  // r0
      1 << 1 |  // r1
      1 << 2 |  // r2
      1 << 3 |  // r3
      1 << 5 |  // r5
      1 << 9;   // r9
  // We also save lr, so the count here is one higher than the mask indicates.
  const int32_t kNumSavedRegs = 7;

  ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);

  // Save all caller-save registers as this may be called from anywhere.
  __ stm(db_w, sp, kSavedRegs | lr.bit());

  // Compute the function's address for the first argument.
  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart));

  // The caller's return address is above the saved temporaries.
  // Grab that for the second argument to the hook.
  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize));

  // Align the stack if necessary.
  int frame_alignment = masm->ActivationFrameAlignment();
  if (frame_alignment > kPointerSize) {
    __ mov(r5, sp);
    ASSERT(IsPowerOf2(frame_alignment));
    __ and_(sp, sp, Operand(-frame_alignment));
  }

#if V8_HOST_ARCH_ARM
  int32_t entry_hook =
      reinterpret_cast<int32_t>(masm->isolate()->function_entry_hook());
  __ mov(ip, Operand(entry_hook));
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  // It additionally takes an isolate as a third parameter.
  __ mov(r2, Operand(ExternalReference::isolate_address(masm->isolate())));

  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ mov(ip, Operand(ExternalReference(&dispatcher,
                                       ExternalReference::BUILTIN_CALL,
                                       masm->isolate())));
#endif
  __ Call(ip);

  // Restore the stack pointer if needed.
  if (frame_alignment > kPointerSize) {
    __ mov(sp, r5);
  }

  // Also pop pc to get Ret(0).
  __ ldm(ia_w, sp, kSavedRegs | pc.bit());
}


template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
                                AllocationSiteOverrideMode mode) {
  if (mode == DISABLE_ALLOCATION_SITES) {
    T stub(GetInitialFastElementsKind(),
           CONTEXT_CHECK_REQUIRED,
           mode);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      Label next;
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(r3, Operand(kind));
      __ b(ne, &next);
      T stub(kind);
      __ TailCallStub(&stub);
      __ bind(&next);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


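// The fast ElementsKind sequence pairs each packed kind with its holey
// variant at the next (odd) index, per the ASSERTs below; that is why
// testing the low bit of the kind answers "already holey?" and adding 1
// converts a packed kind into its holey counterpart.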
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
                                           AllocationSiteOverrideMode mode) {
  // r2 - type info cell (if mode != DISABLE_ALLOCATION_SITES)
  // r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
  // r0 - number of arguments
  // r1 - constructor?
  // sp[0] - last argument
  Label normal_sequence;
  if (mode == DONT_OVERRIDE) {
    ASSERT(FAST_SMI_ELEMENTS == 0);
    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
    ASSERT(FAST_ELEMENTS == 2);
    ASSERT(FAST_HOLEY_ELEMENTS == 3);
    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);

    // Is the low bit set? If so, we are holey and that is good.
    __ tst(r3, Operand(1));
    __ b(ne, &normal_sequence);
  }

  // Look at the first argument.
  __ ldr(r5, MemOperand(sp, 0));
  __ cmp(r5, Operand::Zero());
  __ b(eq, &normal_sequence);

  if (mode == DISABLE_ALLOCATION_SITES) {
    ElementsKind initial = GetInitialFastElementsKind();
    ElementsKind holey_initial = GetHoleyElementsKind(initial);

    ArraySingleArgumentConstructorStub stub_holey(holey_initial,
                                                  CONTEXT_CHECK_REQUIRED,
                                                  DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub_holey);

    __ bind(&normal_sequence);
    ArraySingleArgumentConstructorStub stub(initial,
                                            CONTEXT_CHECK_REQUIRED,
                                            DISABLE_ALLOCATION_SITES);
    __ TailCallStub(&stub);
  } else if (mode == DONT_OVERRIDE) {
    // We are going to create a holey array, but our kind is non-holey.
    // Fix kind and retry (only if we have an allocation site in the cell).
    __ add(r3, r3, Operand(1));
    __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));

    if (FLAG_debug_code) {
      __ ldr(r5, FieldMemOperand(r5, 0));
      __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
      __ Assert(eq, kExpectedAllocationSiteInCell);
      __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
    }

    // Save the resulting elements kind in type info.
    __ SmiTag(r3);
    __ ldr(r5, FieldMemOperand(r2, Cell::kValueOffset));
    __ str(r3, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
    __ SmiUntag(r3);

    __ bind(&normal_sequence);
    int last_index = GetSequenceIndexFromFastElementsKind(
        TERMINAL_FAST_ELEMENTS_KIND);
    for (int i = 0; i <= last_index; ++i) {
      Label next;
      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
      __ cmp(r3, Operand(kind));
      __ b(ne, &next);
      ArraySingleArgumentConstructorStub stub(kind);
      __ TailCallStub(&stub);
      __ bind(&next);
    }

    // If we reached this point there is a problem.
    __ Abort(kUnexpectedElementsKindInArrayConstructor);
  } else {
    UNREACHABLE();
  }
}


template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
  ElementsKind initial_kind = GetInitialFastElementsKind();
  ElementsKind initial_holey_kind = GetHoleyElementsKind(initial_kind);

  int to_index = GetSequenceIndexFromFastElementsKind(
      TERMINAL_FAST_ELEMENTS_KIND);
  for (int i = 0; i <= to_index; ++i) {
    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
    T stub(kind);
    stub.GetCode(isolate)->set_is_pregenerated(true);
    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
        (!FLAG_track_allocation_sites &&
         (kind == initial_kind || kind == initial_holey_kind))) {
      T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
      stub1.GetCode(isolate)->set_is_pregenerated(true);
    }
  }
}


void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
      isolate);
  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
      isolate);
}


void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
    Isolate* isolate) {
  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
  for (int i = 0; i < 2; i++) {
    // For internal arrays we only need a few things.
    InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
    stubh1.GetCode(isolate)->set_is_pregenerated(true);
    InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
    stubh2.GetCode(isolate)->set_is_pregenerated(true);
    InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
    stubh3.GetCode(isolate)->set_is_pregenerated(true);
  }
}


void ArrayConstructorStub::GenerateDispatchToArrayStub(
    MacroAssembler* masm,
    AllocationSiteOverrideMode mode) {
  if (argument_count_ == ANY) {
    Label not_zero_case, not_one_case;
    __ tst(r0, r0);
    __ b(ne, &not_zero_case);
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);

    __ bind(&not_zero_case);
    __ cmp(r0, Operand(1));
    __ b(gt, &not_one_case);
    CreateArrayDispatchOneArgument(masm, mode);

    __ bind(&not_one_case);
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else if (argument_count_ == NONE) {
    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
  } else if (argument_count_ == ONE) {
    CreateArrayDispatchOneArgument(masm, mode);
  } else if (argument_count_ == MORE_THAN_ONE) {
    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
  } else {
    UNREACHABLE();
  }
}


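// Entry point for Array(...) calls. When the type info cell holds an
// AllocationSite, the site's tracked transition info supplies the elements
// kind for dispatch; otherwise the stub dispatches with allocation-site
// feedback disabled.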
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : argc (only if argument_count_ == ANY)
  //  -- r1    : constructor
  //  -- r2    : type info cell
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------
  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ tst(r3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);

    // We should either have undefined in r2 or a valid cell.
    Label okay_here;
    Handle<Map> cell_map = masm->isolate()->factory()->cell_map();
    __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
    __ b(eq, &okay_here);
    __ ldr(r3, FieldMemOperand(r2, 0));
    __ cmp(r3, Operand(cell_map));
    __ Assert(eq, kExpectedPropertyCellInRegisterEbx);
    __ bind(&okay_here);
  }

  Label no_info;
  // Get the elements kind and case on that.
  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
  __ b(eq, &no_info);
  __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));

  // If the type cell is undefined, or contains anything other than an
  // AllocationSite, call an array constructor that doesn't use
  // AllocationSites.
  __ ldr(r4, FieldMemOperand(r3, 0));
  __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex);
  __ b(ne, &no_info);

  __ ldr(r3, FieldMemOperand(r3, AllocationSite::kTransitionInfoOffset));
  __ SmiUntag(r3);
  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);

  __ bind(&no_info);
  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
}


void InternalArrayConstructorStub::GenerateCase(
    MacroAssembler* masm, ElementsKind kind) {
  Label not_zero_case, not_one_case;
  Label normal_sequence;

  __ tst(r0, r0);
  __ b(ne, &not_zero_case);
  InternalArrayNoArgumentConstructorStub stub0(kind);
  __ TailCallStub(&stub0);

  __ bind(&not_zero_case);
  __ cmp(r0, Operand(1));
  __ b(gt, &not_one_case);

  if (IsFastPackedElementsKind(kind)) {
    // We might need to create a holey array; look at the first argument.
    __ ldr(r3, MemOperand(sp, 0));
    __ cmp(r3, Operand::Zero());
    __ b(eq, &normal_sequence);

    InternalArraySingleArgumentConstructorStub
        stub1_holey(GetHoleyElementsKind(kind));
    __ TailCallStub(&stub1_holey);
  }

  __ bind(&normal_sequence);
  InternalArraySingleArgumentConstructorStub stub1(kind);
  __ TailCallStub(&stub1);

  __ bind(&not_one_case);
  InternalArrayNArgumentsConstructorStub stubN(kind);
  __ TailCallStub(&stubN);
}


void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r0    : argc
  //  -- r1    : constructor
  //  -- sp[0] : return address
  //  -- sp[4] : last argument
  // -----------------------------------

  if (FLAG_debug_code) {
    // The array construct code is only set for the global and natives
    // builtin Array functions which always have maps.

    // Initial map for the builtin Array function should be a map.
    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
    // Will both indicate a NULL and a Smi.
    __ tst(r3, Operand(kSmiTagMask));
    __ Assert(ne, kUnexpectedInitialMapForArrayFunction);
    __ CompareObjectType(r3, r3, r4, MAP_TYPE);
    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
  }

  // Figure out the right elements kind.
  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
  // Load the map's "bit field 2" into r3. We only need the first byte,
  // but the following bit field extraction takes care of that anyway.
  __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
  // Retrieve elements_kind from bit field 2.
  __ Ubfx(r3, r3, Map::kElementsKindShift, Map::kElementsKindBitCount);

  if (FLAG_debug_code) {
    Label done;
    __ cmp(r3, Operand(FAST_ELEMENTS));
    __ b(eq, &done);
    __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
    __ Assert(eq,
              kInvalidElementsKindForInternalArrayOrInternalPackedArray);
    __ bind(&done);
  }

  Label fast_elements_case;
  __ cmp(r3, Operand(FAST_ELEMENTS));
  __ b(eq, &fast_elements_case);
  GenerateCase(masm, FAST_HOLEY_ELEMENTS);

  __ bind(&fast_elements_case);
  GenerateCase(masm, FAST_ELEMENTS);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM