Revision f230a1cf deps/v8/src/arm/code-stubs-arm.cc
```diff
@@ -59 +59 @@
 }
 
 
+void NumberToStringStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
```
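A recurring shape in this revision: hand-written stub bodies give way to interface descriptors that tell the optimizing compiler which registers carry the parameters and which runtime function to fall back to. A minimal sketch with stand-in types (the real `CodeStubInterfaceDescriptor` and `Register` are V8-internal; nothing below is the actual declaration):

```cpp
// Stand-in types; field names follow the diff above.
struct DescriptorSketch {
  int register_param_count_ = 0;
  const int* register_params_ = nullptr;   // register codes; {0} stands for r0
  const void* deoptimization_handler_ = nullptr;
};

void InitNumberToStringDescriptor(DescriptorSketch* d) {
  static const int kRegisters[] = {0};     // r0 carries the number argument
  d->register_param_count_ = 1;
  d->register_params_ = kRegisters;
  d->deoptimization_handler_ = nullptr;    // stand-in for the runtime entry
}
```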
```diff
@@ -77 +88 @@
   descriptor->register_param_count_ = 4;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
 }
 
 
```

```diff
@@ -158 +169 @@
 }
 
 
+void BinaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { r1, r0 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+  descriptor->SetMissHandler(
+      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
 static void InitializeArrayConstructorDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor,
```

```diff
@@ -170 +193 @@
   descriptor->register_param_count_ = 2;
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &r0;
+    descriptor->stack_parameter_count_ = r0;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
```

```diff
@@ -192 +215 @@
 
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &r0;
+    descriptor->stack_parameter_count_ = r0;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
```

```diff
@@ -825 +848 @@
   // Convert lhs to a double in d7.
   __ SmiToDouble(d7, lhs);
   // Load the double from rhs, tagged HeapNumber r0, to d6.
-  __ sub(r7, rhs, Operand(kHeapObjectTag));
-  __ vldr(d6, r7, HeapNumber::kValueOffset);
+  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
 
   // We now have both loaded as doubles but we can skip the lhs nan check
   // since it's a smi.
```
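Why the two ARM instructions above collapse into one: untagging (subtracting kHeapObjectTag) and adding the field offset are both compile-time constants, so they fold into the single immediate offset that `vldr` can encode, freeing the scratch register r7. A minimal sketch of the arithmetic, assuming kHeapObjectTag = 1 (V8's 32-bit tagging) and an illustrative field offset:

```cpp
#include <cassert>
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;  // matches V8's 32-bit tagging
constexpr intptr_t kValueOffset = 4;    // illustrative field offset

intptr_t PayloadAddress(intptr_t tagged) {
  intptr_t two_step = (tagged - kHeapObjectTag) + kValueOffset;  // sub + vldr
  intptr_t folded   = tagged + (kValueOffset - kHeapObjectTag);  // single vldr
  assert(two_step == folded);  // the two constants fold into one immediate
  return folded;
}
```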
```diff
@@ -851 +873 @@
 
   // Rhs is a smi, lhs is a heap number.
   // Load the double from lhs, tagged HeapNumber r1, to d7.
-  __ sub(r7, lhs, Operand(kHeapObjectTag));
-  __ vldr(d7, r7, HeapNumber::kValueOffset);
+  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
   // Convert rhs to a double in d6 .
   __ SmiToDouble(d6, rhs);
   // Fall through to both_loaded_as_doubles.
```

```diff
@@ -920 +941 @@
 
   // Both are heap numbers. Load them up then jump to the code we have
   // for that.
-  __ sub(r7, rhs, Operand(kHeapObjectTag));
-  __ vldr(d6, r7, HeapNumber::kValueOffset);
-  __ sub(r7, lhs, Operand(kHeapObjectTag));
-  __ vldr(d7, r7, HeapNumber::kValueOffset);
+  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
+  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
   __ jmp(both_loaded_as_doubles);
 }
 
```

```diff
@@ -972 +991 @@
 }
 
 
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
-                                                         Register object,
-                                                         Register result,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         Register scratch3,
-                                                         Label* not_found) {
-  // Use of registers. Register result is used as a temporary.
-  Register number_string_cache = result;
-  Register mask = scratch3;
-
-  // Load the number string cache.
-  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
-  // Make the hash mask from the length of the number string cache. It
-  // contains two elements (number and string) for each cache entry.
-  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
-  // Divide length by two (length is a smi).
-  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
-  __ sub(mask, mask, Operand(1));  // Make mask.
-
-  // Calculate the entry in the number string cache. The hash value in the
-  // number string cache for smis is just the smi value, and the hash for
-  // doubles is the xor of the upper and lower words. See
-  // Heap::GetNumberStringCache.
-  Isolate* isolate = masm->isolate();
-  Label is_smi;
-  Label load_result_from_cache;
-  __ JumpIfSmi(object, &is_smi);
-  __ CheckMap(object,
-              scratch1,
-              Heap::kHeapNumberMapRootIndex,
-              not_found,
-              DONT_DO_SMI_CHECK);
-
-  STATIC_ASSERT(8 == kDoubleSize);
-  __ add(scratch1,
-         object,
-         Operand(HeapNumber::kValueOffset - kHeapObjectTag));
-  __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
-  __ eor(scratch1, scratch1, Operand(scratch2));
-  __ and_(scratch1, scratch1, Operand(mask));
-
-  // Calculate address of entry in string cache: each entry consists
-  // of two pointer sized fields.
-  __ add(scratch1,
-         number_string_cache,
-         Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
-  Register probe = mask;
-  __ ldr(probe,
-         FieldMemOperand(scratch1, FixedArray::kHeaderSize));
-  __ JumpIfSmi(probe, not_found);
-  __ sub(scratch2, object, Operand(kHeapObjectTag));
-  __ vldr(d0, scratch2, HeapNumber::kValueOffset);
-  __ sub(probe, probe, Operand(kHeapObjectTag));
-  __ vldr(d1, probe, HeapNumber::kValueOffset);
-  __ VFPCompareAndSetFlags(d0, d1);
-  __ b(ne, not_found);  // The cache did not contain this value.
-  __ b(&load_result_from_cache);
-
-  __ bind(&is_smi);
-  Register scratch = scratch1;
-  __ and_(scratch, mask, Operand(object, ASR, 1));
-  // Calculate address of entry in string cache: each entry consists
-  // of two pointer sized fields.
-  __ add(scratch,
-         number_string_cache,
-         Operand(scratch, LSL, kPointerSizeLog2 + 1));
-
-  // Check if the entry is the smi we are looking for.
-  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-  __ cmp(object, probe);
-  __ b(ne, not_found);
-
-  // Get the result from the cache.
-  __ bind(&load_result_from_cache);
-  __ ldr(result,
-         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-  __ IncrementCounter(isolate->counters()->number_to_string_native(),
-                      1,
-                      scratch1,
-                      scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  __ ldr(r1, MemOperand(sp, 0));
-
-  // Generate code to lookup number in the number string cache.
-  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, &runtime);
-  __ add(sp, sp, Operand(1 * kPointerSize));
-  __ Ret();
-
-  __ bind(&runtime);
-  // Handle number to string in the runtime system if not found in the cache.
-  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
-}
-
-
 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                          Register input,
                                          Register scratch,
```
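For reference, the hash that the removed lookup implements (its comment cites Heap::GetNumberStringCache): smis hash to their untagged value, heap numbers to the xor of the double's two 32-bit halves, both masked to the power-of-two cache size. A sketch, not V8's actual code:

```cpp
#include <cstdint>
#include <cstring>

uint32_t DoubleHash(double value, uint32_t entries /* power of two */) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);          // ldm {lo, hi}
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return (lo ^ hi) & (entries - 1);                 // eor ; and_ with mask
}

uint32_t SmiHash(int32_t tagged_smi, uint32_t entries) {
  return static_cast<uint32_t>(tagged_smi >> 1) & (entries - 1);  // ASR 1
}
```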
```diff
@@ -1281 +1198 @@
 }
 
 
-// Generates code to call a C function to do a double operation.
-// This code never falls through, but returns with a heap number containing
-// the result in r0.
-// Register heapnumber_result must be a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// d0: Left value.
-// d1: Right value.
-// If soft float ABI, use also r0, r1, r2, r3.
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
-                                        Token::Value op,
-                                        Register heap_number_result,
-                                        Register scratch) {
-  // Assert that heap_number_result is callee-saved.
-  // We currently always use r5 to pass it.
-  ASSERT(heap_number_result.is(r5));
-
-  // Push the current return address before the C call. Return will be
-  // through pop(pc) below.
-  __ push(lr);
-  __ PrepareCallCFunction(0, 2, scratch);
-  if (!masm->use_eabi_hardfloat()) {
-    __ vmov(r0, r1, d0);
-    __ vmov(r2, r3, d1);
-  }
-  {
-    AllowExternalCallThatCantCauseGC scope(masm);
-    __ CallCFunction(
-        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
-  }
-  // Store answer in the overwritable heap number. Double returned in
-  // registers r0 and r1 or in d0.
-  if (masm->use_eabi_hardfloat()) {
-    __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
-  } else {
-    __ Strd(r0, r1,
-            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
-  }
-  // Place heap_number_result in r0 and return to the pushed return address.
-  __ mov(r0, Operand(heap_number_result));
-  __ pop(pc);
-}
```
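The `vmov(r0, r1, d0)` / `vmov(r2, r3, d1)` pair in the removed helper exists because on the soft-float EABI a double argument travels in a core-register pair rather than a VFP register. A sketch of the same bit movement, assuming little-endian ARM:

```cpp
#include <cstdint>
#include <cstring>

// Models vmov(r0, r1, d0): split a double into the two core registers
// that the soft-float calling convention expects.
void SplitDouble(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  *lo = static_cast<uint32_t>(bits);        // -> r0 (low word)
  *hi = static_cast<uint32_t>(bits >> 32);  // -> r1 (high word)
}
```

The removal continues below.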
```diff
@@ -1326 +1200 @@
-
-
-void BinaryOpStub::Initialize() {
-  platform_specific_bit_ = true;  // VFP2 is a base requirement for V8
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  __ Push(r1, r0);
-
-  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
-  __ push(r2);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
-                        masm->isolate()),
-      3,
-      1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
-    MacroAssembler* masm) {
-  UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
-                                          Token::Value op) {
-  Register left = r1;
-  Register right = r0;
-  Register scratch1 = r7;
-  Register scratch2 = r9;
-
-  ASSERT(right.is(r0));
-  STATIC_ASSERT(kSmiTag == 0);
-
-  Label not_smi_result;
-  switch (op) {
-    case Token::ADD:
-      __ add(right, left, Operand(right), SetCC);  // Add optimistically.
-      __ Ret(vc);
-      __ sub(right, right, Operand(left));  // Revert optimistic add.
-      break;
-    case Token::SUB:
-      __ sub(right, left, Operand(right), SetCC);  // Subtract optimistically.
-      __ Ret(vc);
-      __ sub(right, left, Operand(right));  // Revert optimistic subtract.
-      break;
-    case Token::MUL:
-      // Remove tag from one of the operands. This way the multiplication result
-      // will be a smi if it fits the smi range.
-      __ SmiUntag(ip, right);
-      // Do multiplication
-      // scratch1 = lower 32 bits of ip * left.
-      // scratch2 = higher 32 bits of ip * left.
-      __ smull(scratch1, scratch2, left, ip);
-      // Check for overflowing the smi range - no overflow if higher 33 bits of
-      // the result are identical.
-      __ mov(ip, Operand(scratch1, ASR, 31));
-      __ cmp(ip, Operand(scratch2));
-      __ b(ne, &not_smi_result);
-      // Go slow on zero result to handle -0.
-      __ cmp(scratch1, Operand::Zero());
-      __ mov(right, Operand(scratch1), LeaveCC, ne);
-      __ Ret(ne);
-      // We need -0 if we were multiplying a negative number with 0 to get 0.
-      // We know one of them was zero.
-      __ add(scratch2, right, Operand(left), SetCC);
-      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
-      __ Ret(pl);  // Return smi 0 if the non-zero one was positive.
-      // We fall through here if we multiplied a negative number with 0, because
-      // that would mean we should produce -0.
-      break;
-    case Token::DIV: {
-      Label div_with_sdiv;
-
-      // Check for 0 divisor.
-      __ cmp(right, Operand::Zero());
-      __ b(eq, &not_smi_result);
-
-      // Check for power of two on the right hand side.
-      __ sub(scratch1, right, Operand(1));
-      __ tst(scratch1, right);
-      if (CpuFeatures::IsSupported(SUDIV)) {
-        __ b(ne, &div_with_sdiv);
-        // Check for no remainder.
-        __ tst(left, scratch1);
-        __ b(ne, &not_smi_result);
-        // Check for positive left hand side.
-        __ cmp(left, Operand::Zero());
-        __ b(mi, &div_with_sdiv);
-      } else {
-        __ b(ne, &not_smi_result);
-        // Check for positive and no remainder.
-        __ orr(scratch2, scratch1, Operand(0x80000000u));
-        __ tst(left, scratch2);
-        __ b(ne, &not_smi_result);
-      }
-
-      // Perform division by shifting.
-      __ clz(scratch1, scratch1);
-      __ rsb(scratch1, scratch1, Operand(31));
-      __ mov(right, Operand(left, LSR, scratch1));
-      __ Ret();
-
-      if (CpuFeatures::IsSupported(SUDIV)) {
-        CpuFeatureScope scope(masm, SUDIV);
-        Label result_not_zero;
-
-        __ bind(&div_with_sdiv);
-        // Do division.
-        __ sdiv(scratch1, left, right);
-        // Check that the remainder is zero.
-        __ mls(scratch2, scratch1, right, left);
-        __ cmp(scratch2, Operand::Zero());
-        __ b(ne, &not_smi_result);
-        // Check for negative zero result.
-        __ cmp(scratch1, Operand::Zero());
-        __ b(ne, &result_not_zero);
-        __ cmp(right, Operand::Zero());
-        __ b(lt, &not_smi_result);
-        __ bind(&result_not_zero);
-        // Check for the corner case of dividing the most negative smi by -1.
-        __ cmp(scratch1, Operand(0x40000000));
-        __ b(eq, &not_smi_result);
-        // Tag and return the result.
-        __ SmiTag(right, scratch1);
-        __ Ret();
-      }
-      break;
-    }
-    case Token::MOD: {
-      Label modulo_with_sdiv;
-
-      if (CpuFeatures::IsSupported(SUDIV)) {
-        // Check for x % 0.
-        __ cmp(right, Operand::Zero());
-        __ b(eq, &not_smi_result);
-
-        // Check for two positive smis.
-        __ orr(scratch1, left, Operand(right));
-        __ tst(scratch1, Operand(0x80000000u));
-        __ b(ne, &modulo_with_sdiv);
-
-        // Check for power of two on the right hand side.
-        __ sub(scratch1, right, Operand(1));
-        __ tst(scratch1, right);
-        __ b(ne, &modulo_with_sdiv);
-      } else {
-        // Check for two positive smis.
-        __ orr(scratch1, left, Operand(right));
-        __ tst(scratch1, Operand(0x80000000u));
-        __ b(ne, &not_smi_result);
-
-        // Check for power of two on the right hand side.
-        __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
-      }
-
-      // Perform modulus by masking (scratch1 contains right - 1).
-      __ and_(right, left, Operand(scratch1));
-      __ Ret();
-
-      if (CpuFeatures::IsSupported(SUDIV)) {
-        CpuFeatureScope scope(masm, SUDIV);
-        __ bind(&modulo_with_sdiv);
-        __ mov(scratch2, right);
-        // Perform modulus with sdiv and mls.
-        __ sdiv(scratch1, left, right);
-        __ mls(right, scratch1, right, left);
-        // Return if the result is not 0.
-        __ cmp(right, Operand::Zero());
-        __ Ret(ne);
-        // The result is 0, check for -0 case.
-        __ cmp(left, Operand::Zero());
-        __ Ret(pl);
-        // This is a -0 case, restore the value of right.
-        __ mov(right, scratch2);
-        // We fall through here to not_smi_result to produce -0.
-      }
-      break;
-    }
-    case Token::BIT_OR:
-      __ orr(right, left, Operand(right));
-      __ Ret();
-      break;
-    case Token::BIT_AND:
-      __ and_(right, left, Operand(right));
-      __ Ret();
-      break;
-    case Token::BIT_XOR:
-      __ eor(right, left, Operand(right));
-      __ Ret();
-      break;
-    case Token::SAR:
-      // Remove tags from right operand.
-      __ GetLeastBitsFromSmi(scratch1, right, 5);
-      __ mov(right, Operand(left, ASR, scratch1));
-      // Smi tag result.
-      __ bic(right, right, Operand(kSmiTagMask));
-      __ Ret();
-      break;
-    case Token::SHR:
-      // Remove tags from operands. We can't do this on a 31 bit number
-      // because then the 0s get shifted into bit 30 instead of bit 31.
-      __ SmiUntag(scratch1, left);
-      __ GetLeastBitsFromSmi(scratch2, right, 5);
-      __ mov(scratch1, Operand(scratch1, LSR, scratch2));
-      // Unsigned shift is not allowed to produce a negative number, so
-      // check the sign bit and the sign bit after Smi tagging.
-      __ tst(scratch1, Operand(0xc0000000));
-      __ b(ne, &not_smi_result);
-      // Smi tag result.
-      __ SmiTag(right, scratch1);
-      __ Ret();
-      break;
-    case Token::SHL:
-      // Remove tags from operands.
-      __ SmiUntag(scratch1, left);
-      __ GetLeastBitsFromSmi(scratch2, right, 5);
-      __ mov(scratch1, Operand(scratch1, LSL, scratch2));
-      // Check that the signed result fits in a Smi.
-      __ TrySmiTag(right, scratch1, &not_smi_result);
-      __ Ret();
-      break;
-    default:
-      UNREACHABLE();
-  }
-  __ bind(&not_smi_result);
-}
```
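The MUL case above relies on a classic check: after `smull`, the 64-bit product fits in 32 bits exactly when the high word equals the low word's sign bit replicated, the "higher 33 bits identical" condition its comment describes. A sketch of the same test:

```cpp
#include <cstdint>

// Mirrors: mov ip, (scratch1 ASR 31); cmp ip, scratch2. The product is
// representable in 32 bits iff truncating and re-extending it round-trips,
// i.e. the top 33 bits of the 64-bit product are identical.
bool ProductFitsInt32(int32_t a, int32_t b) {
  int64_t product = static_cast<int64_t>(a) * static_cast<int64_t>(b);
  return product == static_cast<int32_t>(product);
}
```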
```diff
@@ -1558 +1200 @@
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
-                                               Register result,
-                                               Register heap_number_map,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required,
-                                               OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
-                                      BinaryOpIC::TypeInfo left_type,
-                                      BinaryOpIC::TypeInfo right_type,
-                                      bool smi_operands,
-                                      Label* not_numbers,
-                                      Label* gc_required,
-                                      Label* miss,
-                                      Token::Value op,
-                                      OverwriteMode mode) {
-  Register left = r1;
-  Register right = r0;
-  Register scratch1 = r6;
-  Register scratch2 = r7;
-
-  ASSERT(smi_operands || (not_numbers != NULL));
-  if (smi_operands) {
-    __ AssertSmi(left);
-    __ AssertSmi(right);
-  }
-  if (left_type == BinaryOpIC::SMI) {
-    __ JumpIfNotSmi(left, miss);
-  }
-  if (right_type == BinaryOpIC::SMI) {
-    __ JumpIfNotSmi(right, miss);
-  }
-
-  Register heap_number_map = r9;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD: {
-      // Allocate new heap number for result.
-      Register result = r5;
-      BinaryOpStub_GenerateHeapResultAllocation(
-          masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
-      // Load left and right operands into d0 and d1.
-      if (smi_operands) {
-        __ SmiToDouble(d1, right);
-        __ SmiToDouble(d0, left);
-      } else {
-        // Load right operand into d1.
-        if (right_type == BinaryOpIC::INT32) {
-          __ LoadNumberAsInt32Double(
-              right, d1, heap_number_map, scratch1, d8, miss);
-        } else {
-          Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
-          __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
-        }
-        // Load left operand into d0.
-        if (left_type == BinaryOpIC::INT32) {
-          __ LoadNumberAsInt32Double(
-              left, d0, heap_number_map, scratch1, d8, miss);
-        } else {
-          Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
-          __ LoadNumber(
-              left, d0, heap_number_map, scratch1, fail);
-        }
-      }
-
-      // Calculate the result.
-      if (op != Token::MOD) {
-        // Using VFP registers:
-        // d0: Left value
-        // d1: Right value
-        switch (op) {
-          case Token::ADD:
-            __ vadd(d5, d0, d1);
-            break;
-          case Token::SUB:
-            __ vsub(d5, d0, d1);
-            break;
-          case Token::MUL:
-            __ vmul(d5, d0, d1);
-            break;
-          case Token::DIV:
-            __ vdiv(d5, d0, d1);
-            break;
-          default:
-            UNREACHABLE();
-        }
-
-        __ sub(r0, result, Operand(kHeapObjectTag));
-        __ vstr(d5, r0, HeapNumber::kValueOffset);
-        __ add(r0, r0, Operand(kHeapObjectTag));
-        __ Ret();
-      } else {
-        // Call the C function to handle the double operation.
-        CallCCodeForDoubleOperation(masm, op, result, scratch1);
-        if (FLAG_debug_code) {
-          __ stop("Unreachable code.");
-        }
-      }
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::SHL: {
-      if (smi_operands) {
-        __ SmiUntag(r3, left);
-        __ SmiUntag(r2, right);
-      } else {
-        // Convert operands to 32-bit integers. Right in r2 and left in r3.
-        __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
-        __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
-      }
-
-      Label result_not_a_smi;
-      switch (op) {
-        case Token::BIT_OR:
-          __ orr(r2, r3, Operand(r2));
-          break;
-        case Token::BIT_XOR:
-          __ eor(r2, r3, Operand(r2));
-          break;
-        case Token::BIT_AND:
-          __ and_(r2, r3, Operand(r2));
-          break;
-        case Token::SAR:
-          // Use only the 5 least significant bits of the shift count.
-          __ GetLeastBitsFromInt32(r2, r2, 5);
-          __ mov(r2, Operand(r3, ASR, r2));
-          break;
-        case Token::SHR:
-          // Use only the 5 least significant bits of the shift count.
-          __ GetLeastBitsFromInt32(r2, r2, 5);
-          __ mov(r2, Operand(r3, LSR, r2), SetCC);
-          // SHR is special because it is required to produce a positive answer.
-          // The code below for writing into heap numbers isn't capable of
-          // writing the register as an unsigned int so we go to slow case if we
-          // hit this case.
-          __ b(mi, &result_not_a_smi);
-          break;
-        case Token::SHL:
-          // Use only the 5 least significant bits of the shift count.
-          __ GetLeastBitsFromInt32(r2, r2, 5);
-          __ mov(r2, Operand(r3, LSL, r2));
-          break;
-        default:
-          UNREACHABLE();
-      }
-
-      // Check that the *signed* result fits in a smi.
-      __ TrySmiTag(r0, r2, &result_not_a_smi);
-      __ Ret();
-
-      // Allocate new heap number for result.
-      __ bind(&result_not_a_smi);
-      Register result = r5;
-      if (smi_operands) {
-        __ AllocateHeapNumber(
-            result, scratch1, scratch2, heap_number_map, gc_required);
-      } else {
-        BinaryOpStub_GenerateHeapResultAllocation(
-            masm, result, heap_number_map, scratch1, scratch2, gc_required,
-            mode);
-      }
-
-      // r2: Answer as signed int32.
-      // r5: Heap number to write answer into.
-
-      // Nothing can go wrong now, so move the heap number to r0, which is the
-      // result.
-      __ mov(r0, Operand(r5));
-
-      // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
-      // mentioned above SHR needs to always produce a positive result.
-      __ vmov(s0, r2);
-      if (op == Token::SHR) {
-        __ vcvt_f64_u32(d0, s0);
-      } else {
-        __ vcvt_f64_s32(d0, s0);
-      }
-      __ sub(r3, r0, Operand(kHeapObjectTag));
-      __ vstr(d0, r3, HeapNumber::kValueOffset);
-      __ Ret();
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
```
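The SHR branch above is the one operation whose result is unsigned, so it can only be smi-tagged when the value fits in 31 bits; testing the top two bits covers both the tag overflow and the sign flip at once. As a sketch:

```cpp
#include <cstdint>

// Mirrors: tst scratch1, #0xc0000000. With a one-bit smi tag, tagging is
// value << 1, so bit 31 set means overflow and bit 30 set means the
// tagged result would come out negative despite being unsigned.
bool FitsSmiAfterShr(uint32_t value) {
  return (value & 0xC0000000u) == 0;
}
```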
```diff
@@ -1758 +1200 @@
-
-
-// Generate the smi code. If the operation on smis are successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
-    MacroAssembler* masm,
-    Label* use_runtime,
-    Label* gc_required,
-    Token::Value op,
-    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
-    OverwriteMode mode) {
-  Label not_smis;
-
-  Register left = r1;
-  Register right = r0;
-  Register scratch1 = r7;
-
-  // Perform combined smi check on both operands.
-  __ orr(scratch1, left, Operand(right));
-  __ JumpIfNotSmi(scratch1, &not_smis);
-
-  // If the smi-smi operation results in a smi return is generated.
-  BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
-  // If heap number results are possible generate the result in an allocated
-  // heap number.
-  if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
-    BinaryOpStub_GenerateFPOperation(
-        masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
-        use_runtime, gc_required, &not_smis, op, mode);
-  }
-  __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label right_arg_changed, call_runtime;
-
-  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
-    // It is guaranteed that the value will fit into a Smi, because if it
-    // didn't, we wouldn't be here, see BinaryOp_Patch.
-    __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
-    __ b(ne, &right_arg_changed);
-  }
-
-  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
-      result_type_ == BinaryOpIC::SMI) {
-    // Only allow smi results.
-    BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
-  } else {
-    // Allow heap number result and don't make a transition if a heap number
-    // cannot be allocated.
-    BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
-        mode_);
-  }
-
-  // Code falls through if the result is not returned as either a smi or heap
-  // number.
-  __ bind(&right_arg_changed);
-  GenerateTypeTransition(masm);
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
```
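GenerateSmiCode's combined check above works because a smi has tag bit 0: or-ing the two tagged words and testing that single bit validates both operands at once. Equivalent C++:

```cpp
#include <cstdint>

// Mirrors: orr scratch1, left, right; JumpIfNotSmi(scratch1, ...).
bool BothSmis(int32_t left_tagged, int32_t right_tagged) {
  const int32_t kSmiTagMask = 1;
  return ((left_tagged | right_tagged) & kSmiTagMask) == 0;
}
```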
```diff
@@ -1831 +1200 @@
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
-  Label call_runtime;
-  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
-  ASSERT(op_ == Token::ADD);
-  // If both arguments are strings, call the string add stub.
-  // Otherwise, do a transition.
-
-  // Registers containing left and right operands respectively.
-  Register left = r1;
-  Register right = r0;
-
-  // Test if left operand is a string.
-  __ JumpIfSmi(left, &call_runtime);
-  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
-  __ b(ge, &call_runtime);
-
-  // Test if right operand is a string.
-  __ JumpIfSmi(right, &call_runtime);
-  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
-  __ b(ge, &call_runtime);
-
-  StringAddStub string_add_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
-  GenerateRegisterArgsPush(masm);
-  __ TailCallStub(&string_add_stub);
-
-  __ bind(&call_runtime);
-  GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
-  Register left = r1;
-  Register right = r0;
-  Register scratch1 = r7;
-  Register scratch2 = r9;
-  LowDwVfpRegister double_scratch = d0;
-
-  Register heap_number_result = no_reg;
-  Register heap_number_map = r6;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  Label call_runtime;
-  // Labels for type transition, used for wrong input or output types.
-  // Both label are currently actually bound to the same position. We use two
-  // different label to differentiate the cause leading to type transition.
-  Label transition;
-
-  // Smi-smi fast case.
-  Label skip;
-  __ orr(scratch1, left, right);
-  __ JumpIfNotSmi(scratch1, &skip);
-  BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
-  // Fall through if the result is not a smi.
-  __ bind(&skip);
-
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD: {
-      // It could be that only SMIs have been seen at either the left
-      // or the right operand. For precise type feedback, patch the IC
-      // again if this changes.
-      if (left_type_ == BinaryOpIC::SMI) {
-        __ JumpIfNotSmi(left, &transition);
-      }
-      if (right_type_ == BinaryOpIC::SMI) {
-        __ JumpIfNotSmi(right, &transition);
-      }
-      // Load both operands and check that they are 32-bit integer.
-      // Jump to type transition if they are not. The registers r0 and r1 (right
-      // and left) are preserved for the runtime call.
-      __ LoadNumberAsInt32Double(
-          right, d1, heap_number_map, scratch1, d8, &transition);
-      __ LoadNumberAsInt32Double(
-          left, d0, heap_number_map, scratch1, d8, &transition);
-
-      if (op_ != Token::MOD) {
-        Label return_heap_number;
-        switch (op_) {
-          case Token::ADD:
-            __ vadd(d5, d0, d1);
-            break;
-          case Token::SUB:
-            __ vsub(d5, d0, d1);
-            break;
-          case Token::MUL:
-            __ vmul(d5, d0, d1);
-            break;
-          case Token::DIV:
-            __ vdiv(d5, d0, d1);
-            break;
-          default:
-            UNREACHABLE();
-        }
-
-        if (result_type_ <= BinaryOpIC::INT32) {
-          __ TryDoubleToInt32Exact(scratch1, d5, d8);
-          // If the ne condition is set, result does
-          // not fit in a 32-bit integer.
-          __ b(ne, &transition);
-          // Try to tag the result as a Smi, return heap number on overflow.
-          __ SmiTag(scratch1, SetCC);
-          __ b(vs, &return_heap_number);
-          // Check for minus zero, transition in that case (because we need
-          // to return a heap number).
-          Label not_zero;
-          ASSERT(kSmiTag == 0);
-          __ b(ne, &not_zero);
-          __ VmovHigh(scratch2, d5);
-          __ tst(scratch2, Operand(HeapNumber::kSignMask));
-          __ b(ne, &transition);
-          __ bind(&not_zero);
-          __ mov(r0, scratch1);
-          __ Ret();
-        }
-
-        __ bind(&return_heap_number);
-        // Return a heap number, or fall through to type transition or runtime
-        // call if we can't.
-        // We are using vfp registers so r5 is available.
-        heap_number_result = r5;
-        BinaryOpStub_GenerateHeapResultAllocation(masm,
-                                                  heap_number_result,
-                                                  heap_number_map,
-                                                  scratch1,
-                                                  scratch2,
-                                                  &call_runtime,
-                                                  mode_);
-        __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
-        __ vstr(d5, r0, HeapNumber::kValueOffset);
-        __ mov(r0, heap_number_result);
-        __ Ret();
-
-        // A DIV operation expecting an integer result falls through
-        // to type transition.
-
-      } else {
-        if (encoded_right_arg_.has_value) {
-          __ Vmov(d8, fixed_right_arg_value(), scratch1);
-          __ VFPCompareAndSetFlags(d1, d8);
-          __ b(ne, &transition);
-        }
-
-        // We preserved r0 and r1 to be able to call runtime.
-        // Save the left value on the stack.
-        __ Push(r5, r4);
-
-        Label pop_and_call_runtime;
-
-        // Allocate a heap number to store the result.
-        heap_number_result = r5;
-        BinaryOpStub_GenerateHeapResultAllocation(masm,
-                                                  heap_number_result,
-                                                  heap_number_map,
-                                                  scratch1,
-                                                  scratch2,
-                                                  &pop_and_call_runtime,
-                                                  mode_);
-
-        // Load the left value from the value saved on the stack.
-        __ Pop(r1, r0);
-
-        // Call the C function to handle the double operation.
-        CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
-        if (FLAG_debug_code) {
-          __ stop("Unreachable code.");
-        }
-
-        __ bind(&pop_and_call_runtime);
-        __ Drop(2);
-        __ b(&call_runtime);
-      }
-
-      break;
-    }
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::SHL: {
-      Label return_heap_number;
-      // Convert operands to 32-bit integers. Right in r2 and left in r3. The
-      // registers r0 and r1 (right and left) are preserved for the runtime
-      // call.
-      __ LoadNumberAsInt32(left, r3, heap_number_map,
-                           scratch1, d0, d1, &transition);
-      __ LoadNumberAsInt32(right, r2, heap_number_map,
-                           scratch1, d0, d1, &transition);
-
-      // The ECMA-262 standard specifies that, for shift operations, only the
-      // 5 least significant bits of the shift value should be used.
-      switch (op_) {
-        case Token::BIT_OR:
-          __ orr(r2, r3, Operand(r2));
-          break;
-        case Token::BIT_XOR:
-          __ eor(r2, r3, Operand(r2));
-          break;
-        case Token::BIT_AND:
-          __ and_(r2, r3, Operand(r2));
-          break;
-        case Token::SAR:
-          __ and_(r2, r2, Operand(0x1f));
-          __ mov(r2, Operand(r3, ASR, r2));
-          break;
-        case Token::SHR:
-          __ and_(r2, r2, Operand(0x1f));
-          __ mov(r2, Operand(r3, LSR, r2), SetCC);
-          // SHR is special because it is required to produce a positive answer.
-          // We only get a negative result if the shift value (r2) is 0.
-          // This result cannot be respresented as a signed 32-bit integer, try
-          // to return a heap number if we can.
-          __ b(mi, (result_type_ <= BinaryOpIC::INT32)
-                   ? &transition
-                   : &return_heap_number);
-          break;
-        case Token::SHL:
-          __ and_(r2, r2, Operand(0x1f));
-          __ mov(r2, Operand(r3, LSL, r2));
-          break;
-        default:
-          UNREACHABLE();
-      }
-
-      // Check if the result fits in a smi. If not try to return a heap number.
-      // (We know the result is an int32).
-      __ TrySmiTag(r0, r2, &return_heap_number);
-      __ Ret();
-
-      __ bind(&return_heap_number);
-      heap_number_result = r5;
-      BinaryOpStub_GenerateHeapResultAllocation(masm,
-                                                heap_number_result,
-                                                heap_number_map,
-                                                scratch1,
-                                                scratch2,
-                                                &call_runtime,
-                                                mode_);
-
-      if (op_ != Token::SHR) {
-        // Convert the result to a floating point value.
-        __ vmov(double_scratch.low(), r2);
-        __ vcvt_f64_s32(double_scratch, double_scratch.low());
-      } else {
-        // The result must be interpreted as an unsigned 32-bit integer.
-        __ vmov(double_scratch.low(), r2);
-        __ vcvt_f64_u32(double_scratch, double_scratch.low());
-      }
-
-      // Store the result.
-      __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
-      __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
-      __ mov(r0, heap_number_result);
-      __ Ret();
-
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-
-  // We never expect DIV to yield an integer result, so we always generate
-  // type transition code for DIV operations expecting an integer result: the
-  // code will fall through to this type transition.
-  if (transition.is_linked() ||
-      ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
-    __ bind(&transition);
-    GenerateTypeTransition(masm);
-  }
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
```
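The minus-zero test in the removed Int32Stub (VmovHigh plus kSignMask) works because once a double is known to compare equal to zero, only its sign bit distinguishes -0.0, and -0.0 has no smi representation, forcing a heap-number result. A sketch:

```cpp
#include <cstdint>
#include <cstring>

// Mirrors: VmovHigh(scratch2, d5); tst scratch2, kSignMask, applied after
// the value is already known to compare equal to zero.
bool IsMinusZero(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return (bits >> 63) != 0 && (bits << 1) == 0;  // sign set, all else zero
}
```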
```diff
@@ -2119 +1200 @@
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
-  Label call_runtime;
-
-  if (op_ == Token::ADD) {
-    // Handle string addition here, because it is the only operation
-    // that does not do a ToNumber conversion on the operands.
-    GenerateAddStrings(masm);
-  }
-
-  // Convert oddball arguments to numbers.
-  Label check, done;
-  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
-  __ b(ne, &check);
-  if (Token::IsBitOp(op_)) {
-    __ mov(r1, Operand(Smi::FromInt(0)));
-  } else {
-    __ LoadRoot(r1, Heap::kNanValueRootIndex);
-  }
-  __ jmp(&done);
-  __ bind(&check);
-  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ b(ne, &done);
-  if (Token::IsBitOp(op_)) {
-    __ mov(r0, Operand(Smi::FromInt(0)));
-  } else {
-    __ LoadRoot(r0, Heap::kNanValueRootIndex);
-  }
-  __ bind(&done);
-
-  GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
-  Label call_runtime, transition;
-  BinaryOpStub_GenerateFPOperation(
-      masm, left_type_, right_type_, false,
-      &transition, &call_runtime, &transition, op_, mode_);
-
-  __ bind(&transition);
-  GenerateTypeTransition(masm);
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  Label call_runtime, call_string_add_or_runtime, transition;
-
-  BinaryOpStub_GenerateSmiCode(
-      masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
-  BinaryOpStub_GenerateFPOperation(
-      masm, left_type_, right_type_, false,
-      &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
-  __ bind(&transition);
-  GenerateTypeTransition(masm);
-
-  __ bind(&call_string_add_or_runtime);
-  if (op_ == Token::ADD) {
-    GenerateAddStrings(masm);
-  }
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
-  ASSERT(op_ == Token::ADD);
-  Label left_not_string, call_runtime;
-
-  Register left = r1;
-  Register right = r0;
-
-  // Check if left argument is a string.
-  __ JumpIfSmi(left, &left_not_string);
-  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
-  __ b(ge, &left_not_string);
-
-  StringAddStub string_add_left_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
-  GenerateRegisterArgsPush(masm);
-  __ TailCallStub(&string_add_left_stub);
-
-  // Left operand is not a string, test right.
-  __ bind(&left_not_string);
-  __ JumpIfSmi(right, &call_runtime);
-  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
-  __ b(ge, &call_runtime);
-
-  StringAddStub string_add_right_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
-  GenerateRegisterArgsPush(masm);
-  __ TailCallStub(&string_add_right_stub);
-
-  // At least one argument is not a string.
-  __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
-                                               Register result,
-                                               Register heap_number_map,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required,
-                                               OverwriteMode mode) {
-  // Code below will scratch result if allocation fails. To keep both arguments
-  // intact for the runtime call result cannot be one of these.
-  ASSERT(!result.is(r0) && !result.is(r1));
-
-  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
-    Label skip_allocation, allocated;
-    Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
-    // If the overwritable operand is already an object, we skip the
-    // allocation of a heap number.
-    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
-    // Allocate a heap number for the result.
-    __ AllocateHeapNumber(
-        result, scratch1, scratch2, heap_number_map, gc_required);
-    __ b(&allocated);
-    __ bind(&skip_allocation);
-    // Use object holding the overwritable operand for result.
-    __ mov(result, Operand(overwritable_operand));
-    __ bind(&allocated);
-  } else {
-    ASSERT(mode == NO_OVERWRITE);
-    __ AllocateHeapNumber(
-        result, scratch1, scratch2, heap_number_map, gc_required);
-  }
-}
```
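The allocator that closes the removed block implements a small reuse policy: if the operand marked overwritable is already a heap number (not a smi), its box becomes the result; only otherwise is a fresh number allocated. A sketch with stand-in types, not V8's:

```cpp
enum class OverwriteMode { kNone, kLeft, kRight };
struct Box { bool is_smi; double value; };  // stand-in for a tagged value

// Mirrors the skip_allocation path: reuse the overwritable operand's box
// when it is heap-allocated, otherwise take the freshly allocated one.
Box* ResultBox(OverwriteMode mode, Box* left, Box* right, Box* fresh) {
  Box* candidate = mode == OverwriteMode::kLeft    ? left
                 : mode == OverwriteMode::kRight   ? right
                                                   : nullptr;
  if (candidate != nullptr && !candidate->is_smi) return candidate;
  return fresh;  // the AllocateHeapNumber(...) path
}
```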
```diff
@@ -2265 +1200 @@
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  __ Push(r1, r0);
-}
-
-
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Untagged case: double input in d2, double result goes
   //   into d2.
```
```diff
@@ -2280 +1209 @@
   Label calculate;
   Label invalid_cache;
   const Register scratch0 = r9;
-  const Register scratch1 = r7;
+  Register scratch1 = no_reg;  // will be r4
   const Register cache_entry = r0;
   const bool tagged = (argument_type_ == TAGGED);
 
```

```diff
@@ -2360 +1289 @@
     __ cmp(r2, r4);
     __ cmp(r3, r5, eq);
     __ b(ne, &calculate);
+
+    scratch1 = r4;  // Start of scratch1 range.
+
     // Cache hit. Load result, cleanup and return.
     Counters* counters = masm->isolate()->counters();
     __ IncrementCounter(
```

```diff
@@ -2502 +1434 @@
   const DwVfpRegister double_scratch = d0;
   const SwVfpRegister single_scratch = s0;
   const Register scratch = r9;
-  const Register scratch2 = r7;
+  const Register scratch2 = r4;
 
   Label call_runtime, done, int_exponent;
   if (exponent_type_ == ON_STACK) {
```

```diff
@@ -2708 +1640 @@
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  BinaryOpStub::GenerateAheadOfTime(isolate);
 }
 
 
```

```diff
@@ -2765 +1698 @@
 
   if (do_gc) {
     // Passing r0.
-    __ PrepareCallCFunction(1, 0, r1);
+    __ PrepareCallCFunction(2, 0, r1);
+    __ mov(r1, Operand(ExternalReference::isolate_address(masm->isolate())));
     __ CallCFunction(ExternalReference::perform_gc_function(isolate),
-                     1, 0);
+                     2, 0);
   }
 
   ExternalReference scope_depth =
```

```diff
@@ -2841 +1775 @@
   // sp: stack pointer
   // fp: frame pointer
   // Callee-saved register r4 still holds argc.
-  __ LeaveExitFrame(save_doubles_, r4);
+  __ LeaveExitFrame(save_doubles_, r4, true);
   __ mov(pc, lr);
 
   // check if we should retry or throw exception
```

```diff
@@ -3011 +1945 @@
   // r3: argc
   // r4: argv
   Isolate* isolate = masm->isolate();
-  __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used.
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
-  __ mov(r7, Operand(Smi::FromInt(marker)));
+  __ mov(r8, Operand(Smi::FromInt(marker)));
   __ mov(r6, Operand(Smi::FromInt(marker)));
   __ mov(r5,
          Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate)));
   __ ldr(r5, MemOperand(r5));
-  __ Push(r8, r7, r6, r5);
+  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  __ Push(ip, r8, r6, r5);
 
   // Set up frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
```

```diff
@@ -3064 +1998 @@
   // Invoke: Link this frame into the handler chain. There's only one
   // handler block in this code object, so its index is 0.
   __ bind(&invoke);
-  // Must preserve r0-r4, r5-r7 are available.
+  // Must preserve r0-r4, r5-r6 are available.
   __ PushTryHandler(StackHandler::JS_ENTRY, 0);
   // If an exception not caught by another handler occurs, this handler
   // returns control to the code after the bl(&invoke) above, which
```

```diff
@@ -3375 +2309 @@
     receiver = r0;
   }
 
-  StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss,
-                                         support_wrapper_);
+  StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss);
 
   __ bind(&miss);
   StubCompiler::TailCallBuiltin(
```

```diff
@@ -3672 +2605 @@
   __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
   __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
   __ sub(r9, r9, Operand(r1));
-  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
   __ add(r3, r4, Operand(r6, LSL, 1));
   __ add(r3, r3, Operand(kParameterMapHeaderSize));
 
   // r6 = loop variable (tagged)
   // r1 = mapping index (tagged)
   // r3 = address of backing store (tagged)
-  // r4 = address of parameter map (tagged)
-  // r5 = temporary scratch (a.o., for address calculation)
-  // r7 = the hole value
+  // r4 = address of parameter map (tagged), which is also the address of new
+  //      object + Heap::kArgumentsObjectSize (tagged)
+  // r0 = temporary scratch (a.o., for address calculation)
+  // r5 = the hole value
   __ jmp(&parameters_test);
 
   __ bind(&parameters_loop);
   __ sub(r6, r6, Operand(Smi::FromInt(1)));
-  __ mov(r5, Operand(r6, LSL, 1));
-  __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
-  __ str(r9, MemOperand(r4, r5));
-  __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
-  __ str(r7, MemOperand(r3, r5));
+  __ mov(r0, Operand(r6, LSL, 1));
+  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ str(r9, MemOperand(r4, r0));
+  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ str(r5, MemOperand(r3, r0));
   __ add(r9, r9, Operand(Smi::FromInt(1)));
   __ bind(&parameters_test);
   __ cmp(r6, Operand(Smi::FromInt(0)));
   __ b(ne, &parameters_loop);
 
+  // Restore r0 = new object (tagged)
+  __ sub(r0, r4, Operand(Heap::kArgumentsObjectSize));
+
   __ bind(&skip_parameter_map);
+  // r0 = address of new object (tagged)
   // r2 = argument count (tagged)
   // r3 = address of backing store (tagged)
   // r5 = scratch
```
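The parameter-map loop above keeps one running byte offset (previously r5, now r0) that indexes both the parameter map and the backing store: a tagged smi index is already value << 1, so one more left shift scales it to 4-byte entries, and a constant subtraction rebases it between the two headers. A sketch with assumed header sizes (the real constants are V8-internal):

```cpp
#include <cstdint>

constexpr int32_t kHeapObjectTag = 1;            // matches V8's tagging
constexpr int32_t kFixedArrayHeaderSize = 8;     // assumed stand-in
constexpr int32_t kParameterMapHeaderSize = kFixedArrayHeaderSize + 8;

// Mirrors: mov r0, (r6 LSL 1); add/sub the header constants; two stores.
void EntryOffsets(int32_t tagged_index, int32_t* map_off, int32_t* store_off) {
  int32_t off = (tagged_index << 1)              // smi -> 4-byte entry offset
              + kParameterMapHeaderSize - kHeapObjectTag;
  *map_off = off;                                // str r9, [r4, r0]
  *store_off = off - (kParameterMapHeaderSize - kFixedArrayHeaderSize);
  // second store: str the-hole, [r3, r0]
}
```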
```diff
@@ -3727 +2665 @@
   __ Ret();
 
   // Do the runtime call to allocate the arguments object.
+  // r0 = address of new object (tagged)
   // r2 = argument count (tagged)
   __ bind(&runtime);
   __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
```

```diff
@@ -3855 +2794 @@
   // therefore the content of these registers are safe to use after the call.
   Register subject = r4;
   Register regexp_data = r5;
-  Register last_match_info_elements = r6;
+  Register last_match_info_elements = no_reg;  // will be r6;
 
   // Ensure that a RegExp stack is allocated.
   Isolate* isolate = masm->isolate();
```

```diff
@@ -3988 +2927 @@
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ and_(r0, r0, Operand(kStringEncodingMask));
   __ mov(r3, Operand(r0, ASR, 2), SetCC);
-  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
-  __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
+  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
 
   // (E) Carry on. String handling is done.
-  // r7: irregexp code
+  // r6: irregexp code
   // Check that the irregexp code has been generated for the actual string
   // encoding. If it has, the field contains a code object otherwise it contains
   // a smi (code flushing support).
-  __ JumpIfSmi(r7, &runtime);
+  __ JumpIfSmi(r6, &runtime);
 
   // r1: previous index
   // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
-  // r7: code
+  // r6: code
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // All checks done. Now push arguments for native regexp code.
```

```diff
@@ -4067 +3006 @@
   __ mov(r0, subject);
 
   // Locate the code entry and call it.
-  __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
   DirectCEntryStub stub;
-  stub.GenerateCall(masm, r7);
+  stub.GenerateCall(masm, r6);
 
-  __ LeaveExitFrame(false, no_reg);
+  __ LeaveExitFrame(false, no_reg, true);
+
+  last_match_info_elements = r6;
 
   // r0: result
   // subject: subject string (callee saved)
```

```diff
@@ -4161 +3102 @@
   __ RecordWriteField(last_match_info_elements,
                       RegExpImpl::kLastSubjectOffset,
                       subject,
-                      r7,
+                      r3,
                       kLRHasNotBeenSaved,
                       kDontSaveFPRegs);
   __ mov(subject, r2);
```

```diff
@@ -4171 +3112 @@
   __ RecordWriteField(last_match_info_elements,
                       RegExpImpl::kLastInputOffset,
                       subject,
-                      r7,
+                      r3,
                       kLRHasNotBeenSaved,
                       kDontSaveFPRegs);
 
```

```diff
@@ -4343 +3284 @@
   // Cache the called function in a global property cell. Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
   // megamorphic.
+  // r0 : number of arguments to the construct function
   // r1 : the function to call
   // r2 : cache cell for call target
   Label initialize, done, miss, megamorphic, not_array_function;
```

```diff
@@ -4364 +3306 @@
   // If we didn't have a matching function, and we didn't find the megamorph
   // sentinel, then we have in the cell either some other function or an
   // AllocationSite. Do a map check on the object in ecx.
-  Handle<Map> allocation_site_map(
-      masm->isolate()->heap()->allocation_site_map(),
-      masm->isolate());
   __ ldr(r5, FieldMemOperand(r3, 0));
   __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
   __ b(ne, &miss);
```

```diff
@@ -4403 +3342 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Arguments register must be smi-tagged to call out.
    __ SmiTag(r0);
     __ push(r0);
     __ push(r1);
```

```diff
@@ -4739 +3679 @@
                                           Register scratch2,
                                           Register scratch3,
                                           Register scratch4,
-                                          Register scratch5,
                                           int flags) {
   bool ascii = (flags & COPY_ASCII) != 0;
   bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0;
```

```diff
@@ -4814 +3753 @@
 
     __ bind(&loop);
     __ ldr(scratch3, MemOperand(src, 4, PostIndex));
-    __ sub(scratch5, limit, Operand(dest));
     __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift));
     __ str(scratch1, MemOperand(dest, 4, PostIndex));
     __ mov(scratch1, Operand(scratch3, LSR, right_shift));
     // Loop if four or more bytes left to copy.
-    // Compare to eight, because we did the subtract before increasing dst.
-    __ sub(scratch5, scratch5, Operand(8), SetCC);
+    __ sub(scratch3, limit, Operand(dest));
+    __ sub(scratch3, scratch3, Operand(4), SetCC);
     __ b(ge, &loop);
   }
   // There is now between zero and three bytes left to copy (negative that
-  // number is in scratch5), and between one and three bytes already read into
+  // number is in scratch3), and between one and three bytes already read into
   // scratch1 (eight times that number in scratch4). We may have read past
   // the end of the string, but because objects are aligned, we have not read
   // past the end of the object.
   // Find the minimum of remaining characters to move and preloaded characters
   // and write those as bytes.
-  __ add(scratch5, scratch5, Operand(4), SetCC);
+  __ add(scratch3, scratch3, Operand(4), SetCC);
   __ b(eq, &done);
-  __ cmp(scratch4, Operand(scratch5, LSL, 3), ne);
+  __ cmp(scratch4, Operand(scratch3, LSL, 3), ne);
   // Move minimum of bytes read and bytes left to copy to scratch4.
-  __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt);
-  // Between one and three (value in scratch5) characters already read into
```
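The final hunk's loop-bound change: the old code measured limit - dest before dest advanced and therefore compared against 8; the new code recomputes the distance in scratch3 after the 4-byte store and compares against 4. Both ask whether at least four bytes remain. As a sketch:

```cpp
#include <cstddef>
#include <cstdint>

// Mirrors: sub scratch3, limit, dest (after the store); subs #4; bge loop.
bool LoopAgain(const uint8_t* dest_after_store, const uint8_t* limit) {
  std::ptrdiff_t left = limit - dest_after_store;
  return left >= 4;  // four or more bytes still to copy
}
```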