Revision f230a1cf: deps/v8/src/mips/code-stubs-mips.cc

View differences:

--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -60,6 +60,17 @@
 }
 
 
+void NumberToStringStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a0 };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
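
Background on the hunk above: turning NumberToStringStub into a descriptor-driven
(Hydrogen) stub is what allows the hand-written MIPS cache lookup to be deleted
further down in this diff (old lines 997-1095). The descriptor pattern recurs
throughout the file; it records which registers carry the stub's parameters and
which runtime function the deoptimizer falls back to. A simplified model of the
fields being populated (types reduced for illustration; the real definitions
live in V8's code-stubs headers):

    // Sketch only: what the InitializeInterfaceDescriptor functions fill in.
    typedef void* Address;
    struct Register { int code; };

    struct CodeStubInterfaceDescriptor {
      int register_param_count_;        // number of register parameters
      Register* register_params_;       // which registers (here: { a0 })
      Address deoptimization_handler_;  // runtime fallback used on deopt
    };
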
@@ -78,7 +89,7 @@
   descriptor->register_param_count_ = 4;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
 }
 
 
@@ -171,7 +182,7 @@
   descriptor->register_param_count_ = 2;
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &a0;
+    descriptor->stack_parameter_count_ = a0;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
@@ -193,7 +204,7 @@
 
   if (constant_stack_parameter_count != 0) {
     // Stack param count needs (constructor pointer, and single argument).
-    descriptor->stack_parameter_count_ = &a0;
+    descriptor->stack_parameter_count_ = a0;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
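
The two stack_parameter_count_ fixes above (old lines 174 and 196) are the same
one-character bug: the code stored &a0, the address of a file-static Register,
where the descriptor now expects the Register itself, presumably because the
field changed from Register* to Register. A minimal sketch of the distinction
(types assumed from the call sites, not the real V8 declarations):

    struct Register { int code; };
    static Register a0 = { 4 };  // MIPS argument register $4

    struct Descriptor {
      Register stack_parameter_count_;  // by value; the old code stored &a0
    };

    void SetStackParameterCount(Descriptor* descriptor) {
      descriptor->stack_parameter_count_ = a0;  // store the register itself
    }
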
@@ -536,23 +547,27 @@
       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
   Register scratch3 =
       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
-  DoubleRegister double_scratch = kLithiumScratchDouble.low();
-  DoubleRegister double_input = f12;
+  DoubleRegister double_scratch = kLithiumScratchDouble;
 
   __ Push(scratch, scratch2, scratch3);
 
-  __ ldc1(double_input, MemOperand(input_reg, double_offset));
-
   if (!skip_fastpath()) {
+    // Load double input.
+    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+
     // Clear cumulative exception flags and save the FCSR.
     __ cfc1(scratch2, FCSR);
     __ ctc1(zero_reg, FCSR);
+
     // Try a conversion to a signed integer.
-    __ trunc_w_d(double_scratch, double_input);
+    __ Trunc_w_d(double_scratch, double_scratch);
+    // Move the converted value into the result register.
     __ mfc1(result_reg, double_scratch);
+
     // Retrieve and restore the FCSR.
     __ cfc1(scratch, FCSR);
     __ ctc1(scratch2, FCSR);
+
     // Check for overflow and NaNs.
     __ And(
         scratch, scratch,
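
In the rewritten fast path above, the input double is loaded only when the fast
path is actually emitted, and the truncation goes through the Trunc_w_d macro on
a scratch FPU register instead of a fixed f12. The surrounding cfc1/ctc1 pair
clears the FCSR, truncates, and then inspects the accumulated exception bits to
catch NaNs and out-of-range inputs. A rough portable analogue of that
flag-based check, assuming the platform honors the C floating-point environment
(illustrative only, not V8 code):

    #include <cfenv>
    #include <cstdint>

    // Clear the sticky flags, truncate, then test the flags: the C version of
    // the ctc1(zero_reg, FCSR) / Trunc_w_d / cfc1-and-mask sequence above.
    static bool TryTruncateDoubleToInt32(double input, int32_t* result) {
      std::feclearexcept(FE_ALL_EXCEPT);
      int32_t truncated = static_cast<int32_t>(input);
      if (std::fetestexcept(FE_INVALID | FE_OVERFLOW)) {
        return false;  // NaN or out of int32 range: take the slow path
      }
      *result = truncated;
      return true;
    }
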
@@ -565,7 +580,9 @@
   // Load the double value and perform a manual truncation.
   Register input_high = scratch2;
   Register input_low = scratch3;
-  __ Move(input_low, input_high, double_input);
+
+  __ lw(input_low, MemOperand(input_reg, double_offset));
+  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
 
   Label normal_exponent, restore_sign;
   // Extract the biased exponent in result.
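
The slow path no longer needs the value in an FPU register at all: instead of
Move(input_low, input_high, double_input) it reloads the two 32-bit halves of
the double straight from memory, with the high (sign/exponent) word at
double_offset + kIntSize on little-endian MIPS. A hypothetical helper showing
the same split in portable C++:

    #include <cstdint>
    #include <cstring>

    // Split a double into the two words the manual truncation operates on.
    static void SplitDouble(double input, uint32_t* low, uint32_t* high) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof bits);
      *low = static_cast<uint32_t>(bits);         // low mantissa word
      *high = static_cast<uint32_t>(bits >> 32);  // sign, exponent, mantissa top
    }
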
@@ -994,105 +1011,6 @@
 }
 
 
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
-                                                         Register object,
-                                                         Register result,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         Register scratch3,
-                                                         Label* not_found) {
-  // Use of registers. Register result is used as a temporary.
-  Register number_string_cache = result;
-  Register mask = scratch3;
-
-  // Load the number string cache.
-  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
-  // Make the hash mask from the length of the number string cache. It
-  // contains two elements (number and string) for each cache entry.
-  __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
-  // Divide length by two (length is a smi).
-  __ sra(mask, mask, kSmiTagSize + 1);
-  __ Addu(mask, mask, -1);  // Make mask.
-
-  // Calculate the entry in the number string cache. The hash value in the
-  // number string cache for smis is just the smi value, and the hash for
-  // doubles is the xor of the upper and lower words. See
-  // Heap::GetNumberStringCache.
-  Isolate* isolate = masm->isolate();
-  Label is_smi;
-  Label load_result_from_cache;
-  __ JumpIfSmi(object, &is_smi);
-  __ CheckMap(object,
-              scratch1,
-              Heap::kHeapNumberMapRootIndex,
-              not_found,
-              DONT_DO_SMI_CHECK);
-
-  STATIC_ASSERT(8 == kDoubleSize);
-  __ Addu(scratch1,
-          object,
-          Operand(HeapNumber::kValueOffset - kHeapObjectTag));
-  __ lw(scratch2, MemOperand(scratch1, kPointerSize));
-  __ lw(scratch1, MemOperand(scratch1, 0));
-  __ Xor(scratch1, scratch1, Operand(scratch2));
-  __ And(scratch1, scratch1, Operand(mask));
-
-  // Calculate address of entry in string cache: each entry consists
-  // of two pointer sized fields.
-  __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
-  __ Addu(scratch1, number_string_cache, scratch1);
-
-  Register probe = mask;
-  __ lw(probe,
-          FieldMemOperand(scratch1, FixedArray::kHeaderSize));
-  __ JumpIfSmi(probe, not_found);
-  __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
-  __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
-  __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
-  __ Branch(not_found);
-
-  __ bind(&is_smi);
-  Register scratch = scratch1;
-  __ sra(scratch, object, 1);   // Shift away the tag.
-  __ And(scratch, mask, Operand(scratch));
-
-  // Calculate address of entry in string cache: each entry consists
-  // of two pointer sized fields.
-  __ sll(scratch, scratch, kPointerSizeLog2 + 1);
-  __ Addu(scratch, number_string_cache, scratch);
-
-  // Check if the entry is the smi we are looking for.
-  __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-  __ Branch(not_found, ne, object, Operand(probe));
-
-  // Get the result from the cache.
-  __ bind(&load_result_from_cache);
-  __ lw(result,
-         FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
-  __ IncrementCounter(isolate->counters()->number_to_string_native(),
-                      1,
-                      scratch1,
-                      scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
-  Label runtime;
-
-  __ lw(a1, MemOperand(sp, 0));
-
-  // Generate code to lookup number in the number string cache.
-  GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime);
-  __ DropAndRet(1);
-
-  __ bind(&runtime);
-  // Handle number to string in the runtime system if not found in the cache.
-  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
-}
-
-
 static void ICCompareStub_CheckInputType(MacroAssembler* masm,
                                          Register input,
                                          Register scratch,
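
The deleted lookup duplicated, instruction by instruction, what
Heap::GetNumberStringCache does on the C++ side; with NumberToStringStub now
descriptor-driven (see the hunk at new line 63), the per-platform copy goes
away, and the one remaining caller later in this diff switches to the
macro-assembler helper __ LookupNumberStringCache. For reference, the hashing
scheme the removed comments describe, as a hypothetical sketch: the cache is a
FixedArray of (number, string) pairs, a smi hashes to its own value, a heap
number to the xor of the two words of its double, masked by capacity/2 - 1:

    #include <cstdint>
    #include <cstring>

    // Index of the (number, string) entry for a heap-number key; capacity/2
    // is assumed to be a power of two so the mask works.
    static uint32_t NumberStringCacheIndex(double value, uint32_t capacity) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      uint32_t hash = static_cast<uint32_t>(bits) ^
                      static_cast<uint32_t>(bits >> 32);
      uint32_t mask = capacity / 2 - 1;
      return (hash & mask) * 2;  // each entry spans two pointer-sized slots
    }
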
@@ -1316,958 +1234,18 @@
 }
 
 
-// Generates code to call a C function to do a double operation.
-// This code never falls through, but returns with a heap number containing
-// the result in v0.
-// Register heap_number_result must be a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// a0: Left value (least significant part of mantissa).
-// a1: Left value (sign, exponent, top of mantissa).
-// a2: Right value (least significant part of mantissa).
-// a3: Right value (sign, exponent, top of mantissa).
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
-                                        Token::Value op,
-                                        Register heap_number_result,
-                                        Register scratch) {
-  // Assert that heap_number_result is saved.
-  // We currently always use s0 to pass it.
-  ASSERT(heap_number_result.is(s0));
-
-  // Push the current return address before the C call.
-  __ push(ra);
-  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
-  {
-    AllowExternalCallThatCantCauseGC scope(masm);
-    __ CallCFunction(
-        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
-  }
-  // Store answer in the overwritable heap number.
-  // Double returned in register f0.
-  __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
-  // Place heap_number_result in v0 and return to the pushed return address.
-  __ pop(ra);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, heap_number_result);
-}
-
-
-void BinaryOpStub::Initialize() {
-  platform_specific_bit_ = true;  // FPU is a base requirement for V8.
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  __ Push(a1, a0);
-
-  __ li(a2, Operand(Smi::FromInt(MinorKey())));
-  __ push(a2);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
-                        masm->isolate()),
-      3,
-      1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
-    MacroAssembler* masm) {
-  UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
-                                          Token::Value op) {
-  Register left = a1;
-  Register right = a0;
-
-  Register scratch1 = t0;
-  Register scratch2 = t1;
-
-  ASSERT(right.is(a0));
-  STATIC_ASSERT(kSmiTag == 0);
-
-  Label not_smi_result;
-  switch (op) {
-    case Token::ADD:
-      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
-      __ RetOnNoOverflow(scratch1);
-      // No need to revert anything - right and left are intact.
-      break;
-    case Token::SUB:
-      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
-      __ RetOnNoOverflow(scratch1);
-      // No need to revert anything - right and left are intact.
-      break;
-    case Token::MUL: {
-      // Remove tag from one of the operands. This way the multiplication result
-      // will be a smi if it fits the smi range.
-      __ SmiUntag(scratch1, right);
-      // Do multiplication.
-      // lo = lower 32 bits of scratch1 * left.
-      // hi = higher 32 bits of scratch1 * left.
-      __ Mult(left, scratch1);
-      // Check for overflowing the smi range - no overflow if higher 33 bits of
-      // the result are identical.
-      __ mflo(scratch1);
-      __ mfhi(scratch2);
-      __ sra(scratch1, scratch1, 31);
-      __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
-      // Go slow on zero result to handle -0.
-      __ mflo(v0);
-      __ Ret(ne, v0, Operand(zero_reg));
-      // We need -0 if we were multiplying a negative number with 0 to get 0.
-      // We know one of them was zero.
-      __ Addu(scratch2, right, left);
-      Label skip;
-      // ARM uses the 'pl' condition, which is 'ge'.
-      // Negating it results in 'lt'.
-      __ Branch(&skip, lt, scratch2, Operand(zero_reg));
-      ASSERT(Smi::FromInt(0) == 0);
-      __ Ret(USE_DELAY_SLOT);
-      __ mov(v0, zero_reg);  // Return smi 0 if the non-zero one was positive.
-      __ bind(&skip);
-      // We fall through here if we multiplied a negative number with 0, because
-      // that would mean we should produce -0.
-      }
-      break;
-    case Token::DIV: {
-      Label done;
-      __ SmiUntag(scratch2, right);
-      __ SmiUntag(scratch1, left);
-      __ Div(scratch1, scratch2);
-      // A minor optimization: div may be calculated asynchronously, so we check
-      // for division by zero before getting the result.
-      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
-      // If the result is 0, we need to make sure the dividsor (right) is
-      // positive, otherwise it is a -0 case.
-      // Quotient is in 'lo', remainder is in 'hi'.
-      // Check for no remainder first.
-      __ mfhi(scratch1);
-      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
-      __ mflo(scratch1);
-      __ Branch(&done, ne, scratch1, Operand(zero_reg));
-      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
-      __ bind(&done);
-      // Check that the signed result fits in a Smi.
-      __ Addu(scratch2, scratch1, Operand(0x40000000));
-      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
-      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
-      __ SmiTag(v0, scratch1);
-      }
-      break;
-    case Token::MOD: {
-      Label done;
-      __ SmiUntag(scratch2, right);
-      __ SmiUntag(scratch1, left);
-      __ Div(scratch1, scratch2);
-      // A minor optimization: div may be calculated asynchronously, so we check
-      // for division by 0 before calling mfhi.
-      // Check for zero on the right hand side.
-      __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
-      // If the result is 0, we need to make sure the dividend (left) is
-      // positive (or 0), otherwise it is a -0 case.
-      // Remainder is in 'hi'.
-      __ mfhi(scratch2);
-      __ Branch(&done, ne, scratch2, Operand(zero_reg));
-      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
-      __ bind(&done);
-      // Check that the signed result fits in a Smi.
-      __ Addu(scratch1, scratch2, Operand(0x40000000));
-      __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
-      __ Ret(USE_DELAY_SLOT);   // SmiTag emits one instruction in delay slot.
-      __ SmiTag(v0, scratch2);
-      }
-      break;
-    case Token::BIT_OR:
-      __ Ret(USE_DELAY_SLOT);
-      __ or_(v0, left, right);
-      break;
-    case Token::BIT_AND:
-      __ Ret(USE_DELAY_SLOT);
-      __ and_(v0, left, right);
-      break;
-    case Token::BIT_XOR:
-      __ Ret(USE_DELAY_SLOT);
-      __ xor_(v0, left, right);
-      break;
-    case Token::SAR:
-      // Remove tags from right operand.
-      __ GetLeastBitsFromSmi(scratch1, right, 5);
-      __ srav(scratch1, left, scratch1);
-      // Smi tag result.
-      __ And(v0, scratch1, ~kSmiTagMask);
-      __ Ret();
-      break;
-    case Token::SHR:
-      // Remove tags from operands. We can't do this on a 31 bit number
-      // because then the 0s get shifted into bit 30 instead of bit 31.
-      __ SmiUntag(scratch1, left);
-      __ GetLeastBitsFromSmi(scratch2, right, 5);
-      __ srlv(v0, scratch1, scratch2);
-      // Unsigned shift is not allowed to produce a negative number, so
-      // check the sign bit and the sign bit after Smi tagging.
-      __ And(scratch1, v0, Operand(0xc0000000));
-      __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
-      // Smi tag result.
-      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
-      __ SmiTag(v0);
-      break;
-    case Token::SHL:
-      // Remove tags from operands.
-      __ SmiUntag(scratch1, left);
-      __ GetLeastBitsFromSmi(scratch2, right, 5);
-      __ sllv(scratch1, scratch1, scratch2);
-      // Check that the signed result fits in a Smi.
-      __ Addu(scratch2, scratch1, Operand(0x40000000));
-      __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
-      __ Ret(USE_DELAY_SLOT);
-      __ SmiTag(v0, scratch1);  // SmiTag emits one instruction in delay slot.
-      break;
-    default:
-      UNREACHABLE();
-  }
-  __ bind(&not_smi_result);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
-                                               Register result,
-                                               Register heap_number_map,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required,
-                                               OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
-                                      BinaryOpIC::TypeInfo left_type,
-                                      BinaryOpIC::TypeInfo right_type,
-                                      bool smi_operands,
-                                      Label* not_numbers,
-                                      Label* gc_required,
-                                      Label* miss,
-                                      Token::Value op,
-                                      OverwriteMode mode) {
-  Register left = a1;
-  Register right = a0;
-  Register scratch1 = t3;
-  Register scratch2 = t5;
-
-  ASSERT(smi_operands || (not_numbers != NULL));
-  if (smi_operands) {
-    __ AssertSmi(left);
-    __ AssertSmi(right);
-  }
-  if (left_type == BinaryOpIC::SMI) {
-    __ JumpIfNotSmi(left, miss);
-  }
-  if (right_type == BinaryOpIC::SMI) {
-    __ JumpIfNotSmi(right, miss);
-  }
-
-  Register heap_number_map = t2;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD: {
-      // Allocate new heap number for result.
-      Register result = s0;
-      BinaryOpStub_GenerateHeapResultAllocation(
-          masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
-      // Load left and right operands into f12 and f14.
-      if (smi_operands) {
-        __ SmiUntag(scratch1, a0);
-        __ mtc1(scratch1, f14);
-        __ cvt_d_w(f14, f14);
-        __ SmiUntag(scratch1, a1);
-        __ mtc1(scratch1, f12);
-        __ cvt_d_w(f12, f12);
-      } else {
-        // Load right operand to f14.
-        if (right_type == BinaryOpIC::INT32) {
-          __ LoadNumberAsInt32Double(
-              right, f14, heap_number_map, scratch1, scratch2, f2, miss);
-        } else {
-          Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
-          __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
-        }
-        // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
-        // jumps to |miss|.
-        if (left_type == BinaryOpIC::INT32) {
-          __ LoadNumberAsInt32Double(
-              left, f12, heap_number_map, scratch1, scratch2, f2, miss);
-        } else {
-          Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
-          __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
-        }
-      }
-
-      // Calculate the result.
-      if (op != Token::MOD) {
-        // Using FPU registers:
-        // f12: Left value.
-        // f14: Right value.
-        switch (op) {
-        case Token::ADD:
-          __ add_d(f10, f12, f14);
-          break;
-        case Token::SUB:
-          __ sub_d(f10, f12, f14);
-          break;
-        case Token::MUL:
-          __ mul_d(f10, f12, f14);
-          break;
-        case Token::DIV:
-          __ div_d(f10, f12, f14);
-          break;
-        default:
-          UNREACHABLE();
-        }
-
-        // ARM uses a workaround here because of the unaligned HeapNumber
-        // kValueOffset. On MIPS this workaround is built into sdc1 so
-        // there's no point in generating even more instructions.
-        __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
-        __ Ret(USE_DELAY_SLOT);
-        __ mov(v0, result);
-      } else {
-        // Call the C function to handle the double operation.
-        CallCCodeForDoubleOperation(masm, op, result, scratch1);
-        if (FLAG_debug_code) {
-          __ stop("Unreachable code.");
-        }
-      }
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::SHL: {
-      if (smi_operands) {
-        __ SmiUntag(a3, left);
-        __ SmiUntag(a2, right);
-      } else {
-        // Convert operands to 32-bit integers. Right in a2 and left in a3.
-        __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers);
-        __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers);
-      }
-      Label result_not_a_smi;
-      switch (op) {
-        case Token::BIT_OR:
-          __ Or(a2, a3, Operand(a2));
-          break;
-        case Token::BIT_XOR:
-          __ Xor(a2, a3, Operand(a2));
-          break;
-        case Token::BIT_AND:
-          __ And(a2, a3, Operand(a2));
-          break;
-        case Token::SAR:
-          // Use only the 5 least significant bits of the shift count.
-          __ GetLeastBitsFromInt32(a2, a2, 5);
-          __ srav(a2, a3, a2);
-          break;
-        case Token::SHR:
-          // Use only the 5 least significant bits of the shift count.
-          __ GetLeastBitsFromInt32(a2, a2, 5);
-          __ srlv(a2, a3, a2);
-          // SHR is special because it is required to produce a positive answer.
-          // The code below for writing into heap numbers isn't capable of
-          // writing the register as an unsigned int so we go to slow case if we
-          // hit this case.
-          __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
-          break;
-        case Token::SHL:
-          // Use only the 5 least significant bits of the shift count.
-          __ GetLeastBitsFromInt32(a2, a2, 5);
-          __ sllv(a2, a3, a2);
-          break;
-        default:
-          UNREACHABLE();
-      }
-      // Check that the *signed* result fits in a smi.
-      __ Addu(a3, a2, Operand(0x40000000));
-      __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
-      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
-      __ SmiTag(v0, a2);
-
-      // Allocate new heap number for result.
-      __ bind(&result_not_a_smi);
-      Register result = t1;
-      if (smi_operands) {
-        __ AllocateHeapNumber(
-            result, scratch1, scratch2, heap_number_map, gc_required);
-      } else {
-        BinaryOpStub_GenerateHeapResultAllocation(
-            masm, result, heap_number_map, scratch1, scratch2, gc_required,
-            mode);
-      }
-
-      // a2: Answer as signed int32.
-      // t1: Heap number to write answer into.
-
-      // Nothing can go wrong now, so move the heap number to v0, which is the
-      // result.
-      __ mov(v0, t1);
-      // Convert the int32 in a2 to the heap number in a0. As
-      // mentioned above SHR needs to always produce a positive result.
-      __ mtc1(a2, f0);
-      if (op == Token::SHR) {
-        __ Cvt_d_uw(f0, f0, f22);
-      } else {
-        __ cvt_d_w(f0, f0);
-      }
-      // ARM uses a workaround here because of the unaligned HeapNumber
-      // kValueOffset. On MIPS this workaround is built into sdc1 so
-      // there's no point in generating even more instructions.
-      __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
-      __ Ret();
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-// Generate the smi code. If the operation on smis are successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
-    MacroAssembler* masm,
-    Label* use_runtime,
-    Label* gc_required,
-    Token::Value op,
-    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
-    OverwriteMode mode) {
-  Label not_smis;
-
-  Register left = a1;
-  Register right = a0;
-  Register scratch1 = t3;
-
-  // Perform combined smi check on both operands.
-  __ Or(scratch1, left, Operand(right));
-  STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfNotSmi(scratch1, &not_smis);
-
-  // If the smi-smi operation results in a smi return is generated.
-  BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
-  // If heap number results are possible generate the result in an allocated
-  // heap number.
-  if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
-    BinaryOpStub_GenerateFPOperation(
-        masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
-        use_runtime, gc_required, &not_smis, op, mode);
-  }
-  __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label right_arg_changed, call_runtime;
-
-  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
-    // It is guaranteed that the value will fit into a Smi, because if it
-    // didn't, we wouldn't be here, see BinaryOp_Patch.
-    __ Branch(&right_arg_changed,
-              ne,
-              a0,
-              Operand(Smi::FromInt(fixed_right_arg_value())));
-  }
-
-  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
-      result_type_ == BinaryOpIC::SMI) {
-    // Only allow smi results.
-    BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
-  } else {
-    // Allow heap number result and don't make a transition if a heap number
-    // cannot be allocated.
-    BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
-        mode_);
-  }
-
-  // Code falls through if the result is not returned as either a smi or heap
-  // number.
-  __ bind(&right_arg_changed);
-  GenerateTypeTransition(masm);
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
-  Label call_runtime;
-  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
-  ASSERT(op_ == Token::ADD);
-  // If both arguments are strings, call the string add stub.
-  // Otherwise, do a transition.
-
-  // Registers containing left and right operands respectively.
-  Register left = a1;
-  Register right = a0;
-
-  // Test if left operand is a string.
-  __ JumpIfSmi(left, &call_runtime);
-  __ GetObjectType(left, a2, a2);
-  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
-  // Test if right operand is a string.
-  __ JumpIfSmi(right, &call_runtime);
-  __ GetObjectType(right, a2, a2);
-  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
-  StringAddStub string_add_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
-  GenerateRegisterArgsPush(masm);
-  __ TailCallStub(&string_add_stub);
-
-  __ bind(&call_runtime);
-  GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
-  Register left = a1;
-  Register right = a0;
-  Register scratch1 = t3;
-  Register scratch2 = t5;
-  FPURegister double_scratch = f0;
-  FPURegister single_scratch = f6;
-
-  Register heap_number_result = no_reg;
-  Register heap_number_map = t2;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  Label call_runtime;
-  // Labels for type transition, used for wrong input or output types.
-  // Both label are currently actually bound to the same position. We use two
-  // different label to differentiate the cause leading to type transition.
-  Label transition;
-
-  // Smi-smi fast case.
-  Label skip;
-  __ Or(scratch1, left, right);
-  __ JumpIfNotSmi(scratch1, &skip);
-  BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
-  // Fall through if the result is not a smi.
-  __ bind(&skip);
-
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD: {
-      // It could be that only SMIs have been seen at either the left
-      // or the right operand. For precise type feedback, patch the IC
-      // again if this changes.
-      if (left_type_ == BinaryOpIC::SMI) {
-        __ JumpIfNotSmi(left, &transition);
-      }
-      if (right_type_ == BinaryOpIC::SMI) {
-        __ JumpIfNotSmi(right, &transition);
-      }
-      // Load both operands and check that they are 32-bit integer.
-      // Jump to type transition if they are not. The registers a0 and a1 (right
-      // and left) are preserved for the runtime call.
-
-      __ LoadNumberAsInt32Double(
-          right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
-      __ LoadNumberAsInt32Double(
-          left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
-
-      if (op_ != Token::MOD) {
-        Label return_heap_number;
-        switch (op_) {
-          case Token::ADD:
-            __ add_d(f10, f12, f14);
-            break;
-          case Token::SUB:
-            __ sub_d(f10, f12, f14);
-            break;
-          case Token::MUL:
-            __ mul_d(f10, f12, f14);
-            break;
-          case Token::DIV:
-            __ div_d(f10, f12, f14);
-            break;
-          default:
-            UNREACHABLE();
-        }
-
-        if (result_type_ <= BinaryOpIC::INT32) {
-          Register except_flag = scratch2;
-          const FPURoundingMode kRoundingMode = op_ == Token::DIV ?
-              kRoundToMinusInf : kRoundToZero;
-          const CheckForInexactConversion kConversion = op_ == Token::DIV ?
-              kCheckForInexactConversion : kDontCheckForInexactConversion;
-          __ EmitFPUTruncate(kRoundingMode,
-                             scratch1,
-                             f10,
-                             at,
-                             f16,
-                             except_flag,
-                             kConversion);
-          // If except_flag != 0, result does not fit in a 32-bit integer.
-          __ Branch(&transition, ne, except_flag, Operand(zero_reg));
-          // Try to tag the result as a Smi, return heap number on overflow.
-          __ SmiTagCheckOverflow(scratch1, scratch1, scratch2);
-          __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
-          // Check for minus zero, transition in that case (because we need
-          // to return a heap number).
-          Label not_zero;
-          ASSERT(kSmiTag == 0);
-          __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
-          __ mfc1(scratch2, f11);
-          __ And(scratch2, scratch2, HeapNumber::kSignMask);
-          __ Branch(&transition, ne, scratch2, Operand(zero_reg));
-          __ bind(&not_zero);
-
-          __ Ret(USE_DELAY_SLOT);
-          __ mov(v0, scratch1);
-        }
-
-        __ bind(&return_heap_number);
-        // Return a heap number, or fall through to type transition or runtime
-        // call if we can't.
-        // We are using FPU registers so s0 is available.
-        heap_number_result = s0;
-        BinaryOpStub_GenerateHeapResultAllocation(masm,
-                                                  heap_number_result,
-                                                  heap_number_map,
-                                                  scratch1,
-                                                  scratch2,
-                                                  &call_runtime,
-                                                  mode_);
-        __ sdc1(f10,
-                FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
-        __ Ret(USE_DELAY_SLOT);
-        __ mov(v0, heap_number_result);
-
-        // A DIV operation expecting an integer result falls through
-        // to type transition.
-
-      } else {
-        if (encoded_right_arg_.has_value) {
-          __ Move(f16, fixed_right_arg_value());
-          __ BranchF(&transition, NULL, ne, f14, f16);
-        }
-
-        Label pop_and_call_runtime;
-
-        // Allocate a heap number to store the result.
-        heap_number_result = s0;
-        BinaryOpStub_GenerateHeapResultAllocation(masm,
-                                                  heap_number_result,
-                                                  heap_number_map,
-                                                  scratch1,
-                                                  scratch2,
-                                                  &pop_and_call_runtime,
-                                                  mode_);
-
-        // Call the C function to handle the double operation.
-        CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
-        if (FLAG_debug_code) {
-          __ stop("Unreachable code.");
-        }
-
-        __ bind(&pop_and_call_runtime);
-        __ Drop(2);
-        __ Branch(&call_runtime);
-      }
-
-      break;
-    }
-
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::SHL: {
-      Label return_heap_number;
-      // Convert operands to 32-bit integers. Right in a2 and left in a3. The
-      // registers a0 and a1 (right and left) are preserved for the runtime
-      // call.
-      __ LoadNumberAsInt32(
-          left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
-      __ LoadNumberAsInt32(
-          right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
-
-      // The ECMA-262 standard specifies that, for shift operations, only the
-      // 5 least significant bits of the shift value should be used.
-      switch (op_) {
-        case Token::BIT_OR:
-          __ Or(a2, a3, Operand(a2));
-          break;
-        case Token::BIT_XOR:
-          __ Xor(a2, a3, Operand(a2));
-          break;
-        case Token::BIT_AND:
-          __ And(a2, a3, Operand(a2));
-          break;
-        case Token::SAR:
-          __ And(a2, a2, Operand(0x1f));
-          __ srav(a2, a3, a2);
-          break;
-        case Token::SHR:
-          __ And(a2, a2, Operand(0x1f));
-          __ srlv(a2, a3, a2);
-          // SHR is special because it is required to produce a positive answer.
-          // We only get a negative result if the shift value (a2) is 0.
-          // This result cannot be respresented as a signed 32-bit integer, try
-          // to return a heap number if we can.
-          __ Branch((result_type_ <= BinaryOpIC::INT32)
-                      ? &transition
-                      : &return_heap_number,
-                     lt,
-                     a2,
-                     Operand(zero_reg));
-          break;
-        case Token::SHL:
-          __ And(a2, a2, Operand(0x1f));
-          __ sllv(a2, a3, a2);
-          break;
-        default:
-          UNREACHABLE();
-      }
-
-      // Check if the result fits in a smi.
-      __ Addu(scratch1, a2, Operand(0x40000000));
-      // If not try to return a heap number. (We know the result is an int32.)
-      __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
-      // Tag the result and return.
-      __ Ret(USE_DELAY_SLOT);  // SmiTag emits one instruction in delay slot.
-      __ SmiTag(v0, a2);
-
-      __ bind(&return_heap_number);
-      heap_number_result = t1;
-      BinaryOpStub_GenerateHeapResultAllocation(masm,
-                                                heap_number_result,
-                                                heap_number_map,
-                                                scratch1,
-                                                scratch2,
-                                                &call_runtime,
-                                                mode_);
-
-      if (op_ != Token::SHR) {
-        // Convert the result to a floating point value.
-        __ mtc1(a2, double_scratch);
-        __ cvt_d_w(double_scratch, double_scratch);
-      } else {
-        // The result must be interpreted as an unsigned 32-bit integer.
-        __ mtc1(a2, double_scratch);
-        __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
-      }
-
-      // Store the result.
-      __ sdc1(double_scratch,
-              FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
-      __ Ret(USE_DELAY_SLOT);
-      __ mov(v0, heap_number_result);
-
-      break;
-    }
-
-    default:
-      UNREACHABLE();
-  }
-
-  // We never expect DIV to yield an integer result, so we always generate
-  // type transition code for DIV operations expecting an integer result: the
-  // code will fall through to this type transition.
-  if (transition.is_linked() ||
-      ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
-    __ bind(&transition);
-    GenerateTypeTransition(masm);
-  }
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
-  Label call_runtime;
-
-  if (op_ == Token::ADD) {
-    // Handle string addition here, because it is the only operation
-    // that does not do a ToNumber conversion on the operands.
-    GenerateAddStrings(masm);
-  }
-
-  // Convert oddball arguments to numbers.
-  Label check, done;
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&check, ne, a1, Operand(t0));
-  if (Token::IsBitOp(op_)) {
-    __ li(a1, Operand(Smi::FromInt(0)));
-  } else {
-    __ LoadRoot(a1, Heap::kNanValueRootIndex);
-  }
-  __ jmp(&done);
-  __ bind(&check);
-  __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-  __ Branch(&done, ne, a0, Operand(t0));
-  if (Token::IsBitOp(op_)) {
-    __ li(a0, Operand(Smi::FromInt(0)));
-  } else {
-    __ LoadRoot(a0, Heap::kNanValueRootIndex);
-  }
-  __ bind(&done);
-
-  GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
-  Label call_runtime, transition;
-  BinaryOpStub_GenerateFPOperation(
-      masm, left_type_, right_type_, false,
-      &transition, &call_runtime, &transition, op_, mode_);
-
-  __ bind(&transition);
-  GenerateTypeTransition(masm);
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  Label call_runtime, call_string_add_or_runtime, transition;
-
-  BinaryOpStub_GenerateSmiCode(
-      masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
-  BinaryOpStub_GenerateFPOperation(
-      masm, left_type_, right_type_, false,
-      &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
-  __ bind(&transition);
-  GenerateTypeTransition(masm);
-
-  __ bind(&call_string_add_or_runtime);
-  if (op_ == Token::ADD) {
-    GenerateAddStrings(masm);
-  }
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
-  ASSERT(op_ == Token::ADD);
-  Label left_not_string, call_runtime;
-
-  Register left = a1;
-  Register right = a0;
-
-  // Check if left argument is a string.
-  __ JumpIfSmi(left, &left_not_string);
-  __ GetObjectType(left, a2, a2);
-  __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
-  StringAddStub string_add_left_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
-  GenerateRegisterArgsPush(masm);
-  __ TailCallStub(&string_add_left_stub);
-
-  // Left operand is not a string, test right.
-  __ bind(&left_not_string);
-  __ JumpIfSmi(right, &call_runtime);
-  __ GetObjectType(right, a2, a2);
-  __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
-  StringAddStub string_add_right_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
-  GenerateRegisterArgsPush(masm);
-  __ TailCallStub(&string_add_right_stub);
-
-  // At least one argument is not a string.
-  __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
-                                               Register result,
-                                               Register heap_number_map,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required,
-                                               OverwriteMode mode) {
-  // Code below will scratch result if allocation fails. To keep both arguments
-  // intact for the runtime call result cannot be one of these.
-  ASSERT(!result.is(a0) && !result.is(a1));
-
-  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
-    Label skip_allocation, allocated;
-    Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
-    // If the overwritable operand is already an object, we skip the
-    // allocation of a heap number.
-    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
-    // Allocate a heap number for the result.
-    __ AllocateHeapNumber(
-        result, scratch1, scratch2, heap_number_map, gc_required);
-    __ Branch(&allocated);
-    __ bind(&skip_allocation);
-    // Use object holding the overwritable operand for result.
-    __ mov(result, overwritable_operand);
-    __ bind(&allocated);
-  } else {
-    ASSERT(mode == NO_OVERWRITE);
-    __ AllocateHeapNumber(
-        result, scratch1, scratch2, heap_number_map, gc_required);
-  }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  __ Push(a1, a0);
+void BinaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { a1, a0 };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+  descriptor->SetMissHandler(
+      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
 }
 
 
-
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Untagged case: double input in f4, double result goes
   //   into f4.
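
Nearly a thousand lines of hand-rolled BinaryOpStub code (old lines 1319-2266)
collapse into the nine-line interface descriptor added at new lines 1237-1245:
the stub body is now produced by the Hydrogen stub compiler, with
BinaryOpIC_Miss as the deoptimization handler. Two bit tricks recur throughout
the removed smi fast paths and are worth spelling out. Addu(x, result,
0x40000000) followed by a branch-if-negative tests whether a 32-bit value fits
the 31-bit smi range [-2^30, 2^30 - 1], and the Token::MUL case detects 32-bit
overflow by comparing hi with the sign-extension of lo. Hypothetical C++
equivalents (illustrative helpers, not V8 code):

    #include <cstdint>

    // The 0x40000000 trick: v fits in a 31-bit smi iff v + 2^30 is still
    // non-negative as a signed 32-bit value (unsigned add avoids overflow UB).
    static bool FitsInSmi(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
    }

    // The Mult/mflo/mfhi check: the 64-bit product fits in 32 bits iff the
    // high word equals the sign-extension (arithmetic shift by 31) of the low.
    static bool MulOverflows(int32_t left, int32_t right) {
      int64_t product = static_cast<int64_t>(left) * right;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(static_cast<uint64_t>(product) >> 32);
      return (lo >> 31) != hi;
    }
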
@@ -2737,6 +1715,7 @@
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  BinaryOpStub::GenerateAheadOfTime(isolate);
 }
 
 
@@ -2795,8 +1774,9 @@
   if (do_gc) {
     // Move result passed in v0 into a0 to call PerformGC.
     __ mov(a0, v0);
-    __ PrepareCallCFunction(1, 0, a1);
-    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+    __ PrepareCallCFunction(2, 0, a1);
+    __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+    __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
   }
 
   ExternalReference scope_depth =
  // v0:v1: result
2876 1856
  // sp: stack pointer
2877 1857
  // fp: frame pointer
2878
  __ LeaveExitFrame(save_doubles_, s0, true);
1858
  __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
2879 1859

  
2880 1860
  // Check if we should retry or throw exception.
2881 1861
  Label retry;
......
3408 2388
    receiver = a0;
3409 2389
  }
3410 2390

  
3411
  StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
3412
                                         support_wrapper_);
2391
  StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
3413 2392

  
3414 2393
  __ bind(&miss);
3415 2394
  StubCompiler::TailCallBuiltin(
......
4156 3135
  DirectCEntryStub stub;
4157 3136
  stub.GenerateCall(masm, t9);
4158 3137

  
4159
  __ LeaveExitFrame(false, no_reg);
3138
  __ LeaveExitFrame(false, no_reg, true);
4160 3139

  
4161 3140
  // v0: result
4162 3141
  // subject: subject string (callee saved)
......
4424 3403
  // Cache the called function in a global property cell.  Cache states
4425 3404
  // are uninitialized, monomorphic (indicated by a JSFunction), and
4426 3405
  // megamorphic.
3406
  // a0 : number of arguments to the construct function
4427 3407
  // a1 : the function to call
4428 3408
  // a2 : cache cell for call target
4429 3409
  Label initialize, done, miss, megamorphic, not_array_function;
......
@@ -4444,9 +3424,6 @@
   // If we didn't have a matching function, and we didn't find the megamorph
   // sentinel, then we have in the cell either some other function or an
   // AllocationSite. Do a map check on the object in a3.
-  Handle<Map> allocation_site_map(
-      masm->isolate()->heap()->allocation_site_map(),
-      masm->isolate());
   __ lw(t1, FieldMemOperand(a3, 0));
   __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
   __ Branch(&miss, ne, t1, Operand(at));
        1 << 5  |  // a1
4486 3463
        1 << 6;    // a2
4487 3464

  
3465
    // Arguments register must be smi-tagged to call out.
4488 3466
    __ SmiTag(a0);
4489 3467
    __ MultiPush(kSavedRegs);
4490 3468

  
......
5803 4781

  
5804 4782
  // Just jump to runtime to add the two strings.
5805 4783
  __ bind(&call_runtime);
5806
  if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
5807
    GenerateRegisterArgsPop(masm);
5808
    // Build a frame.
5809
    {
5810
      FrameScope scope(masm, StackFrame::INTERNAL);
5811
      GenerateRegisterArgsPush(masm);
5812
      __ CallRuntime(Runtime::kStringAdd, 2);
5813
    }
5814
    __ Ret();
5815
  } else {
5816
    __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
5817
  }
4784
  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
5818 4785

  
5819 4786
  if (call_builtin.is_linked()) {
5820 4787
    __ bind(&call_builtin);
5821
    if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
5822
      GenerateRegisterArgsPop(masm);
5823
      // Build a frame.
5824
      {
5825
        FrameScope scope(masm, StackFrame::INTERNAL);
5826
        GenerateRegisterArgsPush(masm);
5827
        __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
5828
      }
5829
      __ Ret();
5830
    } else {
5831
      __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
5832
    }
4788
    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
5833 4789
  }
5834 4790
}
5835 4791

  
......
5863 4819
  // Check the number to string cache.
5864 4820
  __ bind(&not_string);
5865 4821
  // Puts the cached result into scratch1.
5866
  NumberToStringStub::GenerateLookupNumberStringCache(masm,
5867
                                                      arg,
5868
                                                      scratch1,
5869
                                                      scratch2,
5870
                                                      scratch3,
5871
                                                      scratch4,
5872
                                                      slow);
4822
  __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
5873 4823
  __ mov(arg, scratch1);
5874 4824
  __ sw(arg, MemOperand(sp, stack_offset));
5875 4825
  __ bind(&done);
......
6222 5172

  
6223 5173

  
6224 5174
void DirectCEntryStub::Generate(MacroAssembler* masm) {
6225
  // No need to pop or drop anything, LeaveExitFrame will restore the old
6226
  // stack, thus dropping the allocated space for the return value.
6227
  // The saved ra is after the reserved stack space for the 4 args.
5175
  // Make place for arguments to fit C calling convention. Most of the callers
5176
  // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
5177
  // so they handle stack restoring and we don't have to do that here.
5178
  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
5179
  // kCArgsSlotsSize stack space after the call.
5180
  __ Subu(sp, sp, Operand(kCArgsSlotsSize));
5181
  // Place the return address on the stack, making the call
5182
  // GC safe. The RegExp backend also relies on this.
5183
  __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
5184
  __ Call(t9);  // Call the C++ function.
6228 5185
  __ lw(t9, MemOperand(sp, kCArgsSlotsSize));
6229 5186

  
6230 5187
  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
......
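
Both DirectCEntryStub hunks are about keeping a direct C call GC-safe without a
trampoline. Generate (above) now reserves the o32 argument slots itself and
stores ra just above them, so the GC can find a return address through the exit
frame; GenerateCall (below) drops the bal/nop dance for computing the current
pc and instead loads ra with the stub's own code-object address via a
constant-size li, then calls through it. The slot constant follows the MIPS o32
convention that callers reserve stack space for the four argument registers
(values assumed from the ABI; V8 defines the real constants in its MIPS
headers):

    // o32 sketch: a caller reserves 4 argument slots even when the arguments
    // are passed in registers; with 4-byte pointers that is 16 bytes.
    const int kCArgSlotCount = 4;
    const int kPointerSize = 4;  // 32-bit MIPS
    const int kCArgsSlotsSize = kCArgSlotCount * kPointerSize;
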
@@ -6241,33 +5198,11 @@
 
 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                     Register target) {
-  __ Move(t9, target);
-  __ AssertStackIsAligned();
-  // Allocate space for arg slots.
-  __ Subu(sp, sp, kCArgsSlotsSize);
-
-  // Block the trampoline pool through the whole function to make sure the
-  // number of generated instructions is constant.
-  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-
-  // We need to get the current 'pc' value, which is not available on MIPS.
-  Label find_ra;
-  masm->bal(&find_ra);  // ra = pc + 8.
-  masm->nop();  // Branch delay slot nop.
-  masm->bind(&find_ra);
-
-  const int kNumInstructionsToJump = 6;
-  masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
-  // Push return address (accessible to GC through exit frame pc).
-  // This spot for ra was reserved in EnterExitFrame.
-  masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
   intptr_t loc =
       reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
-  masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
-  // Call the function.
-  masm->Jump(t9);
-  // Make sure the stored 'ra' points to this position.
-  ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+  __ Move(t9, target);
+  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+  __ Call(ra);
 }
 
 