Revision f230a1cf deps/v8/src/x64/code-stubs-x64.cc

View differences:

--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -60,6 +60,17 @@
 }
 
 
+void NumberToStringStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { rax };
+  descriptor->register_param_count_ = 1;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ =
+      Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor) {
@@ -78,7 +89,7 @@
   descriptor->register_param_count_ = 4;
   descriptor->register_params_ = registers;
   descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
 }
 
 
@@ -145,6 +156,18 @@
 }
 
 
+void BinaryOpStub::InitializeInterfaceDescriptor(
+    Isolate* isolate,
+    CodeStubInterfaceDescriptor* descriptor) {
+  static Register registers[] = { rdx, rax };
+  descriptor->register_param_count_ = 2;
+  descriptor->register_params_ = registers;
+  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+  descriptor->SetMissHandler(
+      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
+}
+
+
 static void InitializeArrayConstructorDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor,
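
Note: this descriptor is the visible half of BinaryOpStub's move onto the
shared code-stub pipeline; the hand-written fast paths it replaces are the
large block deleted below (old lines 556-1118), and the stub is pre-generated
via BinaryOpStub::GenerateAheadOfTime further down. As a hedged sketch, a stub
ported to this scheme only names its fixed input registers and a slow-path
entry (MyStub and Runtime::kMyStubMiss are placeholders, not V8 names):

    void MyStub::InitializeInterfaceDescriptor(
        Isolate* isolate, CodeStubInterfaceDescriptor* descriptor) {
      static Register registers[] = { rdx, rax };   // fixed input registers
      descriptor->register_param_count_ = 2;
      descriptor->register_params_ = registers;
      descriptor->deoptimization_handler_ =         // slow-path fallback
          Runtime::FunctionForId(Runtime::kMyStubMiss)->entry;
    }
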
@@ -157,7 +180,7 @@
   descriptor->register_param_count_ = 2;
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &rax;
+    descriptor->stack_parameter_count_ = rax;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
@@ -179,7 +202,7 @@
 
   if (constant_stack_parameter_count != 0) {
     // stack param count needs (constructor pointer, and single argument)
-    descriptor->stack_parameter_count_ = &rax;
+    descriptor->stack_parameter_count_ = rax;
   }
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
   descriptor->register_params_ = registers;
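
Note: besides the two descriptors added above, these two hunks change how the
stack parameter count is stored: the descriptor field now holds the Register
by value rather than through a pointer, so taking the address of the global
register object is no longer needed. A sketch of the assumed companion change
in the descriptor header (not part of this file's diff); Register is a small
plain struct wrapping a register code, so copying it is free:

    // code-stubs.h (assumed shape, illustration only)
    class CodeStubInterfaceDescriptor {
      // ...
      Register stack_parameter_count_;  // was: Register* stack_parameter_count_
    };
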
@@ -437,35 +460,8 @@
   // If the operands are not both numbers, jump to not_numbers.
   // Leaves rdx and rax unchanged.  SmiOperands assumes both are smis.
   // NumberOperands assumes both are smis or heap numbers.
-  static void LoadSSE2SmiOperands(MacroAssembler* masm);
   static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                       Label* not_numbers);
-
-  // Takes the operands in rdx and rax and loads them as integers in rax
-  // and rcx.
-  static void LoadAsIntegers(MacroAssembler* masm,
-                             Label* operand_conversion_failure,
-                             Register heap_number_map);
-
-  // Tries to convert two values to smis losslessly.
-  // This fails if either argument is not a Smi nor a HeapNumber,
-  // or if it's a HeapNumber with a value that can't be converted
-  // losslessly to a Smi. In that case, control transitions to the
-  // on_not_smis label.
-  // On success, either control goes to the on_success label (if one is
-  // provided), or it falls through at the end of the code (if on_success
-  // is NULL).
-  // On success, both first and second holds Smi tagged values.
-  // One of first or second must be non-Smi when entering.
-  static void NumbersToSmis(MacroAssembler* masm,
-                            Register first,
-                            Register second,
-                            Register scratch1,
-                            Register scratch2,
-                            Register scratch3,
-                            Label* on_success,
-                            Label* on_not_smis,
-                            ConvertUndefined convert_undefined);
 };
 
 
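
Note: the NumbersToSmis declaration removed above (its implementation is
deleted later in this diff) performed the lossless HeapNumber-to-smi test its
comment describes, using cvttsd2siq/cvtlsi2sd and a bit comparison. A
host-language sketch of that test, for illustration only (x64 smis carry
32-bit payloads; this is not V8 source):

    #include <cstdint>
    #include <cstring>

    bool ConvertsToSmiLosslessly(double value) {
      int64_t truncated = static_cast<int64_t>(value);           // cvttsd2siq
      double round_trip =
          static_cast<double>(static_cast<int32_t>(truncated));  // cvtlsi2sd
      // Compare bit patterns rather than values: this also rejects -0.0,
      // whose bits differ from 0.0 although the two compare equal.
      return std::memcmp(&round_trip, &value, sizeof value) == 0;
    }
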
@@ -553,569 +549,6 @@
 }
 
 
-void BinaryOpStub::Initialize() {}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  __ PopReturnAddressTo(rcx);
-  __ push(rdx);
-  __ push(rax);
-  // Left and right arguments are now on top.
-  __ Push(Smi::FromInt(MinorKey()));
-
-  __ PushReturnAddressFrom(rcx);
-
-  // Patch the caller to an appropriate specialized stub and return the
-  // operation result to the caller of the stub.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
-                        masm->isolate()),
-      3,
-      1);
-}
-
-
-static void BinaryOpStub_GenerateSmiCode(
-    MacroAssembler* masm,
-    Label* slow,
-    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
-    Token::Value op) {
-
-  // Arguments to BinaryOpStub are in rdx and rax.
-  const Register left = rdx;
-  const Register right = rax;
-
-  // We only generate heapnumber answers for overflowing calculations
-  // for the four basic arithmetic operations and logical right shift by 0.
-  bool generate_inline_heapnumber_results =
-      (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
-      (op == Token::ADD || op == Token::SUB ||
-       op == Token::MUL || op == Token::DIV || op == Token::SHR);
-
-  // Smi check of both operands.  If op is BIT_OR, the check is delayed
-  // until after the OR operation.
-  Label not_smis;
-  Label use_fp_on_smis;
-  Label fail;
-
-  if (op != Token::BIT_OR) {
-    Comment smi_check_comment(masm, "-- Smi check arguments");
-    __ JumpIfNotBothSmi(left, right, &not_smis);
-  }
-
-  Label smi_values;
-  __ bind(&smi_values);
-  // Perform the operation.
-  Comment perform_smi(masm, "-- Perform smi operation");
-  switch (op) {
-    case Token::ADD:
-      ASSERT(right.is(rax));
-      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
-      break;
-
-    case Token::SUB:
-      __ SmiSub(left, left, right, &use_fp_on_smis);
-      __ movq(rax, left);
-      break;
-
-    case Token::MUL:
-      ASSERT(right.is(rax));
-      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
-      break;
-
-    case Token::DIV:
-      // SmiDiv will not accept left in rdx or right in rax.
-      __ movq(rbx, rax);
-      __ movq(rcx, rdx);
-      __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis);
-      break;
-
-    case Token::MOD:
-      // SmiMod will not accept left in rdx or right in rax.
-      __ movq(rbx, rax);
-      __ movq(rcx, rdx);
-      __ SmiMod(rax, rcx, rbx, &use_fp_on_smis);
-      break;
-
-    case Token::BIT_OR: {
-      ASSERT(right.is(rax));
-      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
-      break;
-      }
-    case Token::BIT_XOR:
-      ASSERT(right.is(rax));
-      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
-      break;
-
-    case Token::BIT_AND:
-      ASSERT(right.is(rax));
-      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
-      break;
-
-    case Token::SHL:
-      __ SmiShiftLeft(left, left, right);
-      __ movq(rax, left);
-      break;
-
-    case Token::SAR:
-      __ SmiShiftArithmeticRight(left, left, right);
-      __ movq(rax, left);
-      break;
-
-    case Token::SHR:
-      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
-      __ movq(rax, left);
-      break;
-
-    default:
-      UNREACHABLE();
-  }
-
-  // 5. Emit return of result in rax.  Some operations have registers pushed.
-  __ ret(0);
-
-  if (use_fp_on_smis.is_linked()) {
-    // 6. For some operations emit inline code to perform floating point
-    //    operations on known smis (e.g., if the result of the operation
-    //    overflowed the smi range).
-    __ bind(&use_fp_on_smis);
-    if (op == Token::DIV || op == Token::MOD) {
-      // Restore left and right to rdx and rax.
-      __ movq(rdx, rcx);
-      __ movq(rax, rbx);
-    }
-
-    if (generate_inline_heapnumber_results) {
-      __ AllocateHeapNumber(rcx, rbx, slow);
-      Comment perform_float(masm, "-- Perform float operation on smis");
-      if (op == Token::SHR) {
-        __ SmiToInteger32(left, left);
-        __ cvtqsi2sd(xmm0, left);
-      } else {
-        FloatingPointHelper::LoadSSE2SmiOperands(masm);
-        switch (op) {
-        case Token::ADD: __ addsd(xmm0, xmm1); break;
-        case Token::SUB: __ subsd(xmm0, xmm1); break;
-        case Token::MUL: __ mulsd(xmm0, xmm1); break;
-        case Token::DIV: __ divsd(xmm0, xmm1); break;
-        default: UNREACHABLE();
-        }
-      }
-      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
-      __ movq(rax, rcx);
-      __ ret(0);
-    } else {
-      __ jmp(&fail);
-    }
-  }
-
-  // 7. Non-smi operands reach the end of the code generated by
-  //    GenerateSmiCode, and fall through to subsequent code,
-  //    with the operands in rdx and rax.
-  //    But first we check if non-smi values are HeapNumbers holding
-  //    values that could be smi.
-  __ bind(&not_smis);
-  Comment done_comment(masm, "-- Enter non-smi code");
-  FloatingPointHelper::ConvertUndefined convert_undefined =
-      FloatingPointHelper::BAILOUT_ON_UNDEFINED;
-  // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
-  if (op == Token::BIT_AND ||
-      op == Token::BIT_OR ||
-      op == Token::BIT_XOR ||
-      op == Token::SAR ||
-      op == Token::SHL ||
-      op == Token::SHR) {
-    convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
-  }
-  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
-                                     &smi_values, &fail, convert_undefined);
-  __ jmp(&smi_values);
-  __ bind(&fail);
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
-                                                      Label* alloc_failure,
-                                                      OverwriteMode mode);
-
-
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
-                                                   Label* allocation_failure,
-                                                   Label* non_numeric_failure,
-                                                   Token::Value op,
-                                                   OverwriteMode mode) {
-  switch (op) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
-
-      switch (op) {
-        case Token::ADD: __ addsd(xmm0, xmm1); break;
-        case Token::SUB: __ subsd(xmm0, xmm1); break;
-        case Token::MUL: __ mulsd(xmm0, xmm1); break;
-        case Token::DIV: __ divsd(xmm0, xmm1); break;
-        default: UNREACHABLE();
-      }
-      BinaryOpStub_GenerateHeapResultAllocation(
-          masm, allocation_failure, mode);
-      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-      __ ret(0);
-      break;
-    }
-    case Token::MOD: {
-      // For MOD we jump to the allocation_failure label, to call runtime.
-      __ jmp(allocation_failure);
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      Label non_smi_shr_result;
-      Register heap_number_map = r9;
-      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
-                                          heap_number_map);
-      switch (op) {
-        case Token::BIT_OR:  __ orl(rax, rcx); break;
-        case Token::BIT_AND: __ andl(rax, rcx); break;
-        case Token::BIT_XOR: __ xorl(rax, rcx); break;
-        case Token::SAR: __ sarl_cl(rax); break;
-        case Token::SHL: __ shll_cl(rax); break;
-        case Token::SHR: {
-          __ shrl_cl(rax);
-          // Check if result is negative. This can only happen for a shift
-          // by zero.
-          __ testl(rax, rax);
-          __ j(negative, &non_smi_shr_result);
-          break;
-        }
-        default: UNREACHABLE();
-      }
-      STATIC_ASSERT(kSmiValueSize == 32);
-      // Tag smi result and return.
-      __ Integer32ToSmi(rax, rax);
-      __ Ret();
-
-      // Logical shift right can produce an unsigned int32 that is not
-      // an int32, and so is not in the smi range.  Allocate a heap number
-      // in that case.
-      if (op == Token::SHR) {
-        __ bind(&non_smi_shr_result);
-        Label allocation_failed;
-        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
-        // Allocate heap number in new space.
-        // Not using AllocateHeapNumber macro in order to reuse
-        // already loaded heap_number_map.
-        __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
-                    TAG_OBJECT);
-        // Set the map.
-        __ AssertRootValue(heap_number_map,
-                           Heap::kHeapNumberMapRootIndex,
-                           kHeapNumberMapRegisterClobbered);
-        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
-                heap_number_map);
-        __ cvtqsi2sd(xmm0, rbx);
-        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
-        __ Ret();
-
-        __ bind(&allocation_failed);
-        // We need tagged values in rdx and rax for the following code,
-        // not int32 in rax and rcx.
-        __ Integer32ToSmi(rax, rcx);
-        __ Integer32ToSmi(rdx, rbx);
-        __ jmp(allocation_failure);
-      }
-      break;
-    }
-    default: UNREACHABLE(); break;
-  }
-  // No fall-through from this generated code.
-  if (FLAG_debug_code) {
-    __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode);
-  }
-}
-
-
-static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn(
-    MacroAssembler* masm) {
-  // Push arguments, but ensure they are under the return address
-  // for a tail call.
-  __ PopReturnAddressTo(rcx);
-  __ push(rdx);
-  __ push(rax);
-  __ PushReturnAddressFrom(rcx);
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
-  ASSERT(op_ == Token::ADD);
-  Label left_not_string, call_runtime;
-
-  // Registers containing left and right operands respectively.
-  Register left = rdx;
-  Register right = rax;
-
-  // Test if left operand is a string.
-  __ JumpIfSmi(left, &left_not_string, Label::kNear);
-  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &left_not_string, Label::kNear);
-  StringAddStub string_add_left_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
-  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
-  __ TailCallStub(&string_add_left_stub);
-
-  // Left operand is not a string, test right.
-  __ bind(&left_not_string);
-  __ JumpIfSmi(right, &call_runtime, Label::kNear);
-  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &call_runtime, Label::kNear);
-
-  StringAddStub string_add_right_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
-  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
-  __ TailCallStub(&string_add_right_stub);
-
-  // Neither argument is a string.
-  __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
-  Label right_arg_changed, call_runtime;
-
-  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
-    // It is guaranteed that the value will fit into a Smi, because if it
-    // didn't, we wouldn't be here, see BinaryOp_Patch.
-    __ Cmp(rax, Smi::FromInt(fixed_right_arg_value()));
-    __ j(not_equal, &right_arg_changed);
-  }
-
-  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
-      result_type_ == BinaryOpIC::SMI) {
-    // Only allow smi results.
-    BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
-  } else {
-    // Allow heap number result and don't make a transition if a heap number
-    // cannot be allocated.
-    BinaryOpStub_GenerateSmiCode(
-        masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-  }
-
-  // Code falls through if the result is not returned as either a smi or heap
-  // number.
-  __ bind(&right_arg_changed);
-  GenerateTypeTransition(masm);
-
-  if (call_runtime.is_linked()) {
-    __ bind(&call_runtime);
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      GenerateRegisterArgsPush(masm);
-      GenerateCallRuntime(masm);
-    }
-    __ Ret();
-  }
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
-  // The int32 case is identical to the Smi case.  We avoid creating this
-  // ic state on x64.
-  UNREACHABLE();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
-  Label call_runtime;
-  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
-  ASSERT(op_ == Token::ADD);
-  // If both arguments are strings, call the string add stub.
-  // Otherwise, do a transition.
-
-  // Registers containing left and right operands respectively.
-  Register left = rdx;
-  Register right = rax;
-
-  // Test if left operand is a string.
-  __ JumpIfSmi(left, &call_runtime);
-  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &call_runtime);
-
-  // Test if right operand is a string.
-  __ JumpIfSmi(right, &call_runtime);
-  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
-  __ j(above_equal, &call_runtime);
-
-  StringAddStub string_add_stub(
-      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
-  BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm);
-  __ TailCallStub(&string_add_stub);
-
-  __ bind(&call_runtime);
-  GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
-  Label call_runtime;
-
-  if (op_ == Token::ADD) {
-    // Handle string addition here, because it is the only operation
-    // that does not do a ToNumber conversion on the operands.
-    GenerateAddStrings(masm);
-  }
-
-  // Convert oddball arguments to numbers.
-  Label check, done;
-  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &check, Label::kNear);
-  if (Token::IsBitOp(op_)) {
-    __ xor_(rdx, rdx);
-  } else {
-    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
-  }
-  __ jmp(&done, Label::kNear);
-  __ bind(&check);
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, &done, Label::kNear);
-  if (Token::IsBitOp(op_)) {
-    __ xor_(rax, rax);
-  } else {
-    __ LoadRoot(rax, Heap::kNanValueRootIndex);
-  }
-  __ bind(&done);
-
-  GenerateNumberStub(masm);
-}
-
-
-static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
-                                       Register input,
-                                       Label* fail) {
-  Label ok;
-  __ JumpIfSmi(input, &ok, Label::kNear);
-  Register heap_number_map = r8;
-  Register scratch1 = r9;
-  Register scratch2 = r10;
-  // HeapNumbers containing 32bit integer values are also allowed.
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal, fail);
-  __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
-  // Convert, convert back, and compare the two doubles' bits.
-  __ cvttsd2siq(scratch2, xmm0);
-  __ cvtlsi2sd(xmm1, scratch2);
-  __ movq(scratch1, xmm0);
-  __ movq(scratch2, xmm1);
-  __ cmpq(scratch1, scratch2);
-  __ j(not_equal, fail);
-  __ bind(&ok);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
-  Label gc_required, not_number;
-
-  // It could be that only SMIs have been seen at either the left
-  // or the right operand. For precise type feedback, patch the IC
-  // again if this changes.
-  if (left_type_ == BinaryOpIC::SMI) {
-    BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
-  }
-  if (right_type_ == BinaryOpIC::SMI) {
-    BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
-  }
-
-  BinaryOpStub_GenerateFloatingPointCode(
-      masm, &gc_required, &not_number, op_, mode_);
-
-  __ bind(&not_number);
-  GenerateTypeTransition(masm);
-
-  __ bind(&gc_required);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
-  Label call_runtime, call_string_add_or_runtime;
-
-  BinaryOpStub_GenerateSmiCode(
-      masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
-
-  BinaryOpStub_GenerateFloatingPointCode(
-      masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
-
-  __ bind(&call_string_add_or_runtime);
-  if (op_ == Token::ADD) {
-    GenerateAddStrings(masm);
-  }
-
-  __ bind(&call_runtime);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    GenerateRegisterArgsPush(masm);
-    GenerateCallRuntime(masm);
-  }
-  __ Ret();
-}
-
-
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
-                                                      Label* alloc_failure,
-                                                      OverwriteMode mode) {
-  Label skip_allocation;
-  switch (mode) {
-    case OVERWRITE_LEFT: {
-      // If the argument in rdx is already an object, we skip the
-      // allocation of a heap number.
-      __ JumpIfNotSmi(rdx, &skip_allocation);
-      // Allocate a heap number for the result. Keep rax and rdx intact
-      // for the possible runtime call.
-      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
-      // Now rdx can be overwritten losing one of the arguments as we are
-      // now done and will not need it any more.
-      __ movq(rdx, rbx);
-      __ bind(&skip_allocation);
-      // Use object in rdx as a result holder
-      __ movq(rax, rdx);
-      break;
-    }
-    case OVERWRITE_RIGHT:
-      // If the argument in rax is already an object, we skip the
-      // allocation of a heap number.
-      __ JumpIfNotSmi(rax, &skip_allocation);
-      // Fall through!
-    case NO_OVERWRITE:
-      // Allocate a heap number for the result. Keep rax and rdx intact
-      // for the possible runtime call.
-      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
-      // Now rax can be overwritten losing one of the arguments as we are
-      // now done and will not need it any more.
-      __ movq(rax, rbx);
-      __ bind(&skip_allocation);
-      break;
-    default: UNREACHABLE();
-  }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
-  __ push(rdx);
-  __ push(rax);
-}
-
-
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // TAGGED case:
   //   Input:
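
Note: the hunk above deletes the entire platform-specific BinaryOpStub
implementation (old lines 556-1118): the type-transition patching, the smi
fast paths, the floating-point paths, and the heap-number result allocation.
Its overall shape survives conceptually in the shared pipeline: try the smi
operation, box an overflowing result in a HeapNumber, and fall back to the
runtime when even that fails. A host-language sketch of that flow for ADD
(illustration only, not V8 source):

    #include <cstdint>

    enum FastPathResult { SMI_RESULT, HEAP_NUMBER_RESULT };

    FastPathResult AddFastPath(int32_t left, int32_t right,
                               int32_t* smi_out, double* heap_number_out) {
      int64_t sum = static_cast<int64_t>(left) + right;
      if (sum >= INT32_MIN && sum <= INT32_MAX) {  // x64 smis hold 32 bits
        *smi_out = static_cast<int32_t>(sum);      // stays a smi
        return SMI_RESULT;
      }
      *heap_number_out = static_cast<double>(sum); // overflowed: box a double
      return HEAP_NUMBER_RESULT;                   // caller allocates the box
    }
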
@@ -1145,7 +578,7 @@
     // Then load the bits of the double into rbx.
     __ SmiToInteger32(rax, rax);
     __ subq(rsp, Immediate(kDoubleSize));
-    __ cvtlsi2sd(xmm1, rax);
+    __ Cvtlsi2sd(xmm1, rax);
     __ movsd(Operand(rsp, 0), xmm1);
     __ movq(rbx, xmm1);
     __ movq(rdx, xmm1);
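
Note: lower-case cvtlsi2sd emits the raw SSE2 instruction, while the
capitalized Cvtlsi2sd that replaces it throughout this revision is a
MacroAssembler wrapper from a companion change. A plausible definition,
stated as an assumption rather than the actual V8 implementation: clear the
destination first so the conversion does not inherit a false dependence on
the register's stale upper bits.

    void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
      xorps(dst, dst);      // break the dependence on dst's previous value
      cvtlsi2sd(dst, src);  // int32 -> double
    }
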
@@ -1161,7 +594,7 @@
     // Input is a HeapNumber. Push it on the FPU stack and load its
     // bits into rbx.
     __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
-    __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ MoveDouble(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
     __ movq(rdx, rbx);
 
     __ bind(&loaded);
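
Note: the assumed point of the movq -> MoveDouble rename here is to make
"load the raw 64-bit bit pattern of a double" explicit and fixed at 8 bytes,
independent of how pointer-sized moves are encoded elsewhere. A sketch of
what such a wrapper could look like (assumption, not the actual
macro-assembler definition):

    void MacroAssembler::MoveDouble(Register dst, const Operand& src) {
      movq(dst, src);  // always an 8-byte load: the double's bits
    }
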
@@ -1422,67 +855,6 @@
 }
 
 
-// Input: rdx, rax are the left and right objects of a bit op.
-// Output: rax, rcx are left and right integers for a bit op.
-// Jump to conversion_failure: rdx and rax are unchanged.
-void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
-                                         Label* conversion_failure,
-                                         Register heap_number_map) {
-  // Check float operands.
-  Label arg1_is_object, check_undefined_arg1;
-  Label arg2_is_object, check_undefined_arg2;
-  Label load_arg2, done;
-
-  __ JumpIfNotSmi(rdx, &arg1_is_object);
-  __ SmiToInteger32(r8, rdx);
-  __ jmp(&load_arg2);
-
-  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
-  __ bind(&check_undefined_arg1);
-  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, conversion_failure);
-  __ Set(r8, 0);
-  __ jmp(&load_arg2);
-
-  __ bind(&arg1_is_object);
-  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal, &check_undefined_arg1);
-  // Get the untagged integer version of the rdx heap number in r8.
-  __ TruncateHeapNumberToI(r8, rdx);
-
-  // Here r8 has the untagged integer, rax has a Smi or a heap number.
-  __ bind(&load_arg2);
-  // Test if arg2 is a Smi.
-  __ JumpIfNotSmi(rax, &arg2_is_object);
-  __ SmiToInteger32(rcx, rax);
-  __ jmp(&done);
-
-  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
-  __ bind(&check_undefined_arg2);
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, conversion_failure);
-  __ Set(rcx, 0);
-  __ jmp(&done);
-
-  __ bind(&arg2_is_object);
-  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal, &check_undefined_arg2);
-  // Get the untagged integer version of the rax heap number in rcx.
-  __ TruncateHeapNumberToI(rcx, rax);
-
-  __ bind(&done);
-  __ movl(rax, r8);
-}
-
-
-void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
-  __ SmiToInteger32(kScratchRegister, rdx);
-  __ cvtlsi2sd(xmm0, kScratchRegister);
-  __ SmiToInteger32(kScratchRegister, rax);
-  __ cvtlsi2sd(xmm1, kScratchRegister);
-}
-
-
 void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                   Label* not_numbers) {
   Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
@@ -1503,89 +875,12 @@
 
   __ bind(&load_smi_rdx);
   __ SmiToInteger32(kScratchRegister, rdx);
-  __ cvtlsi2sd(xmm0, kScratchRegister);
+  __ Cvtlsi2sd(xmm0, kScratchRegister);
   __ JumpIfNotSmi(rax, &load_nonsmi_rax);
 
   __ bind(&load_smi_rax);
   __ SmiToInteger32(kScratchRegister, rax);
-  __ cvtlsi2sd(xmm1, kScratchRegister);
-  __ bind(&done);
-}
-
-
-void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
-                                        Register first,
-                                        Register second,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Label* on_success,
-                                        Label* on_not_smis,
-                                        ConvertUndefined convert_undefined) {
-  Register heap_number_map = scratch3;
-  Register smi_result = scratch1;
-  Label done, maybe_undefined_first, maybe_undefined_second, first_done;
-
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  Label first_smi;
-  __ JumpIfSmi(first, &first_smi, Label::kNear);
-  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal,
-       (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
-           ? &maybe_undefined_first
-           : on_not_smis);
-  // Convert HeapNumber to smi if possible.
-  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
-  __ movq(scratch2, xmm0);
-  __ cvttsd2siq(smi_result, xmm0);
-  // Check if conversion was successful by converting back and
-  // comparing to the original double's bits.
-  __ cvtlsi2sd(xmm1, smi_result);
-  __ movq(kScratchRegister, xmm1);
-  __ cmpq(scratch2, kScratchRegister);
-  __ j(not_equal, on_not_smis);
-  __ Integer32ToSmi(first, smi_result);
-
-  __ bind(&first_done);
-  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
-  __ bind(&first_smi);
-  __ AssertNotSmi(second);
-  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
-  __ j(not_equal,
-       (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
-           ? &maybe_undefined_second
-           : on_not_smis);
-  // Convert second to smi, if possible.
-  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
-  __ movq(scratch2, xmm0);
-  __ cvttsd2siq(smi_result, xmm0);
-  __ cvtlsi2sd(xmm1, smi_result);
-  __ movq(kScratchRegister, xmm1);
-  __ cmpq(scratch2, kScratchRegister);
-  __ j(not_equal, on_not_smis);
-  __ Integer32ToSmi(second, smi_result);
-  if (on_success != NULL) {
-    __ jmp(on_success);
-  } else {
-    __ jmp(&done);
-  }
-
-  __ bind(&maybe_undefined_first);
-  __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, on_not_smis);
-  __ xor_(first, first);
-  __ jmp(&first_done);
-
-  __ bind(&maybe_undefined_second);
-  __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
-  __ j(not_equal, on_not_smis);
-  __ xor_(second, second);
-  if (on_success != NULL) {
-    __ jmp(on_success);
-  }
-  // Else: fall through.
-
+  __ Cvtlsi2sd(xmm1, kScratchRegister);
   __ bind(&done);
 }
 
@@ -1603,7 +898,7 @@
 
   // Save 1 in double_result - we need this several times later on.
   __ movq(scratch, Immediate(1));
-  __ cvtlsi2sd(double_result, scratch);
+  __ Cvtlsi2sd(double_result, scratch);
 
   if (exponent_type_ == ON_STACK) {
     Label base_is_smi, unpack_exponent;
1623 918

  
1624 919
    __ bind(&base_is_smi);
1625 920
    __ SmiToInteger32(base, base);
1626
    __ cvtlsi2sd(double_base, base);
921
    __ Cvtlsi2sd(double_base, base);
1627 922
    __ bind(&unpack_exponent);
1628 923

  
1629 924
    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
......
1812 1107
  // and may not have contained the exponent value in the first place when the
1813 1108
  // input was a smi.  We reset it with exponent value before bailing out.
1814 1109
  __ j(not_equal, &done);
1815
  __ cvtlsi2sd(double_exponent, exponent);
1110
  __ Cvtlsi2sd(double_exponent, exponent);
1816 1111

  
1817 1112
  // Returning or bailing out.
1818 1113
  Counters* counters = masm->isolate()->counters();
......
1902 1197
    receiver = rax;
1903 1198
  }
1904 1199

  
1905
  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss,
1906
                                         support_wrapper_);
1200
  StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss);
1907 1201
  __ bind(&miss);
1908 1202
  StubCompiler::TailCallBuiltin(
1909 1203
      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
......
1977 1271
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1978 1272
  // The key is in rdx and the parameter count is in rax.
1979 1273

  
1980
  // The displacement is used for skipping the frame pointer on the
1981
  // stack. It is the offset of the last parameter (if any) relative
1982
  // to the frame pointer.
1983
  static const int kDisplacement = 1 * kPointerSize;
1984

  
1985 1274
  // Check that the key is a smi.
1986 1275
  Label slow;
1987 1276
  __ JumpIfNotSmi(rdx, &slow);
......
2003 1292
  __ j(above_equal, &slow);
2004 1293

  
2005 1294
  // Read the argument from the stack and return it.
2006
  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
2007
  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
2008
  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
2009
  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
1295
  __ SmiSub(rax, rax, rdx);
1296
  __ SmiToInteger32(rax, rax);
1297
  StackArgumentsAccessor args(rbp, rax, ARGUMENTS_DONT_CONTAIN_RECEIVER);
1298
  __ movq(rax, args.GetArgumentOperand(0));
2010 1299
  __ Ret();
2011 1300

  
2012 1301
  // Arguments adaptor case: Check index against actual arguments
......
2018 1307
  __ j(above_equal, &slow);
2019 1308

  
2020 1309
  // Read the argument from the stack and return it.
2021
  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
2022
  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
2023
  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
2024
  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
1310
  __ SmiSub(rcx, rcx, rdx);
1311
  __ SmiToInteger32(rcx, rcx);
1312
  StackArgumentsAccessor adaptor_args(rbx, rcx,
1313
                                      ARGUMENTS_DONT_CONTAIN_RECEIVER);
1314
  __ movq(rax, adaptor_args.GetArgumentOperand(0));
2025 1315
  __ Ret();
2026 1316

  
2027 1317
  // Slow-case: Handle non-smi or out-of-bounds access to arguments
......
2395 1685
  //  rsp[24] : subject string
2396 1686
  //  rsp[32] : JSRegExp object
2397 1687

  
2398
  static const int kLastMatchInfoOffset = 1 * kPointerSize;
2399
  static const int kPreviousIndexOffset = 2 * kPointerSize;
2400
  static const int kSubjectOffset = 3 * kPointerSize;
2401
  static const int kJSRegExpOffset = 4 * kPointerSize;
1688
  enum RegExpExecStubArgumentIndices {
1689
    JS_REG_EXP_OBJECT_ARGUMENT_INDEX,
1690
    SUBJECT_STRING_ARGUMENT_INDEX,
1691
    PREVIOUS_INDEX_ARGUMENT_INDEX,
1692
    LAST_MATCH_INFO_ARGUMENT_INDEX,
1693
    REG_EXP_EXEC_ARGUMENT_COUNT
1694
  };
2402 1695

  
1696
  StackArgumentsAccessor args(rsp, REG_EXP_EXEC_ARGUMENT_COUNT,
1697
                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
2403 1698
  Label runtime;
2404 1699
  // Ensure that a RegExp stack is allocated.
2405 1700
  Isolate* isolate = masm->isolate();
......
2412 1707
  __ j(zero, &runtime);
2413 1708

  
2414 1709
  // Check that the first argument is a JSRegExp object.
2415
  __ movq(rax, Operand(rsp, kJSRegExpOffset));
1710
  __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
2416 1711
  __ JumpIfSmi(rax, &runtime);
2417 1712
  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
2418 1713
  __ j(not_equal, &runtime);
......
2445 1740

  
2446 1741
  // Reset offset for possibly sliced string.
2447 1742
  __ Set(r14, 0);
2448
  __ movq(rdi, Operand(rsp, kSubjectOffset));
1743
  __ movq(rdi, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
2449 1744
  __ JumpIfSmi(rdi, &runtime);
2450 1745
  __ movq(r15, rdi);  // Make a copy of the original subject string.
2451 1746
  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
......
2547 1842
  // We have to use r15 instead of rdi to load the length because rdi might
2548 1843
  // have been only made to look like a sequential string when it actually
2549 1844
  // is an external string.
2550
  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
1845
  __ movq(rbx, args.GetArgumentOperand(PREVIOUS_INDEX_ARGUMENT_INDEX));
2551 1846
  __ JumpIfNotSmi(rbx, &runtime);
2552 1847
  __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset));
2553 1848
  __ j(above_equal, &runtime);
......
2649 1944
  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
2650 1945
  __ call(r11);
2651 1946

  
2652
  __ LeaveApiExitFrame();
1947
  __ LeaveApiExitFrame(true);
2653 1948

  
2654 1949
  // Check the result.
2655 1950
  Label success;
......
2667 1962

  
2668 1963
  // For failure return null.
2669 1964
  __ LoadRoot(rax, Heap::kNullValueRootIndex);
2670
  __ ret(4 * kPointerSize);
1965
  __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
2671 1966

  
2672 1967
  // Load RegExp data.
2673 1968
  __ bind(&success);
2674
  __ movq(rax, Operand(rsp, kJSRegExpOffset));
1969
  __ movq(rax, args.GetArgumentOperand(JS_REG_EXP_OBJECT_ARGUMENT_INDEX));
2675 1970
  __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
2676 1971
  __ SmiToInteger32(rax,
2677 1972
                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
......
2680 1975

  
2681 1976
  // rdx: Number of capture registers
2682 1977
  // Check that the fourth object is a JSArray object.
2683
  __ movq(r15, Operand(rsp, kLastMatchInfoOffset));
1978
  __ movq(r15, args.GetArgumentOperand(LAST_MATCH_INFO_ARGUMENT_INDEX));
2684 1979
  __ JumpIfSmi(r15, &runtime);
2685 1980
  __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister);
2686 1981
  __ j(not_equal, &runtime);
......
2704 1999
  __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
2705 2000
          kScratchRegister);
2706 2001
  // Store last subject and last input.
2707
  __ movq(rax, Operand(rsp, kSubjectOffset));
2002
  __ movq(rax, args.GetArgumentOperand(SUBJECT_STRING_ARGUMENT_INDEX));
2708 2003
  __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
2709 2004
  __ movq(rcx, rax);
2710 2005
  __ RecordWriteField(rbx,
......
2747 2042

  
2748 2043
  // Return last match info.
2749 2044
  __ movq(rax, r15);
2750
  __ ret(4 * kPointerSize);
2045
  __ ret(REG_EXP_EXEC_ARGUMENT_COUNT * kPointerSize);
2751 2046

  
2752 2047
  __ bind(&exception);
2753 2048
  // Result must now be exception. If there is no pending exception already a
......
2910 2205
}
2911 2206

  
2912 2207

  
2913
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
2914
                                                         Register object,
2915
                                                         Register result,
2916
                                                         Register scratch1,
2917
                                                         Register scratch2,
2918
                                                         Label* not_found) {
2919
  // Use of registers. Register result is used as a temporary.
2920
  Register number_string_cache = result;
2921
  Register mask = scratch1;
2922
  Register scratch = scratch2;
2923

  
2924
  // Load the number string cache.
2925
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2926

  
2927
  // Make the hash mask from the length of the number string cache. It
2928
  // contains two elements (number and string) for each cache entry.
2929
  __ SmiToInteger32(
2930
      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
2931
  __ shrl(mask, Immediate(1));
2932
  __ subq(mask, Immediate(1));  // Make mask.
2933

  
2934
  // Calculate the entry in the number string cache. The hash value in the
2935
  // number string cache for smis is just the smi value, and the hash for
2936
  // doubles is the xor of the upper and lower words. See
2937
  // Heap::GetNumberStringCache.
2938
  Label is_smi;
2939
  Label load_result_from_cache;
2940
  Factory* factory = masm->isolate()->factory();
2941
  __ JumpIfSmi(object, &is_smi);
2942
  __ CheckMap(object,
2943
              factory->heap_number_map(),
2944
              not_found,
2945
              DONT_DO_SMI_CHECK);
2946

  
2947
  STATIC_ASSERT(8 == kDoubleSize);
2948
  __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
2949
  __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
2950
  GenerateConvertHashCodeToIndex(masm, scratch, mask);
2951

  
2952
  Register index = scratch;
2953
  Register probe = mask;
2954
  __ movq(probe,
2955
          FieldOperand(number_string_cache,
2956
                        index,
2957
                        times_1,
2958
                        FixedArray::kHeaderSize));
2959
  __ JumpIfSmi(probe, not_found);
2960
  __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
2961
  __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
2962
  __ ucomisd(xmm0, xmm1);
2963
  __ j(parity_even, not_found);  // Bail out if NaN is involved.
2964
  __ j(not_equal, not_found);  // The cache did not contain this value.
2965
  __ jmp(&load_result_from_cache);
2966

  
2967
  __ bind(&is_smi);
2968
  __ SmiToInteger32(scratch, object);
2969
  GenerateConvertHashCodeToIndex(masm, scratch, mask);
2970

  
2971
  // Check if the entry is the smi we are looking for.
2972
  __ cmpq(object,
2973
          FieldOperand(number_string_cache,
2974
                       index,
2975
                       times_1,
2976
                       FixedArray::kHeaderSize));
2977
  __ j(not_equal, not_found);
2978

  
2979
  // Get the result from the cache.
2980
  __ bind(&load_result_from_cache);
2981
  __ movq(result,
2982
          FieldOperand(number_string_cache,
2983
                       index,
2984
                       times_1,
2985
                       FixedArray::kHeaderSize + kPointerSize));
2986
  Counters* counters = masm->isolate()->counters();
2987
  __ IncrementCounter(counters->number_to_string_native(), 1);
2988
}
2989

  
2990

  
2991
void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
2992
                                                        Register hash,
2993
                                                        Register mask) {
2994
  __ and_(hash, mask);
2995
  // Each entry in string cache consists of two pointer sized fields,
2996
  // but times_twice_pointer_size (multiplication by 16) scale factor
2997
  // is not supported by addrmode on x64 platform.
2998
  // So we have to premultiply entry index before lookup.
2999
  __ shl(hash, Immediate(kPointerSizeLog2 + 1));
3000
}
3001

  
3002

  
3003
void NumberToStringStub::Generate(MacroAssembler* masm) {
3004
  Label runtime;
3005

  
3006
  StackArgumentsAccessor args(rsp, 1, ARGUMENTS_DONT_CONTAIN_RECEIVER);
3007
  __ movq(rbx, args.GetArgumentOperand(0));
3008

  
3009
  // Generate code to lookup number in the number string cache.
3010
  GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, &runtime);
3011
  __ ret(1 * kPointerSize);
3012

  
3013
  __ bind(&runtime);
3014
  // Handle number to string in the runtime system if not found in the cache.
3015
  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
3016
}
3017

  
3018

  
3019 2208
static int NegativeComparisonResult(Condition cc) {
3020 2209
  ASSERT(cc != equal);
3021 2210
  ASSERT((cc == less) || (cc == less_equal)
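
Note: the code deleted above is relocated rather than lost: the cache lookup
moved into the macro assembler (see the __ LookupNumberStringCache call site
below), and NumberToStringStub itself is now created through the interface
descriptor added at the top of this revision. The hash the deleted code
computed inline, as a self-contained sketch built from its comments
(illustration only): smis hash to their value, doubles to the xor of their
two 32-bit halves, and the index is premultiplied by the two-pointer entry
size because times_16 addressing is unavailable on x64.

    #include <cstdint>
    #include <cstring>

    uint32_t NumberStringCacheIndexForDouble(double value, uint32_t mask) {
      uint32_t halves[2];
      std::memcpy(halves, &value, sizeof value);       // raw bits of the double
      uint32_t hash = halves[0] ^ halves[1];           // xor of the two words
      const int kPointerSizeLog2 = 3;                  // x64
      return (hash & mask) << (kPointerSizeLog2 + 1);  // 2 pointers per entry
    }
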
@@ -3322,6 +2511,7 @@
   // Cache the called function in a global property cell.  Cache states
   // are uninitialized, monomorphic (indicated by a JSFunction), and
   // megamorphic.
+  // rax : number of arguments to the construct function
   // rbx : cache cell for call target
   // rdi : the function to call
   Isolate* isolate = masm->isolate();
@@ -3341,9 +2531,8 @@
   // If we didn't have a matching function, and we didn't find the megamorph
   // sentinel, then we have in the cell either some other function or an
   // AllocationSite. Do a map check on the object in rcx.
-  Handle<Map> allocation_site_map(
-      masm->isolate()->heap()->allocation_site_map(),
-      masm->isolate());
+  Handle<Map> allocation_site_map =
+      masm->isolate()->factory()->allocation_site_map();
   __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
   __ j(not_equal, &miss);
 
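
Note: the rewrite works because the factory accessor already returns a
Handle<Map> rooted in the isolate, so the call site no longer spells out the
two-argument Handle constructor. The same simplification recurs at the two
allocation_site_map sites near the bottom of this revision.
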
@@ -3379,6 +2568,7 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Arguments register must be smi-tagged to call out.
     __ Integer32ToSmi(rax, rax);
     __ push(rax);
     __ push(rdi);
@@ -3562,6 +2752,7 @@
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  BinaryOpStub::GenerateAheadOfTime(isolate);
 }
 
 
@@ -3619,6 +2810,7 @@
     // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
     // stack is known to be aligned. This function takes one argument which is
     // passed in register.
+    __ movq(arg_reg_2, ExternalReference::isolate_address(masm->isolate()));
     __ movq(arg_reg_1, rax);
     __ movq(kScratchRegister,
             ExternalReference::perform_gc_function(masm->isolate()));
@@ -4583,34 +3775,11 @@
 
   // Just jump to runtime to add the two strings.
   __ bind(&call_runtime);
-
-  if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
-    GenerateRegisterArgsPop(masm, rcx);
-    // Build a frame
-    {
-      FrameScope scope(masm, StackFrame::INTERNAL);
-      GenerateRegisterArgsPush(masm);
-      __ CallRuntime(Runtime::kStringAdd, 2);
-    }
-    __ Ret();
-  } else {
-    __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
-  }
+  __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 
   if (call_builtin.is_linked()) {
     __ bind(&call_builtin);
-    if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
-      GenerateRegisterArgsPop(masm, rcx);
-      // Build a frame
-      {
-        FrameScope scope(masm, StackFrame::INTERNAL);
-        GenerateRegisterArgsPush(masm);
-        __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
-      }
-      __ Ret();
-    } else {
-      __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
-    }
+    __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
   }
 }
 
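
Note: the deleted STRING_ADD_ERECT_FRAME branches built a frame for callers
that reached the string-add code without a standard one, which the old
hand-written BinaryOpStub did; with that stub removed in this revision, the
unconditional tail calls suffice. Both conditional paths reduce to the single
kept line:

    __ TailCallRuntime(Runtime::kStringAdd, 2, 1);  // 2 args, result size 1
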
@@ -4646,12 +3815,7 @@
   // Check the number to string cache.
   __ bind(&not_string);
   // Puts the cached result into scratch1.
-  NumberToStringStub::GenerateLookupNumberStringCache(masm,
-                                                      arg,
-                                                      scratch1,
-                                                      scratch2,
-                                                      scratch3,
-                                                      slow);
+  __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, slow);
   __ movq(arg, scratch1);
   __ movq(Operand(rsp, stack_offset), arg);
   __ bind(&done);
@@ -4935,13 +4099,18 @@
   //  rsp[16] : from
   //  rsp[24] : string
 
-  const int kToOffset = 1 * kPointerSize;
-  const int kFromOffset = kToOffset + kPointerSize;
-  const int kStringOffset = kFromOffset + kPointerSize;
-  const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
+  enum SubStringStubArgumentIndices {
+    STRING_ARGUMENT_INDEX,
+    FROM_ARGUMENT_INDEX,
+    TO_ARGUMENT_INDEX,
+    SUB_STRING_ARGUMENT_COUNT
+  };
+
+  StackArgumentsAccessor args(rsp, SUB_STRING_ARGUMENT_COUNT,
+                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
 
   // Make sure first argument is a string.
-  __ movq(rax, Operand(rsp, kStringOffset));
+  __ movq(rax, args.GetArgumentOperand(STRING_ARGUMENT_INDEX));
   STATIC_ASSERT(kSmiTag == 0);
   __ testl(rax, Immediate(kSmiTagMask));
   __ j(zero, &runtime);
@@ -4951,8 +4120,8 @@
   // rax: string
   // rbx: instance type
   // Calculate length of sub string using the smi values.
-  __ movq(rcx, Operand(rsp, kToOffset));
-  __ movq(rdx, Operand(rsp, kFromOffset));
+  __ movq(rcx, args.GetArgumentOperand(TO_ARGUMENT_INDEX));
+  __ movq(rdx, args.GetArgumentOperand(FROM_ARGUMENT_INDEX));
   __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
 
   __ SmiSub(rcx, rcx, rdx);  // Overflow doesn't happen.
@@ -4965,7 +4134,7 @@
   // Return original string.
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(kArgumentsSize);
+  __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
   __ bind(&not_original_string);
 
   Label single_char;
@@ -5035,9 +4204,15 @@
     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ testb(rbx, Immediate(kStringEncodingMask));
-    __ j(zero, &two_byte_slice, Label::kNear);
+    // Make long jumps when allocations tracking is on due to
+    // RecordObjectAllocation inside MacroAssembler::Allocate.
+    Label::Distance jump_distance =
+        masm->isolate()->heap_profiler()->is_tracking_allocations()
+        ? Label::kFar
+        : Label::kNear;
+    __ j(zero, &two_byte_slice, jump_distance);
     __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
-    __ jmp(&set_slice_header, Label::kNear);
+    __ jmp(&set_slice_header, jump_distance);
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
     __ bind(&set_slice_header);
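
Note: Label::kNear restricts a jump to a signed 8-bit displacement (at most
127 bytes forward). The new comment's premise is that allocation tracking
makes MacroAssembler::Allocate emit extra instrumentation between the jump
and its target, which can push the label out of near range; hence the
distance is chosen at stub-generation time:

    Label::Distance jump_distance =
        masm->isolate()->heap_profiler()->is_tracking_allocations()
            ? Label::kFar    // instrumented Allocate lengthens the code
            : Label::kNear;  // 8-bit displacement suffices otherwise
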
@@ -5048,7 +4223,7 @@
     __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
     __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
     __ IncrementCounter(counters->sub_string_native(), 1);
-    __ ret(kArgumentsSize);
+    __ ret(3 * kPointerSize);
 
     __ bind(&copy_routine);
   }
@@ -5102,7 +4277,7 @@
   StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
   __ movq(rsi, r14);  // Restore rsi.
   __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(kArgumentsSize);
+  __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
 
   __ bind(&two_byte_sequential);
   // Allocate the result.
@@ -5127,7 +4302,7 @@
   StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
   __ movq(rsi, r14);  // Restore esi.
   __ IncrementCounter(counters->sub_string_native(), 1);
-  __ ret(kArgumentsSize);
+  __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
@@ -5141,7 +4316,7 @@
   StringCharAtGenerator generator(
       rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
   generator.GenerateFast(masm);
-  __ ret(kArgumentsSize);
+  __ ret(SUB_STRING_ARGUMENT_COUNT * kPointerSize);
   generator.SkipSlow(masm, &runtime);
 }
 
@@ -5376,7 +4551,7 @@
   __ jmp(&left, Label::kNear);
   __ bind(&right_smi);
   __ SmiToInteger32(rcx, rax);  // Can't clobber rax yet.
-  __ cvtlsi2sd(xmm1, rcx);
+  __ Cvtlsi2sd(xmm1, rcx);
 
   __ bind(&left);
   __ JumpIfSmi(rdx, &left_smi, Label::kNear);
@@ -5386,7 +4561,7 @@
   __ jmp(&done);
   __ bind(&left_smi);
   __ SmiToInteger32(rcx, rdx);  // Can't clobber rdx yet.
-  __ cvtlsi2sd(xmm0, rcx);
+  __ Cvtlsi2sd(xmm0, rcx);
 
   __ bind(&done);
   // Compare operands
@@ -6392,9 +5567,8 @@
     __ incl(rdx);
     __ movq(rcx, FieldOperand(rbx, Cell::kValueOffset));
     if (FLAG_debug_code) {
-      Handle<Map> allocation_site_map(
-          masm->isolate()->heap()->allocation_site_map(),
-          masm->isolate());
+      Handle<Map> allocation_site_map =
+          masm->isolate()->factory()->allocation_site_map();
       __ Cmp(FieldOperand(rcx, 0), allocation_site_map);
       __ Assert(equal, kExpectedAllocationSiteInCell);
     }
@@ -6541,7 +5715,7 @@
   __ j(equal, &no_info);
   __ movq(rdx, FieldOperand(rbx, Cell::kValueOffset));
   __ Cmp(FieldOperand(rdx, 0),
-         Handle<Map>(masm->isolate()->heap()->allocation_site_map()));
+         masm->isolate()->factory()->allocation_site_map());
   __ j(not_equal, &no_info);
 
   __ movq(rdx, FieldOperand(rdx, AllocationSite::kTransitionInfoOffset));
