Revision f230a1cf deps/v8/src/arm/codegen-arm.cc

--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -55,7 +55,7 @@
 #if defined(USE_SIMULATOR)
 byte* fast_exp_arm_machine_code = NULL;
 double fast_exp_simulator(double x) {
-  return Simulator::current(Isolate::Current())->CallFP(
+  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
       fast_exp_arm_machine_code, x, 0);
 }
 #endif
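
The rename makes the return-value convention explicit, presumably because a generic simulator call cannot tell whether the simulated routine left its result in the integer or the VFP return registers. A minimal sketch of that distinction, with hypothetical names (SimulatedRegs and the callback stand in for the simulator's state and the generated code; this is not V8's Simulator API):

#include <cstdio>

// Hypothetical stand-in for the simulator's register file.
struct SimulatedRegs {
  int r0 = 0, r1 = 0;  // integer return registers
  double d0 = 0.0;     // VFP return register
};

// A double-returning FP call reads d0; a generic CallFP that returned the
// r0/r1 pair would hand back reinterpreted bits instead of the double.
double CallFPReturnsDouble(SimulatedRegs& regs, double (*code)(double),
                           double x) {
  regs.d0 = code(x);  // stand-in for executing the generated machine code
  return regs.d0;
}

int main() {
  SimulatedRegs regs;
  std::printf("%f\n",
              CallFPReturnsDouble(regs, [](double v) { return v + v; }, 1.5));
}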
@@ -402,8 +402,7 @@
   // -----------------------------------
   if (mode == TRACK_ALLOCATION_SITE) {
     ASSERT(allocation_memento_found != NULL);
-    __ TestJSArrayForAllocationMemento(r2, r4);
-    __ b(eq, allocation_memento_found);
+    __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
   }
 
   // Set transitioned map.
@@ -432,8 +431,7 @@
   Label loop, entry, convert_hole, gc_required, only_change_map, done;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ TestJSArrayForAllocationMemento(r2, r4);
-    __ b(eq, fail);
+    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
@@ -444,15 +442,16 @@
 
   __ push(lr);
   __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  // r4: source FixedArray
   // r5: number of elements (smi-tagged)
 
   // Allocate new FixedDoubleArray.
   // Use lr as a temporary register.
   __ mov(lr, Operand(r5, LSL, 2));
   __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
-  __ Allocate(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT);
+  __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
   // r6: destination FixedDoubleArray, not tagged as heap object.
+  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  // r4: source FixedArray.
 
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
@@ -483,15 +482,15 @@
 
   // Prepare for conversion loop.
   __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(r6, r7, Operand(r5, LSL, 2));
+  __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(r6, r9, Operand(r5, LSL, 2));
   __ mov(r4, Operand(kHoleNanLower32));
   __ mov(r5, Operand(kHoleNanUpper32));
   // r3: begin of source FixedArray element fields, not tagged
   // r4: kHoleNanLower32
   // r5: kHoleNanUpper32
   // r6: end of destination FixedDoubleArray, not tagged
-  // r7: begin of FixedDoubleArray element fields, not tagged
+  // r9: begin of FixedDoubleArray element fields, not tagged
 
   __ b(&entry);
 
@@ -514,30 +513,30 @@
 
   // Convert and copy elements.
   __ bind(&loop);
-  __ ldr(r9, MemOperand(r3, 4, PostIndex));
-  // r9: current element
-  __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
+  __ ldr(lr, MemOperand(r3, 4, PostIndex));
+  // lr: current element
+  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
 
   // Normal smi, convert to double and store.
-  __ vmov(s0, r9);
+  __ vmov(s0, lr);
   __ vcvt_f64_s32(d0, s0);
-  __ vstr(d0, r7, 0);
-  __ add(r7, r7, Operand(8));
+  __ vstr(d0, r9, 0);
+  __ add(r9, r9, Operand(8));
   __ b(&entry);
 
   // Hole found, store the-hole NaN.
   __ bind(&convert_hole);
   if (FLAG_debug_code) {
     // Restore a "smi-untagged" heap object.
-    __ SmiTag(r9);
-    __ orr(r9, r9, Operand(1));
-    __ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
+    __ SmiTag(lr);
+    __ orr(lr, lr, Operand(1));
+    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
     __ Assert(eq, kObjectFoundInSmiOnlyArray);
   }
-  __ Strd(r4, r5, MemOperand(r7, 8, PostIndex));
+  __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
 
   __ bind(&entry);
-  __ cmp(r7, r6);
+  __ cmp(r9, r6);
   __ b(lt, &loop);
 
   __ pop(lr);
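
This hunk (together with the allocation change above) moves the loop's scratch usage off r7, keeping the current element in lr and the write cursor in r9; what the loop computes is unchanged. Each smi element is untagged and converted to a double, and holes are stored as the canonical hole NaN assembled from kHoleNanUpper32:kHoleNanLower32. A per-element sketch of that rule in plain C++ (the 1-bit smi tag follows 32-bit V8 conventions, but the hole-NaN bit patterns here are illustrative, not V8's actual values):

#include <cstdint>
#include <cstring>

// Illustrative stand-ins; the real values come from V8's heap definitions.
constexpr uint32_t kIllustrativeHoleNanUpper32 = 0x7FF7FFFF;
constexpr uint32_t kIllustrativeHoleNanLower32 = 0xFFFFFFFF;

// On 32-bit ARM a smi keeps its value in the upper 31 bits over a 0 tag bit;
// in a smi-only array, anything with the tag bit set must be the hole.
uint64_t ConvertElement(uint32_t tagged) {
  if (tagged & 1) {  // not a smi: store the hole NaN, like the Strd above
    return (uint64_t{kIllustrativeHoleNanUpper32} << 32) |
           kIllustrativeHoleNanLower32;
  }
  // Untag (arithmetic shift), then convert, like vcvt_f64_s32 above.
  double d = static_cast<double>(static_cast<int32_t>(tagged) >> 1);
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // raw double bits, as vstr writes them
  return bits;
}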
@@ -558,8 +557,7 @@
   Label entry, loop, convert_hole, gc_required, only_change_map;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ TestJSArrayForAllocationMemento(r2, r4);
-    __ b(eq, fail);
+    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
@@ -577,7 +575,7 @@
   // Allocate new FixedArray.
   __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
   __ add(r0, r0, Operand(r5, LSL, 1));
-  __ Allocate(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
+  __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
   // r6: destination FixedArray, not tagged as heap object
   // Set destination FixedDoubleArray's length and map.
   __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
@@ -589,14 +587,12 @@
   __ add(r3, r6, Operand(FixedArray::kHeaderSize));
   __ add(r6, r6, Operand(kHeapObjectTag));
   __ add(r5, r3, Operand(r5, LSL, 1));
-  __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
   __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
   // Using offsetted addresses in r4 to fully take advantage of post-indexing.
   // r3: begin of destination FixedArray element fields, not tagged
   // r4: begin of source FixedDoubleArray element fields, not tagged, +4
   // r5: end of destination FixedArray, not tagged
   // r6: destination FixedArray
-  // r7: the-hole pointer
   // r9: heap number map
   __ b(&entry);
 
@@ -608,7 +604,7 @@
 
   __ bind(&loop);
   __ ldr(r1, MemOperand(r4, 8, PostIndex));
-  // lr: current element's upper 32 bit
+  // r1: current element's upper 32 bit
   // r4: address of next element's upper 32 bit
   __ cmp(r1, Operand(kHoleNanUpper32));
   __ b(eq, &convert_hole);
@@ -631,7 +627,8 @@
 
   // Replace the-hole NaN with the-hole pointer.
   __ bind(&convert_hole);
-  __ str(r7, MemOperand(r3, 4, PostIndex));
+  __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+  __ str(r0, MemOperand(r3, 4, PostIndex));
 
   __ bind(&entry);
   __ cmp(r3, r5);
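
The double-to-tagged direction reads each element's upper word first (hence the "+4" bias noted for r4) and compares it against kHoleNanUpper32: the hole NaN is distinguishable by its upper 32 bits alone. With r7 freed, the the-hole pointer is now loaded into r0 at its single point of use instead of being cached across the loop. The test, in the same illustrative terms as the previous sketch:

#include <cstdint>

// Hole detection by the upper word only, mirroring the generated
// cmp(r1, Operand(kHoleNanUpper32)). Same stand-in constant as before,
// not V8's actual bit pattern.
constexpr uint32_t kIllustrativeHoleNanUpper32 = 0x7FF7FFFF;

bool IsHoleNan(uint64_t element_bits) {
  return static_cast<uint32_t>(element_bits >> 32) ==
         kIllustrativeHoleNanUpper32;
}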
@@ -775,50 +772,65 @@
   ASSERT(!temp2.is(temp3));
   ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
 
-  Label done;
+  Label zero, infinity, done;
 
   __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
 
   __ vldr(double_scratch1, ExpConstant(0, temp3));
-  __ vmov(result, kDoubleRegZero);
   __ VFPCompareAndSetFlags(double_scratch1, input);
-  __ b(ge, &done);
+  __ b(ge, &zero);
+
   __ vldr(double_scratch2, ExpConstant(1, temp3));
   __ VFPCompareAndSetFlags(input, double_scratch2);
-  __ vldr(result, ExpConstant(2, temp3));
-  __ b(ge, &done);
+  __ b(ge, &infinity);
+
   __ vldr(double_scratch1, ExpConstant(3, temp3));
   __ vldr(result, ExpConstant(4, temp3));
   __ vmul(double_scratch1, double_scratch1, input);
   __ vadd(double_scratch1, double_scratch1, result);
-  __ vmov(temp2, temp1, double_scratch1);
+  __ VmovLow(temp2, double_scratch1);
   __ vsub(double_scratch1, double_scratch1, result);
   __ vldr(result, ExpConstant(6, temp3));
   __ vldr(double_scratch2, ExpConstant(5, temp3));
   __ vmul(double_scratch1, double_scratch1, double_scratch2);
   __ vsub(double_scratch1, double_scratch1, input);
   __ vsub(result, result, double_scratch1);
-  __ vmul(input, double_scratch1, double_scratch1);
-  __ vmul(result, result, input);
-  __ mov(temp1, Operand(temp2, LSR, 11));
+  __ vmul(double_scratch2, double_scratch1, double_scratch1);
+  __ vmul(result, result, double_scratch2);
   __ vldr(double_scratch2, ExpConstant(7, temp3));
   __ vmul(result, result, double_scratch2);
   __ vsub(result, result, double_scratch1);
-  __ vldr(double_scratch2, ExpConstant(8, temp3));
+  // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
+  ASSERT(*reinterpret_cast<double*>
+         (ExternalReference::math_exp_constants(8).address()) == 1);
+  __ vmov(double_scratch2, 1);
   __ vadd(result, result, double_scratch2);
-  __ movw(ip, 0x7ff);
-  __ and_(temp2, temp2, Operand(ip));
+  __ mov(temp1, Operand(temp2, LSR, 11));
+  __ Ubfx(temp2, temp2, 0, 11);
   __ add(temp1, temp1, Operand(0x3ff));
-  __ mov(temp1, Operand(temp1, LSL, 20));
 
   // Must not call ExpConstant() after overwriting temp3!
   __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
-  __ add(temp3, temp3, Operand(kPointerSize));
-  __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
-  __ orr(temp1, temp1, temp2);
-  __ vmov(input, ip, temp1);
-  __ vmul(result, result, input);
+  __ add(temp3, temp3, Operand(temp2, LSL, 3));
+  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
+  // The first word loaded is the lower-numbered register.
+  if (temp2.code() < temp3.code()) {
+    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
+    __ vmov(double_scratch1, temp2, temp1);
+  } else {
+    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
+    __ vmov(double_scratch1, temp3, temp1);
+  }
+  __ vmul(result, result, double_scratch1);
+  __ b(&done);
+
+  __ bind(&zero);
+  __ vmov(result, kDoubleRegZero);
+  __ b(&done);
+
+  __ bind(&infinity);
+  __ vldr(result, ExpConstant(2, temp3));
+
   __ bind(&done);
 }
 
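
Restructured, the generated sequence is the standard table-driven exponential: clamp the input (the new zero and infinity labels handle underflow to 0 and overflow to the value in ExpConstant(2)), split x into an integer multiple of ln2/2048 plus a tiny remainder, fetch the table entry from math_exp_log_table, build the power of two by placing a biased exponent in a double's high word (the LSR #11 / Ubfx #0,#11 pair splits the integer part into exponent and table index), and multiply by a short polynomial in the remainder. A C++ sketch of the same scheme; the 11-bit split matches the code above, but the polynomial and constants are illustrative rather than V8's exact ExpConstant table:

#include <cmath>
#include <cstdint>
#include <cstring>

// Table-driven exp sketch. Input clamping (the zero/infinity paths above)
// is omitted for brevity.
double FastExpSketch(double x) {
  constexpr int kTableBits = 11;
  constexpr int kTableSize = 1 << kTableBits;  // 2048 entries
  static double table[kTableSize];
  static bool initialized = false;
  if (!initialized) {
    for (int i = 0; i < kTableSize; i++)
      table[i] = std::exp(std::log(2.0) * i / kTableSize);
    initialized = true;
  }
  // Split x: n = round(x * 2048 / ln2), so x = n * ln2/2048 + r, |r| small.
  double n = std::nearbyint(x * kTableSize / std::log(2.0));
  double r = x - n * std::log(2.0) / kTableSize;
  int64_t ni = static_cast<int64_t>(n);
  int64_t exponent = (ni >> kTableBits) + 1023;  // cf. add(temp1, Operand(0x3ff))
  int64_t index = ni & (kTableSize - 1);         // low 11 bits pick the entry
  // Build 2^(n >> 11) by writing the biased exponent into the high word,
  // cf. Operand(temp1, LSL, 20) orr'ed into the loaded table word.
  uint64_t bits = static_cast<uint64_t>(exponent) << 52;
  double two_to_k;
  std::memcpy(&two_to_k, &bits, sizeof bits);
  double poly = 1.0 + r + 0.5 * r * r;  // short series for e^r
  return two_to_k * table[index] * poly;
}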
@@ -859,7 +871,7 @@
 void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                                MarkingParity* parity) {
   if (IsYoungSequence(sequence)) {
-    *age = kNoAge;
+    *age = kNoAgeCodeAge;
     *parity = NO_MARKING_PARITY;
   } else {
     Address target_address = Memory::Address_at(
@@ -870,16 +882,17 @@
 }
 
 
-void Code::PatchPlatformCodeAge(byte* sequence,
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+                                byte* sequence,
                                 Code::Age age,
                                 MarkingParity parity) {
   uint32_t young_length;
   byte* young_sequence = GetNoCodeAgeSequence(&young_length);
-  if (age == kNoAge) {
+  if (age == kNoAgeCodeAge) {
     CopyBytes(sequence, young_sequence, young_length);
     CPU::FlushICache(sequence, young_length);
   } else {
-    Code* stub = GetCodeAgeStub(age, parity);
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
     patcher.masm()->add(r0, pc, Operand(-8));
     patcher.masm()->ldr(pc, MemOperand(pc, -4));
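
The patched prologue relies on ARM's pc reading as the current instruction's address plus 8: add r0, pc, Operand(-8) therefore leaves the address of the sequence start in r0, and ldr pc, MemOperand(pc, -4) loads its branch target from the word immediately after the ldr, where the patcher presumably plants the stub's entry address (this revision itself only threads the Isolate through to GetCodeAgeStub). A small runnable check of that pc arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kSeqStart = 0x8000;        // hypothetical code address
  const uint32_t kAddAddr = kSeqStart;      // add r0, pc, Operand(-8)
  const uint32_t kLdrAddr = kSeqStart + 4;  // ldr pc, MemOperand(pc, -4)
  // On ARM, reading pc yields the current instruction's address + 8.
  uint32_t r0 = (kAddAddr + 8) - 8;
  uint32_t literal_addr = (kLdrAddr + 8) - 4;
  assert(r0 == kSeqStart);               // r0 points at the sequence start
  assert(literal_addr == kLdrAddr + 4);  // target word sits right after the ldr
  return 0;
}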
