Revision f230a1cf deps/v8/src/arm/macro-assembler-arm.cc

View differences:

deps/v8/src/arm/macro-assembler-arm.cc
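
In outline: Move(Register, Handle&lt;Object&gt;) learns to materialize any object and absorbs LoadHeapObject; the cp-clobber assert in RecordWrite is dropped; VFPCanonicalizeNaN gains a separate destination register; a Prologue helper with code pre-aging support is added; LeaveExitFrame and CallApiFunctionAndReturn grow an optional context restore; handle-scope bookkeeping moves from r7 to r9; CallRuntime is parameterized over SaveFPRegsMode; LookupNumberStringCache is added; the CopyBytes alignment preamble is reworked; and TestJSArrayForAllocationMemento reports through a caller-supplied label.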
 #include "codegen.h"
 #include "cpu-profiler.h"
 #include "debug.h"
+#include "isolate-inl.h"
 #include "runtime.h"
 
 namespace v8 {
......
 
 
 void MacroAssembler::Move(Register dst, Handle<Object> value) {
-  mov(dst, Operand(value));
+  AllowDeferredHandleDereference smi_check;
+  if (value->IsSmi()) {
+    mov(dst, Operand(value));
+  } else {
+    ASSERT(value->IsHeapObject());
+    if (isolate()->heap()->InNewSpace(*value)) {
+      Handle<Cell> cell = isolate()->factory()->NewCell(value);
+      mov(dst, Operand(cell));
+      ldr(dst, FieldMemOperand(dst, Cell::kValueOffset));
+    } else {
+      mov(dst, Operand(value));
+    }
+  }
 }
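
Note: a smi encodes its value in the word itself, so it is safe to embed as an immediate. A heap object in new space may be moved by the scavenger, and the scavenger does not patch pointers embedded in code, so rather than embedding the raw address the code allocates a Cell in old space, embeds the Cell's address, and loads the object's current address through it at run time. A standalone C++ sketch of the indirection (not V8 code, names illustrative):

    #include <cstdio>

    struct Cell { void* value; };  // the cell itself stays put; the GC
                                   // rewrites 'value' when the object moves

    int main() {
      int first_home = 1;
      Cell cell = { &first_home };      // code embeds &cell only
      int second_home = first_home;     // simulate the GC moving the object
      cell.value = &second_home;        // ...and updating the cell slot
      std::printf("%d\n", *static_cast<int*>(cell.value));
      return 0;
    }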
 
 
......
 }
 
 
-void MacroAssembler::LoadHeapObject(Register result,
-                                    Handle<HeapObject> object) {
-  AllowDeferredHandleDereference using_raw_address;
-  if (isolate()->heap()->InNewSpace(*object)) {
-    Handle<Cell> cell = isolate()->factory()->NewCell(object);
-    mov(result, Operand(cell));
-    ldr(result, FieldMemOperand(result, Cell::kValueOffset));
-  } else {
-    mov(result, Operand(object));
-  }
-}
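
Note: LoadHeapObject is deleted rather than moved; its body is exactly the heap-object arm of the new Move above, and the remaining caller in InvokeFunction is switched over further down in this diff.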
-
-
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
                                 Condition cond,
......
                                  SaveFPRegsMode fp_mode,
                                  RememberedSetAction remembered_set_action,
                                  SmiCheck smi_check) {
-  // The compiled code assumes that record write doesn't change the
-  // context register, so we check that none of the clobbered
-  // registers are cp.
-  ASSERT(!address.is(cp) && !value.is(cp));
-
   if (emit_debug_code()) {
     ldr(ip, MemOperand(address));
     cmp(ip, value);
......
   bind(&fpscr_done);
 }
 
-void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
+
+void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst,
+                                        const DwVfpRegister src,
                                         const Condition cond) {
-  vsub(value, value, kDoubleRegZero, cond);
+  vsub(dst, src, kDoubleRegZero, cond);
 }
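
Note: subtracting zero canonicalizes a NaN because VFP arithmetic quiets it, and with the FPU in default-NaN mode (which the FPSCR code just above this hunk ensures) every NaN operand collapses to the single canonical pattern; the new dst/src split simply makes the helper non-destructive. A host-C++ illustration of the quieting (which bit pattern comes out depends on the FPU and its default-NaN setting, so this is a sketch, not a spec):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      double canonical = nan - 0.0;   // same trick as vsub(dst, src, zero)
      uint64_t bits;
      std::memcpy(&bits, &canonical, sizeof bits);
      std::printf("%016llx\n", static_cast<unsigned long long>(bits));
      return 0;
    }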
 
 
......
 }
 
 
+void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
+  if (frame_mode == BUILD_STUB_FRAME) {
+    stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+    Push(Smi::FromInt(StackFrame::STUB));
+    // Adjust FP to point to saved FP.
+    add(fp, sp, Operand(2 * kPointerSize));
+  } else {
+    PredictableCodeSizeScope predictible_code_size_scope(
+        this, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+    // The following three instructions must remain together and unmodified
+    // for code aging to work properly.
+    if (isolate()->IsCodePreAgingActive()) {
+      // Pre-age the code.
+      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+      add(r0, pc, Operand(-8));
+      ldr(pc, MemOperand(pc, -4));
+      dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+    } else {
+      stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+      nop(ip.code());
+      // Adjust FP to point to saved FP.
+      add(fp, sp, Operand(2 * kPointerSize));
+    }
+  }
+}
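
Note: the non-stub branch must occupy exactly kNoCodeAgeSequenceLength instructions (hence PredictableCodeSizeScope), because code aging patches this sequence in place. In the pre-aged form, add r0, pc, #-8 leaves the address of the add itself in r0 (reading pc on ARM yields the current instruction's address plus 8), and ldr pc, [pc, #-4] branches through the literal word emitted by dd() immediately behind it, which holds the pre-aged stub's entry point; r0 tells the stub where the patchable sequence begins.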
+
+
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   // r0-r3: preserved
   stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
......
 
 
 void MacroAssembler::LeaveExitFrame(bool save_doubles,
-                                    Register argument_count) {
+                                    Register argument_count,
+                                    bool restore_context) {
   // Optionally restore all double registers.
   if (save_doubles) {
     // Calculate the stack location of the saved doubles and restore them.
......
   mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
   str(r3, MemOperand(ip));
 
+
   // Restore current context from top and clear it in debug mode.
-  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
-  ldr(cp, MemOperand(ip));
+  if (restore_context) {
+    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+    ldr(cp, MemOperand(ip));
+  }
 #ifdef DEBUG
+  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
   str(r3, MemOperand(ip));
 #endif
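
Note: restore_context lets a caller suppress reloading cp from the isolate's context slot when it has already restored the context itself; CallApiFunctionAndReturn below passes false exactly when it was given a context_restore_operand. The debug-only clearing of the slot now recomputes the slot address in ip, since the mov it used to share sits inside the new if block.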
 
......
   ASSERT(flag == JUMP_FUNCTION || has_frame());
 
   // Get the function and setup the context.
-  LoadHeapObject(r1, function);
+  Move(r1, function);
   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // We call indirectly through the code field in the function to
......
   STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
-  // For the JSEntry handler, we must preserve r0-r4, r5-r7 are available.
+  // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
   // We will build up the handler from the bottom by pushing on the stack.
   // Set up the code object (r5) and the state (r6) for pushing.
   unsigned state =
......
 
   // Push the frame pointer, context, state, and code object.
   if (kind == StackHandler::JS_ENTRY) {
-    mov(r7, Operand(Smi::FromInt(0)));  // Indicates no context.
+    mov(cp, Operand(Smi::FromInt(0)));  // Indicates no context.
     mov(ip, Operand::Zero());  // NULL frame pointer.
-    stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit());
+    stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
   } else {
     stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
   }
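
Note: r7 drops out of the handler push sequence. At JS entry no context is live, so the "no context" smi can be staged directly in cp, which this very stm pushes anyway; the same revision stops using r7 in CallApiFunctionAndReturn below (r9 takes its place), presumably to keep r7 free as a platform register on ABIs that reserve it.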
......
 }
 
 
-void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
-                                              Address function_address,
-                                              ExternalReference thunk_ref,
-                                              Register thunk_last_arg,
-                                              int stack_space,
-                                              int return_value_offset) {
+void MacroAssembler::CallApiFunctionAndReturn(
+    ExternalReference function,
+    Address function_address,
+    ExternalReference thunk_ref,
+    Register thunk_last_arg,
+    int stack_space,
+    MemOperand return_value_operand,
+    MemOperand* context_restore_operand) {
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address(isolate());
   const int kNextOffset = 0;
......
       ExternalReference::handle_scope_level_address(isolate()),
       next_address);
 
+  ASSERT(!thunk_last_arg.is(r3));
+
   // Allocate HandleScope in callee-save registers.
-  mov(r7, Operand(next_address));
-  ldr(r4, MemOperand(r7, kNextOffset));
-  ldr(r5, MemOperand(r7, kLimitOffset));
-  ldr(r6, MemOperand(r7, kLevelOffset));
+  mov(r9, Operand(next_address));
+  ldr(r4, MemOperand(r9, kNextOffset));
+  ldr(r5, MemOperand(r9, kLimitOffset));
+  ldr(r6, MemOperand(r9, kLevelOffset));
   add(r6, r6, Operand(1));
-  str(r6, MemOperand(r7, kLevelOffset));
+  str(r6, MemOperand(r9, kLevelOffset));
 
   if (FLAG_log_timer_events) {
     FrameScope frame(this, StackFrame::MANUAL);
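
Note: the handle-scope bookkeeping base register changes from r7 to r9; r4-r6 still cache the scope's next, limit and level fields across the call. Rendered as a standalone C++ model with stand-in types (not the real V8 classes), the emitted sequence, together with the restore code further down, amounts to:

    // Pseudo-model of the generated code; 'api' stands in for the call
    // through 'function' or the profiling thunk selected below.
    struct HandleScopeData { void** next; void** limit; int level; };

    void* CallApiAndManageHandles(HandleScopeData* data, void* (*api)()) {
      void** prev_next = data->next;     // cached in r4
      void** prev_limit = data->limit;   // cached in r5
      data->level++;                     // r6, stored back through r9
      void* result = api();
      data->next = prev_next;            // drop handles made by the call
      data->level--;
      if (data->limit != prev_limit) {   // extensions were allocated:
        data->limit = prev_limit;        // restore the limit and free them
        // HandleScope::DeleteExtensions(isolate) in the real slow path
      }
      return result;
    }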
......
     PopSafepointRegisters();
   }
 
-  ASSERT(!thunk_last_arg.is(r3));
   Label profiler_disabled;
   Label end_profiler_check;
   bool* is_profiling_flag =
......
   }
 
   Label promote_scheduled_exception;
+  Label exception_handled;
   Label delete_allocated_handles;
   Label leave_exit_frame;
   Label return_value_loaded;
 
   // load value from ReturnValue
-  ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
+  ldr(r0, return_value_operand);
   bind(&return_value_loaded);
   // No more valid handles (the result handle was the last one). Restore
   // previous handle scope.
-  str(r4, MemOperand(r7, kNextOffset));
+  str(r4, MemOperand(r9, kNextOffset));
   if (emit_debug_code()) {
-    ldr(r1, MemOperand(r7, kLevelOffset));
+    ldr(r1, MemOperand(r9, kLevelOffset));
     cmp(r1, r6);
     Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
   }
   sub(r6, r6, Operand(1));
-  str(r6, MemOperand(r7, kLevelOffset));
-  ldr(ip, MemOperand(r7, kLimitOffset));
+  str(r6, MemOperand(r9, kLevelOffset));
+  ldr(ip, MemOperand(r9, kLimitOffset));
   cmp(r5, ip);
   b(ne, &delete_allocated_handles);
 
......
   ldr(r5, MemOperand(ip));
   cmp(r4, r5);
   b(ne, &promote_scheduled_exception);
+  bind(&exception_handled);
 
+  bool restore_context = context_restore_operand != NULL;
+  if (restore_context) {
+    ldr(cp, *context_restore_operand);
+  }
   // LeaveExitFrame expects unwind space to be in a register.
   mov(r4, Operand(stack_space));
-  LeaveExitFrame(false, r4);
+  LeaveExitFrame(false, r4, !restore_context);
   mov(pc, lr);
 
   bind(&promote_scheduled_exception);
-  TailCallExternalReference(
-      ExternalReference(Runtime::kPromoteScheduledException, isolate()),
-      0,
-      1);
+  {
+    FrameScope frame(this, StackFrame::INTERNAL);
+    CallExternalReference(
+        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+        0);
+  }
+  jmp(&exception_handled);
 
   // HandleScope limit has changed. Delete allocated extensions.
   bind(&delete_allocated_handles);
-  str(r5, MemOperand(r7, kLimitOffset));
+  str(r5, MemOperand(r9, kLimitOffset));
   mov(r4, r0);
   PrepareCallCFunction(1, r5);
   mov(r0, Operand(ExternalReference::isolate_address(isolate())));
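
Note: besides the r7-to-r9 rename, the return value is now read from a caller-supplied MemOperand instead of a fixed fp offset, and an optional context_restore_operand restores cp before the exit frame is left, in which case LeaveExitFrame is told not to reload it from the isolate. The scheduled-exception path also changes shape: instead of a tail call that never comes back here, Runtime::kPromoteScheduledException is invoked inside a proper INTERNAL frame and the emitted code rejoins the normal epilogue at exception_handled. A hypothetical call site under the new signature (function_ref, function_address, thunk_ref and kStackUnwindSpace are assumed to be in scope; the slot offsets are illustrative, not any real stub's layout):

    MemOperand return_value_operand(fp, 3 * kPointerSize);    // illustrative
    MemOperand context_restore_operand(fp, 2 * kPointerSize); // illustrative
    CallApiFunctionAndReturn(function_ref, function_address, thunk_ref,
                             r0 /* thunk_last_arg */, kStackUnwindSpace,
                             return_value_operand,
                             &context_restore_operand);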
......
 
 
 void MacroAssembler::CallRuntime(const Runtime::Function* f,
-                                 int num_arguments) {
+                                 int num_arguments,
+                                 SaveFPRegsMode save_doubles) {
   // All parameters are on the stack.  r0 has the return value after call.
 
   // If the expected number of arguments of the runtime function is
......
   // smarter.
   mov(r0, Operand(num_arguments));
   mov(r1, Operand(ExternalReference(f, isolate())));
-  CEntryStub stub(1);
-  CallStub(&stub);
-}
-
-
-void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
-  CallRuntime(Runtime::FunctionForId(fid), num_arguments);
-}
-
-
-void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
-  const Runtime::Function* function = Runtime::FunctionForId(id);
-  mov(r0, Operand(function->nargs));
-  mov(r1, Operand(ExternalReference(function, isolate())));
-  CEntryStub stub(1, kSaveFPRegs);
+  CEntryStub stub(1, save_doubles);
   CallStub(&stub);
 }
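
Note: three near-identical bodies collapse into one parameterized over SaveFPRegsMode. The dropped FunctionId overload and CallRuntimeSaveDoubles presumably survive as thin wrappers in macro-assembler-arm.h, which is not part of this file's diff; as a sketch, they would look something like:

    // Assumed header-side wrappers (not shown in this revision):
    void CallRuntime(Runtime::FunctionId id, int num_arguments) {
      CallRuntime(Runtime::FunctionForId(id), num_arguments, kDontSaveFPRegs);
    }

    void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
      const Runtime::Function* function = Runtime::FunctionForId(id);
      CallRuntime(function, function->nargs, kSaveFPRegs);
    }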
 
......
 }
 
 
+void MacroAssembler::LookupNumberStringCache(Register object,
+                                             Register result,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Register scratch3,
+                                             Label* not_found) {
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch3;
+
+  // Load the number string cache.
+  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide length by two (length is a smi).
+  mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
+  sub(mask, mask, Operand(1));  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label is_smi;
+  Label load_result_from_cache;
+  JumpIfSmi(object, &is_smi);
+  CheckMap(object,
+           scratch1,
+           Heap::kHeapNumberMapRootIndex,
+           not_found,
+           DONT_DO_SMI_CHECK);
+
+  STATIC_ASSERT(8 == kDoubleSize);
+  add(scratch1,
+      object,
+      Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+  ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+  eor(scratch1, scratch1, Operand(scratch2));
+  and_(scratch1, scratch1, Operand(mask));
+
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  add(scratch1,
+      number_string_cache,
+      Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+  Register probe = mask;
+  ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  JumpIfSmi(probe, not_found);
+  sub(scratch2, object, Operand(kHeapObjectTag));
+  vldr(d0, scratch2, HeapNumber::kValueOffset);
+  sub(probe, probe, Operand(kHeapObjectTag));
+  vldr(d1, probe, HeapNumber::kValueOffset);
+  VFPCompareAndSetFlags(d0, d1);
+  b(ne, not_found);  // The cache did not contain this value.
+  b(&load_result_from_cache);
+
+  bind(&is_smi);
+  Register scratch = scratch1;
+  and_(scratch, mask, Operand(object, ASR, 1));
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  add(scratch,
+      number_string_cache,
+      Operand(scratch, LSL, kPointerSizeLog2 + 1));
+
+  // Check if the entry is the smi we are looking for.
+  ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  cmp(object, probe);
+  b(ne, not_found);
+
+  // Get the result from the cache.
+  bind(&load_result_from_cache);
+  ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+  IncrementCounter(isolate()->counters()->number_to_string_native(),
+                   1,
+                   scratch1,
+                   scratch2);
+}
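
Note: the number-to-string cache is a FixedArray of (number, string) pairs, so the mask is length/2 - 1 and each entry spans two pointers (hence the shift by kPointerSizeLog2 + 1). For smis the hash is the untagged value (object ASR 1); for heap numbers it is the xor of the double's two machine words, mirrored here in host C++:

    #include <cstdint>
    #include <cstring>

    // Mirrors the ldm/eor/and sequence above. 'mask' is
    // (cache length / 2) - 1, derived from the FixedArray length.
    uint32_t NumberStringCacheHash(double value, uint32_t mask) {
      uint32_t words[2];
      std::memcpy(words, &value, sizeof words);  // lower and upper word
      return (words[0] ^ words[1]) & mask;
    }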
+
+
 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,
......
                                Register dst,
                                Register length,
                                Register scratch) {
-  Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
 
   // Align src before copying in word size chunks.
-  bind(&align_loop);
-  cmp(length, Operand::Zero());
-  b(eq, &done);
+  cmp(length, Operand(kPointerSize));
+  b(le, &byte_loop);
+
   bind(&align_loop_1);
   tst(src, Operand(kPointerSize - 1));
   b(eq, &word_loop);
   ldrb(scratch, MemOperand(src, 1, PostIndex));
   strb(scratch, MemOperand(dst, 1, PostIndex));
   sub(length, length, Operand(1), SetCC);
-  b(ne, &byte_loop_1);
-
+  b(&align_loop_1);
   // Copy bytes in word size chunks.
   bind(&word_loop);
   if (emit_debug_code()) {
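
Note: the preamble now routes copies of at most kPointerSize bytes straight to the byte loop, and the alignment loop branches back to align_loop_1 until src reaches word alignment. Previously the first aligning byte ended with b(ne, &byte_loop_1), so any copy from an unaligned source degraded to byte-at-a-time and never reached the word-sized loop.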
......
 
 void MacroAssembler::TestJSArrayForAllocationMemento(
     Register receiver_reg,
-    Register scratch_reg) {
-  Label no_memento_available;
+    Register scratch_reg,
+    Label* no_memento_found) {
   ExternalReference new_space_start =
       ExternalReference::new_space_start(isolate());
   ExternalReference new_space_allocation_top =
......
   add(scratch_reg, receiver_reg,
       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
   cmp(scratch_reg, Operand(new_space_start));
-  b(lt, &no_memento_available);
+  b(lt, no_memento_found);
   mov(ip, Operand(new_space_allocation_top));
   ldr(ip, MemOperand(ip));
   cmp(scratch_reg, ip);
-  b(gt, &no_memento_available);
+  b(gt, no_memento_found);
   ldr(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
   cmp(scratch_reg,
-      Operand(Handle<Map>(isolate()->heap()->allocation_memento_map())));
-  bind(&no_memento_available);
+      Operand(isolate()->factory()->allocation_memento_map()));
 }
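
Note: the helper no longer binds a local fall-through label. The two bounds checks branch to the caller-supplied no_memento_found, and when control falls out the flags of the final cmp are still set, so the caller decides what eq (memento map found behind the array) means. The map is also now obtained as a handle from the factory rather than by wrapping the raw heap pointer. A hypothetical caller (real call sites live outside this file):

    Label no_memento_found, has_memento;
    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
                                    &no_memento_found);
    b(eq, &has_memento);   // eq iff an AllocationMemento map was matched
    bind(&no_memento_found);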
 
 
