deps/v8/src/arm/deoptimizer-arm.cc @ f230a1cf

// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

const int Deoptimizer::table_entry_size_ = 12;
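// (Each table entry emitted by TableEntryGenerator::GeneratePrologue() below
// is a three-instruction mov/push/b sequence, i.e. 3 * Assembler::kInstrSize
// == 12 bytes.)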


int Deoptimizer::patch_size() {
  const int kCallInstructionSizeInWords = 3;
  return kCallInstructionSizeInWords * Assembler::kInstrSize;
}
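// (Three words leave room for the longest call sequence the assembler may
// emit for an arbitrary 32-bit target, e.g. a movw/movt pair plus blx; the
// ASSERTs in PatchCodeForDeoptimization verify the actual call fits.)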


void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();
  // Invalidate the relocation information, as the code patching below will
  // make it invalid, and it is not needed any more.
  code->InvalidateRelocation();

  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
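    // Entries with a pc of -1 have no lazy-deopt call site and are skipped.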
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    // We need calls to have a predictable size in the unoptimized code, but
    // this is optimized code, so we don't have to have a predictable size.
    int call_size_in_bytes =
        MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
                                                       RelocInfo::NONE32);
    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
    ASSERT(call_size_in_bytes <= patch_size());
    CodePatcher patcher(call_address, call_size_in_words);
    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
    ASSERT(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    ASSERT(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee-saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}


void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
  ApiFunction function(descriptor->deoptimization_handler_);
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->environment_length();
  output_frame->SetRegister(r0.code(), params);
  output_frame->SetRegister(r1.code(), handler);
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}


bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  // There is no dynamic alignment padding on ARM in the input frame.
  return false;
}


#define __ masm()->

// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  // Everything but pc, lr and sp, which will be saved but not restored.
  RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();

  const int kDoubleRegsSize =
      kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;

  // Save all allocatable VFP registers before messing with them.
  ASSERT(kDoubleRegZero.code() == 14);
  ASSERT(kScratchDoubleReg.code() == 15);

  // Check CPU flags for number of registers, setting the Z condition flag.
  __ CheckFor32DRegs(ip);

  // Push registers d0-d13, and possibly d16-d31, on the stack.
  // If d16-d31 are not pushed, decrease the stack pointer instead.
  __ vstm(db_w, sp, d16, d31, ne);
  __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
  __ vstm(db_w, sp, d0, d13);

  // Push all 16 registers (needed to populate FrameDescription::registers_).
  // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
  // handle this a bit differently.
  __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
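
  // The stack now holds, from sp towards higher addresses:
  //   r0..r15                        (kNumberOfRegisters * kPointerSize)
  //   d0..d13                        (14 * kDoubleSize)
  //   d16..d31 or a same-size gap    (16 * kDoubleSize)
  //   the bailout id pushed by the deopt entry table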

  // Get the bailout id from the stack.
  __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object (r3) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r4.
  __ mov(r3, lr);
  // Correct one word for bailout id.
  __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  __ sub(r4, fp, r4);
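  // (sp + kSavedRegistersAreaSize + one word is the optimized frame's sp at
  // the deopt point, so r4 is that frame's fp-to-sp distance.)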

  // Allocate a new deoptimizer object.
  // Pass four arguments in r0 to r3 and the fifth and sixth arguments on
  // the stack.
  __ PrepareCallCFunction(6, r5);
  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(r1, Operand(type()));  // Bailout type.
  // r2: bailout id already loaded.
  // r3: code address or 0 already loaded.
  __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
  __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }
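  // (The six arguments are the function, bailout type, bailout id, code
  // address, fp-to-sp delta and isolate; r0 now holds the new Deoptimizer*.)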

  // Preserve "deoptimizer" object in register r0 and get the input
  // frame descriptor pointer to r1 (deoptimizer->input_).
  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ ldr(r2, MemOperand(sp, i * kPointerSize));
    __ str(r2, MemOperand(r1, offset));
  }

  // Copy VFP registers to
  // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters].
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
    __ vldr(d0, sp, src_offset);
    __ vstr(d0, r1, dst_offset);
  }
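  // (src_offset skips the 16 core-register words that sit below the saved
  // doubles on the stack.)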

  // Remove the bailout id and the saved registers from the stack.
  __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

  // Compute a pointer to the unwinding limit in register r2; that is
  // the first stack slot not part of the input frame.
  __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
  __ add(r2, r2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ b(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(r4);
  __ str(r4, MemOperand(r3, 0));
  __ add(r3, r3, Operand(sizeof(uint32_t)));
  __ bind(&pop_loop_header);
  __ cmp(r2, sp);
  __ b(ne, &pop_loop);
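  // The whole input frame has now been copied into input_->frame_content_
  // and sp has been unwound to the limit computed above.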

  // Compute the output frames in the deoptimizer.
  __ push(r0);  // Preserve deoptimizer object across call.
  // r0: deoptimizer object; r1: scratch.
  __ PrepareCallCFunction(1, r1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  // Outer loop state: r4 = current "FrameDescription** output_",
  // r1 = one past the last FrameDescription**.
  __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
  __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset()));  // r4 is output_.
  __ add(r1, r4, Operand(r1, LSL, 2));
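  // (LSL #2 scales the count in r1 by the 4-byte pointer size, so r1 ends up
  // one past the last output_[i] slot.)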
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
  __ ldr(r2, MemOperand(r4, 0));  // output_[ix]
  __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ sub(r3, r3, Operand(sizeof(uint32_t)));
  __ add(r6, r2, Operand(r3));
  __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
  __ push(r6);
  __ bind(&inner_loop_header);
  __ cmp(r3, Operand::Zero());
  __ b(ne, &inner_push_loop);  // test for gt?
  __ add(r4, r4, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ cmp(r4, r1);
  __ b(lt, &outer_push_loop);
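  // All output frames are now materialized on the stack, and r2 still points
  // at the last output FrameDescription, which is used below.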

  // Check CPU flags for number of registers, setting the Z condition flag.
  __ CheckFor32DRegs(ip);

  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
  int src_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
    if (i == kDoubleRegZero.code()) continue;
    if (i == kScratchDoubleReg.code()) continue;

    const DwVfpRegister reg = DwVfpRegister::from_code(i);
    __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
    src_offset += kDoubleSize;
  }
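  // (d0..d13 are reloaded unconditionally; d16..d31 use the ne condition so
  // they are only touched when CheckFor32DRegs found 32 D-registers. d14 and
  // d15 are skipped: they are kDoubleRegZero and kScratchDoubleReg.)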

  // Push state, pc, and continuation from the last output frame.
  __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
  __ push(r6);
  __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
  __ push(r6);
  __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
  __ push(r6);

  // Push the registers from the last output frame.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ ldr(r6, MemOperand(r2, offset));
    __ push(r6);
  }

  // Restore the registers from the stack.
  __ ldm(ia_w, sp, restored_regs);  // all but pc registers.
  __ pop(ip);  // remove sp
  __ pop(ip);  // remove lr

  __ InitializeRootRegister();

  __ pop(ip);  // remove pc
  __ pop(ip);  // get continuation, leave pc on stack
  __ pop(lr);
  __ Jump(ip);
  __ stop("Unreachable.");
}


void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
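  // Each entry is exactly
  //   mov ip, #i
  //   push {ip}
  //   b done
  // i.e. three instructions of Assembler::kInstrSize bytes each, which the
  // ASSERT below checks against table_entry_size_.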
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ mov(ip, Operand(i));
    __ push(ip);
    __ b(&done);
    ASSERT(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
}


void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


#undef __

} }  // namespace v8::internal