Revision f230a1cf deps/v8/src/arm/deoptimizer-arm.cc
deps/v8/src/arm/deoptimizer-arm.cc | ||
---|---|---|
81 | 81 |
} |
82 | 82 |
|
83 | 83 |
|
84 |
// Raw instruction bits of the conditional branch ("bpl ok") that the full
// code generator emits before the interrupt call at a back edge; used to
// recognize and to restore the unpatched form.
// NOTE(review): 0x5a000004 = cond PL (0101) branch with imm24 = 4 — the exact
// byte distance depends on V8's Assembler::b() offset convention; confirm
// against RevertInterruptCodeAt's b(4 * kInstrSize, pl) before relying on it.
static const int32_t kBranchBeforeInterrupt = 0x5a000004;

// The back edge bookkeeping code matches the pattern:
//
//  <decrement profiling counter>
//  2a 00 00 01       bpl ok
//  e5 9f c? ??       ldr ip, [pc, <interrupt stub address>]
//  e1 2f ff 3c       blx ip
//  ok-label
//
// We patch the code to the following form (the branch is replaced by a NOP,
// and the constant-pool slot is redirected to the on-stack replacement stub,
// so the call is taken unconditionally):
//
//  <decrement profiling counter>
//  e1 a0 00 00       mov r0, r0 (NOP)
//  e5 9f c? ??       ldr ip, [pc, <on-stack replacement address>]
//  e1 2f ff 3c       blx ip
//  ok-label
|
101 |
|
|
102 |
// Rewrites the back edge sequence ending at |pc_after| so that the stub call
// is taken unconditionally and targets |replacement_code| (the OSR stub)
// instead of the interrupt stub.
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
                                       Address pc_after,
                                       Code* replacement_code) {
  static const int kInstrSize = Assembler::kInstrSize;
  // Replace the guarding "bpl ok" (three instructions back) with a NOP so
  // execution always falls through into the ldr/blx pair.
  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
  patcher.masm()->nop();
  // The ldr two instructions back is pc-relative; its low 12 bits hold the
  // immediate offset of the constant-pool slot. Since ARM reads pc as
  // instruction address + 8, the slot lives at pc_after + offset.
  uint32_t ldr_offset = Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
  Address call_target_slot = pc_after + ldr_offset;
  // Redirect the slot to the replacement stub's entry point.
  Memory::uint32_at(call_target_slot) =
      reinterpret_cast<uint32_t>(replacement_code->entry());

  // Tell the incremental marker about the embedded code target we just wrote.
  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
}
|
119 |
|
|
120 |
|
|
121 |
// Undoes PatchInterruptCodeAt: restores the conditional branch that skips the
// stub call and points the constant-pool slot back at |interrupt_code|.
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
                                        Address pc_after,
                                        Code* interrupt_code) {
  static const int kInstrSize = Assembler::kInstrSize;
  // Re-emit "bpl ok" over the NOP three instructions back; the ok-label sits
  // four instructions after the branch.
  CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
  patcher.masm()->b(4 * kInstrSize, pl);
  // Self-check: the instruction we just emitted must have the canonical bits
  // that GetInterruptPatchState matches against.
  ASSERT_EQ(kBranchBeforeInterrupt,
            Memory::int32_at(pc_after - 3 * kInstrSize));
  // Decode the pc-relative ldr's 12-bit immediate to find the constant-pool
  // slot (ARM pc reads as instruction address + 8, i.e. pc_after here).
  uint32_t ldr_offset = Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
  Address call_target_slot = pc_after + ldr_offset;
  // Restore the interrupt stub as the call target.
  Memory::uint32_at(call_target_slot) =
      reinterpret_cast<uint32_t>(interrupt_code->entry());

  // Notify the incremental marker of the restored embedded code target.
  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
}
|
140 |
|
|
141 |
|
|
142 |
#ifdef DEBUG
// Debug-only classifier: inspects the instructions before |pc_after| and
// reports whether the back edge is in its original form (NOT_PATCHED, calling
// the interrupt stub) or has been rewritten for OSR (PATCHED_FOR_OSR),
// asserting that every other instruction matches the expected pattern.
Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
    Isolate* isolate,
    Code* unoptimized_code,
    Address pc_after) {
  static const int kInstrSize = Assembler::kInstrSize;
  // The sequence always ends with "blx ip".
  ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);

  // Both forms load the call target via the same pc-relative ldr; decode its
  // 12-bit immediate to locate the constant-pool slot (pc reads as
  // instruction address + 8, i.e. pc_after here).
  ASSERT(Assembler::IsLdrPcImmediateOffset(
      Assembler::instr_at(pc_after - 2 * kInstrSize)));
  uint32_t ldr_offset = Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
  Address call_target_slot = pc_after + ldr_offset;

  if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
    // Patched form: branch replaced by NOP, slot must hold the OSR builtin.
    Code* osr_builtin =
        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
    ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
           Memory::uint32_at(call_target_slot));
    return PATCHED_FOR_OSR;
  }

  // Original form: the guarding branch is intact and the slot must hold the
  // interrupt-check builtin fetched from the isolate's builtin cache.
  ASSERT_EQ(kBranchBeforeInterrupt,
            Memory::int32_at(pc_after - 3 * kInstrSize));
  Code* interrupt_builtin =
      isolate->builtins()->builtin(Builtins::kInterruptCheck);
  ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
         Memory::uint32_at(call_target_slot));
  return NOT_PATCHED;
}
#endif  // DEBUG
|
176 |
|
|
177 |
|
|
178 | 84 |
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { |
179 | 85 |
// Set the register values. The values are not important as there are no |
180 | 86 |
// callee saved registers in JavaScript frames, so all registers are |
... | ... | |
201 | 107 |
ApiFunction function(descriptor->deoptimization_handler_); |
202 | 108 |
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); |
203 | 109 |
intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); |
204 |
int params = descriptor->register_param_count_; |
|
205 |
if (descriptor->stack_parameter_count_ != NULL) { |
|
206 |
params++; |
|
207 |
} |
|
110 |
int params = descriptor->environment_length(); |
|
208 | 111 |
output_frame->SetRegister(r0.code(), params); |
209 | 112 |
output_frame->SetRegister(r1.code(), handler); |
210 | 113 |
} |
... | ... | |
362 | 265 |
__ bind(&inner_push_loop); |
363 | 266 |
__ sub(r3, r3, Operand(sizeof(uint32_t))); |
364 | 267 |
__ add(r6, r2, Operand(r3)); |
365 |
__ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset()));
|
|
366 |
__ push(r7);
|
|
268 |
__ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
|
|
269 |
__ push(r6);
|
|
367 | 270 |
__ bind(&inner_loop_header); |
368 | 271 |
__ cmp(r3, Operand::Zero()); |
369 | 272 |
__ b(ne, &inner_push_loop); // test for gt? |
... | ... | |
409 | 312 |
__ InitializeRootRegister(); |
410 | 313 |
|
411 | 314 |
__ pop(ip); // remove pc |
412 |
__ pop(r7); // get continuation, leave pc on stack
|
|
315 |
__ pop(ip); // get continuation, leave pc on stack
|
|
413 | 316 |
__ pop(lr); |
414 |
__ Jump(r7);
|
|
317 |
__ Jump(ip);
|
|
415 | 318 |
__ stop("Unreachable."); |
416 | 319 |
} |
417 | 320 |
|
Also available in: Unified diff