Revision f230a1cf deps/v8/src/ia32/deoptimizer-ia32.cc
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -177,87 +177,6 @@
 }
 
 
-static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x11;
-static const byte kCallInstruction = 0xe8;
-static const byte kNopByteOne = 0x66;
-static const byte kNopByteTwo = 0x90;
-
-// The back edge bookkeeping code matches the pattern:
-//
-//     sub <profiling_counter>, <delta>
-//     jns ok
-//     call <interrupt stub>
-//   ok:
-//
-// The patched back edge looks like this:
-//
-//     sub <profiling_counter>, <delta> ;; Not changed
-//     nop
-//     nop
-//     call <on-stack replacment>
-//   ok:
-
-void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
-                                       Address pc_after,
-                                       Code* replacement_code) {
-  // Turn the jump into nops.
-  Address call_target_address = pc_after - kIntSize;
-  *(call_target_address - 3) = kNopByteOne;
-  *(call_target_address - 2) = kNopByteTwo;
-  // Replace the call address.
-  Assembler::set_target_address_at(call_target_address,
-                                   replacement_code->entry());
-
-  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, replacement_code);
-}
-
-
-void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
-                                        Address pc_after,
-                                        Code* interrupt_code) {
-  // Restore the original jump.
-  Address call_target_address = pc_after - kIntSize;
-  *(call_target_address - 3) = kJnsInstruction;
-  *(call_target_address - 2) = kJnsOffset;
-  // Restore the original call address.
-  Assembler::set_target_address_at(call_target_address,
-                                   interrupt_code->entry());
-
-  interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
-      unoptimized_code, call_target_address, interrupt_code);
-}
-
-
-#ifdef DEBUG
-Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
-    Isolate* isolate,
-    Code* unoptimized_code,
-    Address pc_after) {
-  Address call_target_address = pc_after - kIntSize;
-  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
-  if (*(call_target_address - 3) == kNopByteOne) {
-    ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
-    Code* osr_builtin =
-        isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-    ASSERT_EQ(osr_builtin->entry(),
-              Assembler::target_address_at(call_target_address));
-    return PATCHED_FOR_OSR;
-  } else {
-    // Get the interrupt stub code object to match against from cache.
-    Code* interrupt_builtin =
-        isolate->builtins()->builtin(Builtins::kInterruptCheck);
-    ASSERT_EQ(interrupt_builtin->entry(),
-              Assembler::target_address_at(call_target_address));
-    ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
-    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
-    return NOT_PATCHED;
-  }
-}
-#endif  // DEBUG
-
-
 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
   // Set the register values. The values are not important as there are no
   // callee saved registers in JavaScript frames, so all registers are
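The hunk above removes the ia32 back-edge patching helpers from this file. For orientation, the removed code rewrites the five bytes ending at `pc_after`: the two-byte `jns ok` (0x79 0x11) becomes the two-byte nop `0x66 0x90`, and the 32-bit operand of the following `call` (opcode 0xe8, asserted as `kCallInstruction`) is redirected to the OSR builtin; `RevertInterruptCodeAt` undoes both writes. A minimal standalone sketch of that byte surgery, assuming a writable code buffer and a precomputed relative displacement (neither is how the real Assembler helpers work; this is illustration only):

// Standalone sketch, not V8 code.  `pc_after` points just past the 32-bit
// call operand; buffer writability and the displacement value are assumed.
#include <stdint.h>
#include <string.h>

void PatchBackEdgeSketch(uint8_t* pc_after, int32_t call_displacement) {
  uint8_t* call_target_address = pc_after - 4;  // kIntSize on ia32
  // jns ok (0x79 0x11) -> 2-byte nop (0x66 0x90), so the call always runs.
  call_target_address[-3] = 0x66;               // kNopByteOne
  call_target_address[-2] = 0x90;               // kNopByteTwo
  // The call keeps its 0xe8 opcode at call_target_address[-1]; only the
  // relative operand changes (the real code goes through
  // Assembler::set_target_address_at instead of a raw memcpy).
  memcpy(call_target_address, &call_displacement, sizeof(call_displacement));
}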
@@ -283,16 +202,14 @@
     FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
   intptr_t handler =
       reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
-  int params = descriptor->register_param_count_;
-  if (descriptor->stack_parameter_count_ != NULL) {
-    params++;
-  }
+  int params = descriptor->environment_length();
   output_frame->SetRegister(eax.code(), params);
   output_frame->SetRegister(ebx.code(), handler);
 }
 
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+  if (!CpuFeatures::IsSupported(SSE2)) return;
   for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
     double double_value = input_->GetDoubleRegister(i);
     output_frame->SetDoubleRegister(i, double_value);
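Two independent changes land in the hunk above. The parameter count for compiled stub frames now comes from the descriptor itself: `environment_length()` replaces the open-coded `register_param_count_` plus optional stack-parameter slot. A hedged sketch of what that accessor presumably computes, inferred only from the removed lines (the real definition lives in the CodeStubInterfaceDescriptor header, which is not part of this diff):

// Illustrative only; inferred from the lines deleted above, not the real header.
int CodeStubInterfaceDescriptor::environment_length() const {
  int params = register_param_count_;
  if (stack_parameter_count_ != NULL) params++;  // one extra slot for the count
  return params;
}

Separately, `CopyDoubleRegisters` now returns early when SSE2 is unavailable, presumably because a non-SSE2 ia32 frame holds no XMM double values worth copying.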
@@ -330,7 +247,7 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int offset = i * kDoubleSize;
-      __ movdbl(Operand(esp, offset), xmm_reg);
+      __ movsd(Operand(esp, offset), xmm_reg);
     }
   }
 
@@ -382,8 +299,8 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       int dst_offset = i * kDoubleSize + double_regs_offset;
       int src_offset = i * kDoubleSize;
-      __ movdbl(xmm0, Operand(esp, src_offset));
-      __ movdbl(Operand(ebx, dst_offset), xmm0);
+      __ movsd(xmm0, Operand(esp, src_offset));
+      __ movsd(Operand(ebx, dst_offset), xmm0);
     }
   }
 
@@ -468,7 +385,7 @@
     for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
       XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
       int src_offset = i * kDoubleSize + double_regs_offset;
-      __ movdbl(xmm_reg, Operand(ebx, src_offset));
+      __ movsd(xmm_reg, Operand(ebx, src_offset));
     }
   }
 
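The three hunks above are one mechanical rename: the ia32 macro-assembler helper `movdbl` becomes `movsd`, matching the underlying SSE2 mnemonic, so the emitted save/restore code for the allocatable XMM registers should be unchanged. For orientation, the emitter calls map to roughly the following instructions (Intel syntax; a reading aid, not output captured from this revision):

//   __ movsd(Operand(esp, offset), xmm_reg);     // movsd [esp+offset], xmm_reg    (64-bit store)
//   __ movsd(xmm0, Operand(esp, src_offset));    // movsd xmm0, [esp+src_offset]   (64-bit load)
//   __ movsd(xmm_reg, Operand(ebx, src_offset)); // movsd xmm_reg, [ebx+src_offset]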