The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.
Please select the desired protocol below to get the URL.
This URL has Read-Only access.
main_repo / deps / v8 / src / arm / assembler-arm.cc @ f230a1cf
History | View | Annotate | Download (108 KB)
1 |
// Copyright (c) 1994-2006 Sun Microsystems Inc.
|
---|---|
2 |
// All Rights Reserved.
|
3 |
//
|
4 |
// Redistribution and use in source and binary forms, with or without
|
5 |
// modification, are permitted provided that the following conditions
|
6 |
// are met:
|
7 |
//
|
8 |
// - Redistributions of source code must retain the above copyright notice,
|
9 |
// this list of conditions and the following disclaimer.
|
10 |
//
|
11 |
// - Redistribution in binary form must reproduce the above copyright
|
12 |
// notice, this list of conditions and the following disclaimer in the
|
13 |
// documentation and/or other materials provided with the
|
14 |
// distribution.
|
15 |
//
|
16 |
// - Neither the name of Sun Microsystems or the names of contributors may
|
17 |
// be used to endorse or promote products derived from this software without
|
18 |
// specific prior written permission.
|
19 |
//
|
20 |
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
21 |
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
22 |
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
23 |
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
|
24 |
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
25 |
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
26 |
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
27 |
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
28 |
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
|
29 |
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
30 |
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
|
31 |
// OF THE POSSIBILITY OF SUCH DAMAGE.
|
32 |
|
33 |
// The original source code covered by the above license above has been
|
34 |
// modified significantly by Google Inc.
|
35 |
// Copyright 2012 the V8 project authors. All rights reserved.
|
36 |
|
37 |
#include "v8.h" |
38 |
|
39 |
#if V8_TARGET_ARCH_ARM
|
40 |
|
41 |
#include "arm/assembler-arm-inl.h" |
42 |
#include "macro-assembler.h" |
43 |
#include "serialize.h" |
44 |
|
45 |
namespace v8 {
|
46 |
namespace internal {
|
47 |
|
48 |
#ifdef DEBUG
// Guards against querying feature bits before Probe() has run (debug only).
bool CpuFeatures::initialized_ = false;
#endif
// Bitset of CPU features usable by generated code (1u << feature enum).
unsigned CpuFeatures::supported_ = 0;
// Subset of supported_ discovered only by runtime probing; these must not be
// relied on when generating a snapshot.
unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
unsigned CpuFeatures::cross_compile_ = 0;
// Data cache line size in bytes; default 64, lowered to 32 for some cores
// during Probe().
unsigned CpuFeatures::cache_line_size_ = 64;
|
56 |
|
57 |
// Expose the CpuFeatures::supported_ bitset to generated code as an
// external reference. Only valid after CpuFeatures::Probe() has run.
ExternalReference ExternalReference::cpu_features() {
  ASSERT(CpuFeatures::initialized_);
  return ExternalReference(&CpuFeatures::supported_);
}
61 |
|
62 |
|
63 |
// Get the CPU features enabled by the build. For cross compilation the
|
64 |
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
|
65 |
// can be defined to enable ARMv7 and VFPv3 instructions when building the
|
66 |
// snapshot.
|
67 |
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot. Each compile-time capability is still gated on the matching
// runtime flag, so flags can disable features the compiler would allow.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) {
    answer |= 1u << ARMv7;
  }
#endif  // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) {
    // VFPv3 implies ARMv7, so set both bits together.
    answer |= 1u << VFP3 | 1u << ARMv7;
  }
#endif  // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
  if (FLAG_enable_32dregs) {
    answer |= 1u << VFP32DREGS;
  }
#endif  // CAN_USE_VFP32DREGS
  // Unaligned accesses are only assumed safe on ARMv7 or later.
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }

  return answer;
}
90 |
|
91 |
|
92 |
// Map a register-allocator index to the printable name of the VFP double
// register it denotes. Allocation indices skip the reserved registers
// (kDoubleRegZero .. kScratchDoubleReg), so indices at or past the reserved
// range are shifted up before the name lookup.
const char* DwVfpRegister::AllocationIndexToString(int index) {
  ASSERT(index >= 0 && index < NumAllocatableRegisters());
  // The reserved registers are assumed to form one contiguous run.
  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
         kNumReservedRegisters - 1);
  if (index >= kDoubleRegZero.code())
    index += kNumReservedRegisters;

  return VFPRegisters::Name(index, true);
}
101 |
|
102 |
|
103 |
// Determine the set of CPU features available to generated code.
// Build-implied features are always included; runtime probing is skipped
// when serializing (snapshot code must run on the minimal feature set).
// On a simulator build (#ifndef __arm__) features come purely from flags;
// on real hardware they are probed from the CPU.
void CpuFeatures::Probe() {
  // NOTE(review): standard_features is declared uint64_t but supported_ is
  // unsigned; the feature bits used here all fit in 32 bits — confirm.
  uint64_t standard_features = static_cast<unsigned>(
      OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
  ASSERT(supported_ == 0 || supported_ == standard_features);
#ifdef DEBUG
  initialized_ = true;
#endif

  // Get the features implied by the OS and the compiler settings. This is the
  // minimal set of features which is also allowed for generated code in the
  // snapshot.
  supported_ |= standard_features;

  if (Serializer::enabled()) {
    // No probing for features if we might serialize (generate snapshot).
    printf(" ");
    PrintFeatures();
    return;
  }

#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
  if (FLAG_enable_vfp3) {
    supported_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;
  }
  if (FLAG_enable_neon) {
    supported_ |= 1u << NEON;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
    supported_ |= static_cast<uint64_t>(1) << ARMv7;
  }

  if (FLAG_enable_sudiv) {
    supported_ |= static_cast<uint64_t>(1) << SUDIV;
  }

  if (FLAG_enable_movw_movt) {
    supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
  }

  if (FLAG_enable_32dregs) {
    supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
  }

  if (FLAG_enable_unaligned_accesses) {
    supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
  }

#else  // __arm__
  // Probe for additional features not already known to be available.
  CPU cpu;
  if (!IsSupported(VFP3) && FLAG_enable_vfp3 && cpu.has_vfp3()) {
    // This implementation also sets the VFP flags if runtime
    // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    // 0406B, page A1-6.
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << VFP3 |
        static_cast<uint64_t>(1) << ARMv7;
  }

  if (!IsSupported(NEON) && FLAG_enable_neon && cpu.has_neon()) {
    found_by_runtime_probing_only_ |= 1u << NEON;
  }

  if (!IsSupported(ARMv7) && FLAG_enable_armv7 && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
  }

  if (!IsSupported(SUDIV) && FLAG_enable_sudiv && cpu.has_idiva()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
  }

  if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
      && cpu.architecture() >= 7) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
  }

  // Use movw/movt for QUALCOMM ARMv7 cores.
  if (cpu.implementer() == CPU::QUALCOMM &&
      cpu.architecture() >= 7 &&
      FLAG_enable_movw_movt) {
    found_by_runtime_probing_only_ |=
        static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
  }

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == CPU::ARM &&
      (cpu.part() == CPU::ARM_CORTEX_A5 ||
       cpu.part() == CPU::ARM_CORTEX_A9)) {
    cache_line_size_ = 32;
  }

  if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs && cpu.has_vfp3_d32()) {
    found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
  }

  supported_ |= found_by_runtime_probing_only_;
#endif

  // Assert that VFP3 implies ARMv7.
  ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
}
210 |
|
211 |
|
212 |
// Print a one-line description of the compile-time target configuration
// (architecture, simulator/test mode, FPU variant, thumb, float ABI).
// All values here are decided by preprocessor symbols, not runtime probing.
void CpuFeatures::PrintTarget() {
  const char* arm_arch = NULL;
  const char* arm_test = "";
  const char* arm_fpu = "";
  const char* arm_thumb = "";
  const char* arm_float_abi = NULL;

#if defined CAN_USE_ARMV7_INSTRUCTIONS
  arm_arch = "arm v7";
#else
  arm_arch = "arm v6";
#endif

#ifdef __arm__
  // Native ARM build: float ABI is queried from the OS at runtime.
# ifdef ARM_TEST
  arm_test = " test";
# endif
# if defined __ARM_NEON__
  arm_fpu = " neon";
# elif defined CAN_USE_VFP3_INSTRUCTIONS
  arm_fpu = " vfp3";
# else
  arm_fpu = " vfp2";
# endif
# if (defined __thumb__) || (defined __thumb2__)
  arm_thumb = " thumb";
# endif
  arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";

#else  // __arm__
  // Simulator build: everything is fixed at compile time.
  arm_test = " simulator";
# if defined CAN_USE_VFP3_INSTRUCTIONS
#  if defined CAN_USE_VFP32DREGS
  arm_fpu = " vfp3";
#  else
  arm_fpu = " vfp3-d16";
#  endif
# else
  arm_fpu = " vfp2";
# endif
# if USE_EABI_HARDFLOAT == 1
  arm_float_abi = "hard";
# else
  arm_float_abi = "softfp";
# endif

#endif  // __arm__

  printf("target%s %s%s%s %s\n",
         arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
}
265 |
|
266 |
|
267 |
// Print the currently supported feature bits plus the float ABI in use.
// Intended for diagnostics (e.g. when generating a snapshot).
void CpuFeatures::PrintFeatures() {
  printf(
    "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
    "MOVW_MOVT_IMMEDIATE_LOADS=%d",
    CpuFeatures::IsSupported(ARMv7),
    CpuFeatures::IsSupported(VFP3),
    CpuFeatures::IsSupported(VFP32DREGS),
    CpuFeatures::IsSupported(NEON),
    CpuFeatures::IsSupported(SUDIV),
    CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
    CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
  // Native build: ask the OS which float ABI the process actually uses.
  bool eabi_hardfloat = OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
  bool eabi_hardfloat = true;
#else
  bool eabi_hardfloat = false;
#endif
    printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
287 |
|
288 |
|
289 |
// -----------------------------------------------------------------------------
|
290 |
// Implementation of RelocInfo
|
291 |
|
292 |
const int RelocInfo::kApplyMask = 0; |
293 |
|
294 |
|
295 |
bool RelocInfo::IsCodedSpecially() {
|
296 |
// The deserializer needs to know whether a pointer is specially coded. Being
|
297 |
// specially coded on ARM means that it is a movw/movt instruction. We don't
|
298 |
// generate those yet.
|
299 |
return false; |
300 |
} |
301 |
|
302 |
|
303 |
// Overwrite the code at the relocation's current address (pc_) with the
// supplied instruction sequence, then flush the instruction cache so the
// processor picks up the new code.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  Instr* destination = reinterpret_cast<Instr*>(pc_);
  const Instr* source = reinterpret_cast<const Instr*>(instructions);
  // Copy instruction_count words from source into the code stream.
  for (int remaining = instruction_count; remaining > 0; --remaining) {
    *destination++ = *source++;
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
314 |
|
315 |
|
316 |
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  // Not needed on ARM yet; aborts if ever reached.
  UNIMPLEMENTED();
}
322 |
|
323 |
|
324 |
// -----------------------------------------------------------------------------
|
325 |
// Implementation of Operand and MemOperand
|
326 |
// See assembler-arm-inl.h for inlined constructors
|
327 |
|
328 |
// Build an immediate operand from a handle. Heap objects are embedded via
// the handle location and tagged with EMBEDDED_OBJECT relocation so the GC
// can update them; smis are embedded directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE32;
  }
}
343 |
|
344 |
|
345 |
// Build a shifted-register operand: rm shifted by an immediate amount.
// RRX (rotate right with extend) has no shift amount of its own; the ARM
// encoding represents it as ROR with a zero immediate, which is why a plain
// ROR #0 is rejected by the assert below.
Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
  ASSERT(is_uint5(shift_imm));
  ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it

  rm_ = rm;
  rs_ = no_reg;
  if (shift_op == RRX) {
    // Encoded as ROR with shift_imm == 0.
    ASSERT(shift_imm == 0);
    shift_op_ = ROR;
    shift_imm_ = 0;
  } else {
    shift_op_ = shift_op;
    shift_imm_ = shift_imm & 31;
  }
}
359 |
|
360 |
|
361 |
// Build a shifted-register operand: rm shifted by the amount held in rs.
// RRX is excluded because it never takes a register-specified amount.
Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
  ASSERT(shift_op != RRX);
  rm_ = rm;
  shift_op_ = shift_op;
  // The original code assigned rs_ = no_reg and then immediately overwrote
  // it with rs; the dead store has been removed.
  rs_ = rs;
}
368 |
|
369 |
|
370 |
// Memory operand: [rn, #offset] with the given addressing mode.
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
  rn_ = rn;
  rm_ = no_reg;  // no index register — immediate-offset form
  offset_ = offset;
  am_ = am;
}


// Memory operand: [rn, rm] (register offset, no shift).
MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
  rn_ = rn;
  rm_ = rm;
  shift_op_ = LSL;   // LSL #0 == unshifted index register
  shift_imm_ = 0;
  am_ = am;
}


// Memory operand: [rn, rm, shift_op #shift_imm].
MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}
396 |
|
397 |
|
398 |
// NEON memory operand without a register increment.
NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
  ASSERT((am == Offset) || (am == PostIndex));
  rn_ = rn;
  // NOTE(review): pc/sp here appear to be the NEON load/store Rm encodings
  // for "no writeback" vs "writeback, no register" — confirm against the
  // VLD/VST instruction encoding.
  rm_ = (am == Offset) ? pc : sp;
  SetAlignment(align);
}


// NEON memory operand with post-increment by register rm.
NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
  rn_ = rn;
  rm_ = rm;
  SetAlignment(align);
}
411 |
|
412 |
|
413 |
void NeonMemOperand::SetAlignment(int align) { |
414 |
switch (align) {
|
415 |
case 0: |
416 |
align_ = 0;
|
417 |
break;
|
418 |
case 64: |
419 |
align_ = 1;
|
420 |
break;
|
421 |
case 128: |
422 |
align_ = 2;
|
423 |
break;
|
424 |
case 256: |
425 |
align_ = 3;
|
426 |
break;
|
427 |
default:
|
428 |
UNREACHABLE(); |
429 |
align_ = 0;
|
430 |
break;
|
431 |
} |
432 |
} |
433 |
|
434 |
|
435 |
// Build a NEON register-list operand covering registers_count consecutive
// double registers starting at base. Valid counts are 1 through 4; each maps
// to the corresponding list-type encoding.
NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
  base_ = base;
  if (registers_count == 1) {
    type_ = nlt_1;
  } else if (registers_count == 2) {
    type_ = nlt_2;
  } else if (registers_count == 3) {
    type_ = nlt_3;
  } else if (registers_count == 4) {
    type_ = nlt_4;
  } else {
    UNREACHABLE();
    type_ = nlt_1;  // defined fallback for release builds
  }
}
456 |
|
457 |
|
458 |
// -----------------------------------------------------------------------------
|
459 |
// Specific instructions, constants, and masks.
|
460 |
|
461 |
// -----------------------------------------------------------------------------
// Precomputed instruction encodings and bit masks used by the pattern
// matchers (IsPush, IsLdrPcImmediateOffset, ...) and by the peephole
// optimizations below. Bn constants denote single bit positions.

// add(sp, sp, 4) instruction (aka Pop())
const Instr kPopInstruction =
    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
        kRegister_sp_Code * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
// mov lr, pc
const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
// mov/mvn: the Flip constants toggle between the paired instructions so an
// immediate that doesn't fit one form can be re-encoded in the other.
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwMask = 0xff * B20;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | kRegister_fp_Code * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | kRegister_fp_Code * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | kRegister_fp_Code * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | kRegister_fp_Code * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
513 |
|
514 |
|
515 |
// Construct an assembler writing into the given buffer (or an internally
// allocated one if buffer is NULL — handled by AssemblerBase). Relocation
// info grows downward from the end of the buffer while code grows upward.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  num_pending_reloc_info_ = 0;
  num_pending_64_bit_reloc_info_ = 0;
  next_buffer_check_ = 0;          // forces an early constant-pool check
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  first_const_pool_use_ = -1;      // -1 means no pending constant-pool entry
  last_bound_pos_ = 0;
  ClearRecordedAstId();
}


Assembler::~Assembler() {
  // All BlockConstPoolScope regions must have been exited.
  ASSERT(const_pool_blocked_nesting_ == 0);
}
534 |
|
535 |
|
536 |
// Finalize code generation: force out any pending constant pool, then fill
// in the code descriptor with buffer location, instruction size and the
// size of the relocation info (which grows down from the buffer end).
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
548 |
|
549 |
|
550 |
// Pad the code stream with nops until pc_offset() is a multiple of m
// (m must be a power of two, at least the 4-byte instruction size).
void Assembler::Align(int m) {
  ASSERT(m >= 4 && IsPowerOf2(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() {
  // Preferred alignment of jump targets on some ARM chips.
  Align(8);
}
562 |
|
563 |
|
564 |
// Extract the condition field (top 4 bits) of an instruction.
Condition Assembler::GetCondition(Instr instr) {
  return Instruction::ConditionField(instr);
}


// True for b/bl/blx with a 24-bit immediate (bits 27 and 25 set).
bool Assembler::IsBranch(Instr instr) {
  return (instr & (B27 | B25)) == (B27 | B25);
}


int Assembler::GetBranchOffset(Instr instr) {
  ASSERT(IsBranch(instr));
  // Take the jump offset in the lower 24 bits, sign extend it and multiply it
  // with 4 to get the offset in bytes. (<< 8 then >> 6 does both at once.)
  return ((instr & kImm24Mask) << 8) >> 6;
}


// True for ldr with a 12-bit immediate offset.
bool Assembler::IsLdrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}


// True for vldr (double) with an 8-bit immediate offset.
bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
  return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
}
590 |
|
591 |
|
592 |
// Extract the signed byte offset of an ldr-immediate instruction.
// The encoding stores the offset magnitude in the low 12 bits and the sign
// in the U bit (B23): set means add, clear means subtract.
int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
  ASSERT(IsLdrRegisterImmediate(instr));
  const int magnitude = instr & kOff12Mask;  // zero-extended 12-bit field
  return ((instr & B23) == B23) ? magnitude : -magnitude;
}


// Extract the signed byte offset of a vldr-double instruction.
// The 8-bit field counts words, so the magnitude is scaled by 4; the sign
// again lives in the U bit (B23).
int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  const int magnitude = (instr & kOff8Mask) << 2;  // word count -> bytes
  return ((instr & B23) == B23) ? magnitude : -magnitude;
}
607 |
|
608 |
|
609 |
// Rewrite the immediate offset of an ldr instruction: the magnitude goes in
// the low 12 bits and the sign in the U bit (B23).
Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsLdrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


// Rewrite the immediate offset of a vldr-double instruction. The byte offset
// must be word-aligned; it is stored as a word count in the low 8 bits with
// the sign in the U bit (B23).
Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsVldrDRegisterImmediate(instr));
  ASSERT((offset & ~3) == offset);  // Must be 64-bit aligned.
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint10(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset. Its bottom 2 bits are zero.
  return (instr & ~kOff8Mask) | (offset >> 2);
}
632 |
|
633 |
|
634 |
// True for str with a 12-bit immediate offset.
bool Assembler::IsStrRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
}


// Rewrite the immediate offset of a str instruction; same encoding as the
// ldr case (magnitude in low 12 bits, sign in the U bit).
Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsStrRegisterImmediate(instr));
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  ASSERT(is_uint12(offset));
  // Set bit indicating whether the offset should be added.
  instr = (instr & ~B23) | (positive ? B23 : 0);
  // Set the actual offset.
  return (instr & ~kOff12Mask) | offset;
}


// True for add with an immediate operand.
bool Assembler::IsAddRegisterImmediate(Instr instr) {
  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
}


// Rewrite the (non-negative) immediate of an add instruction.
Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
  ASSERT(IsAddRegisterImmediate(instr));
  ASSERT(offset >= 0);
  ASSERT(is_uint12(offset));
  // Set the offset.
  return (instr & ~kOff12Mask) | offset;
}
663 |
|
664 |
|
665 |
// Extract the destination register (Rd field) from an instruction.
Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RdValue(instr);
  return reg;
}


// Extract the first operand register (Rn field) from an instruction.
Register Assembler::GetRn(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RnValue(instr);
  return reg;
}


// Extract the second operand register (Rm field) from an instruction.
Register Assembler::GetRm(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RmValue(instr);
  return reg;
}
684 |
|
685 |
|
686 |
// True for push(r): str r, [sp, #-4]! — the Rd field is masked out so any
// source register matches.
bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


// True for pop(r): ldr r, [sp], #4 — Rd is masked out.
bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


// The four fp-relative predicates compare only the instruction-type half
// of the word; register and offset fields are ignored.
bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  return (instr & kLdrPCMask) == kLdrPCPattern;
}


bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
  // Check the instruction is indeed a
  // vldr<cond> <Dd>, [pc +/- offset_10].
  return (instr & kVldrDPCMask) == kVldrDPCPattern;
}


// True for tst with an immediate operand.
bool Assembler::IsTstImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | TST | S);
}


// True for cmp with a register operand (B4 clear excludes the
// register-shifted-register forms that overlap this encoding space).
bool Assembler::IsCmpRegister(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
      (CMP | S);
}


// True for cmp with an immediate operand.
bool Assembler::IsCmpImmediate(Instr instr) {
  return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
      (I | CMP | S);
}


// Register being compared by a cmp-immediate instruction.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return GetRn(instr);
}


// Raw (unrotated) 12-bit immediate field of a cmp-immediate instruction.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  ASSERT(IsCmpImmediate(instr));
  return instr & kOff12Mask;
}
758 |
|
759 |
|
760 |
// Labels refer to positions in the (to be) generated code.
|
761 |
// There are bound, linked, and unused labels.
|
762 |
//
|
763 |
// Bound labels refer to known positions in the already
|
764 |
// generated code. pos() is the position the label refers to.
|
765 |
//
|
766 |
// Linked labels refer to unknown positions in the code
|
767 |
// to be generated; pos() is the position of the last
|
768 |
// instruction using the label.
|
769 |
//
|
770 |
// The linked labels form a link chain by making the branch offset
|
771 |
// in the instruction steam to point to the previous branch
|
772 |
// instruction using the same label.
|
773 |
//
|
774 |
// The link chain is terminated by a branch offset pointing to the
|
775 |
// same position.
|
776 |
|
777 |
|
778 |
// Return the target position encoded at code position pos in a label's link
// chain. Two encodings exist: a raw 24-bit value (a link emitted for a label
// load, interpreted as an absolute position) or a b/bl/blx branch whose
// signed imm24 is pc-relative.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign-extend imm24 and convert to a byte offset (<< 8 then >> 6).
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  // Branch offsets are relative to pc, which reads 8 bytes ahead.
  return pos + kPcLoadDelta + imm26;
}
793 |
|
794 |
|
795 |
// Resolve the link at code position pos to point at target_pos. For a raw
// 24-bit label link this patches the following mov/movw-movt (or
// mov/orr/orr) sequence to materialize the target as a constant; for a
// b/bl/blx branch it rewrites the pc-relative imm24 field in place.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    ASSERT(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // Here are the instructions we need to emit:
    //   For ARMv7: target24 => target16_1:target16_0
    //      movw dst, #target16_0
    //      movt dst, #target16_1
    //   For ARMv6: target24 => target8_2:target8_1:target8_0
    //      mov dst, #target8_0
    //      orr dst, dst, #target8_1 << 8
    //      orr dst, dst, #target8_2 << 16

    // We extract the destination register from the emitted nop instruction.
    Register dst = Register::from_code(
        Instruction::RmValue(instr_at(pos + kInstrSize)));
    ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
    // Bias by the code-object header so the value is a raw offset from the
    // (tagged) code pointer.
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    ASSERT(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov
      // instruction.
      CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                          1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              1,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              2,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
                              3,
                              CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  // Branch case: recompute the pc-relative offset and patch imm24 in place.
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
879 |
|
880 |
|
881 |
// Debug helper: print the state of a label. For a linked label, walk the
// link chain on a local copy and describe each link site (either a raw
// value link or a conditional branch mnemonic).
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // copy so the caller's label is not consumed
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        // Raw 24-bit label link, not a branch.
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          // Condition suffix for the mnemonic.
          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
937 |
|
938 |
|
939 |
// Resolve a label to position 'pos': patch every instruction on its link
// chain to target 'pos', then mark the label as bound.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    const int fixup_pos = L->pos();
    // Advance first: target_at_put overwrites the link stored at fixup_pos.
    next(L);
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any
  // instructions before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
953 |
|
954 |
|
955 |
// Bind label L to the current pc offset.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // a label can only be bound once
  const int here = pc_offset();
  bind_to(L, here);
}
959 |
|
960 |
|
961 |
// Step a linked label back to the previous entry of its link chain, or
// mark it unused when the chain is exhausted.
void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  const int link = target_at(L->pos());
  if (link != L->pos()) {
    ASSERT(link >= 0);
    L->link_to(link);
  } else {
    // A branch that targets its own position terminates the chain.
    L->Unuse();
  }
}
973 |
|
974 |
|
975 |
// Low-level code emission routines depending on the addressing mode.
|
976 |
// If this returns true then you have to use the rotate_imm and immed_8
|
977 |
// that it returns, because it may have already changed the instruction
|
978 |
// to match them!
|
979 |
static bool fits_shifter(uint32_t imm32, |
980 |
uint32_t* rotate_imm, |
981 |
uint32_t* immed_8, |
982 |
Instr* instr) { |
983 |
// imm32 must be unsigned.
|
984 |
for (int rot = 0; rot < 16; rot++) { |
985 |
uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot)); |
986 |
if ((imm8 <= 0xff)) { |
987 |
*rotate_imm = rot; |
988 |
*immed_8 = imm8; |
989 |
return true; |
990 |
} |
991 |
} |
992 |
// If the opcode is one with a complementary version and the complementary
|
993 |
// immediate fits, change the opcode.
|
994 |
if (instr != NULL) { |
995 |
if ((*instr & kMovMvnMask) == kMovMvnPattern) {
|
996 |
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { |
997 |
*instr ^= kMovMvnFlip; |
998 |
return true; |
999 |
} else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) { |
1000 |
if (CpuFeatures::IsSupported(ARMv7)) {
|
1001 |
if (imm32 < 0x10000) { |
1002 |
*instr ^= kMovwLeaveCCFlip; |
1003 |
*instr |= EncodeMovwImmediate(imm32); |
1004 |
*rotate_imm = *immed_8 = 0; // Not used for movw. |
1005 |
return true; |
1006 |
} |
1007 |
} |
1008 |
} |
1009 |
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) { |
1010 |
if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) { |
1011 |
*instr ^= kCmpCmnFlip; |
1012 |
return true; |
1013 |
} |
1014 |
} else {
|
1015 |
Instr alu_insn = (*instr & kALUMask); |
1016 |
if (alu_insn == ADD ||
|
1017 |
alu_insn == SUB) { |
1018 |
if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) { |
1019 |
*instr ^= kAddSubFlip; |
1020 |
return true; |
1021 |
} |
1022 |
} else if (alu_insn == AND || |
1023 |
alu_insn == BIC) { |
1024 |
if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) { |
1025 |
*instr ^= kAndBicFlip; |
1026 |
return true; |
1027 |
} |
1028 |
} |
1029 |
} |
1030 |
} |
1031 |
return false; |
1032 |
} |
1033 |
|
1034 |
|
1035 |
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
//
// Returns true when this operand's value must be accompanied by relocation
// information (and hence typically a constant pool entry).
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    // Predictable code size forces the reloc info so code layout stays fixed.
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    return Serializer::enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  // Any other reloc mode always needs the info.
  return true;
}
1053 |
|
1054 |
|
1055 |
static bool use_movw_movt(const Operand& x, const Assembler* assembler) { |
1056 |
if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
|
1057 |
return true; |
1058 |
} |
1059 |
if (x.must_output_reloc_info(assembler)) {
|
1060 |
return false; |
1061 |
} |
1062 |
return CpuFeatures::IsSupported(ARMv7);
|
1063 |
} |
1064 |
|
1065 |
|
1066 |
bool Operand::is_single_instruction(const Assembler* assembler, |
1067 |
Instr instr) const {
|
1068 |
if (rm_.is_valid()) return true; |
1069 |
uint32_t dummy1, dummy2; |
1070 |
if (must_output_reloc_info(assembler) ||
|
1071 |
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) { |
1072 |
// The immediate operand cannot be encoded as a shifter operand, or use of
|
1073 |
// constant pool is required. For a mov instruction not setting the
|
1074 |
// condition code additional instruction conventions can be used.
|
1075 |
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set |
1076 |
return !use_movw_movt(*this, assembler); |
1077 |
} else {
|
1078 |
// If this is not a mov or mvn instruction there will always an additional
|
1079 |
// instructions - either mov or ldr. The mov might actually be two
|
1080 |
// instructions mov or movw followed by movt so including the actual
|
1081 |
// instruction two or three instructions will be generated.
|
1082 |
return false; |
1083 |
} |
1084 |
} else {
|
1085 |
// No use of constant pool and the immediate operand can be encoded as a
|
1086 |
// shifter operand.
|
1087 |
return true; |
1088 |
} |
1089 |
} |
1090 |
|
1091 |
|
1092 |
// Load a full 32-bit immediate into rd: emit a movw/movt pair when allowed
// and profitable, otherwise fall back to a constant pool load
// (ldr rd, [pc, #0], patched later to address the pool entry).
void Assembler::move_32_bit_immediate(Condition cond,
                                      Register rd,
                                      SBit s,
                                      const Operand& x) {
  // movw/movt cannot write pc and never sets the condition flags.
  if (rd.code() != pc.code() && s == LeaveCC) {
    if (use_movw_movt(x, this)) {
      if (x.must_output_reloc_info(this)) {
        RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
        // Make sure the movw/movt doesn't get separated.
        BlockConstPoolFor(2);
      }
      // movw: load the low half-word (0x30*B20 is the movw opcode field).
      emit(cond | 0x30*B20 | rd.code()*B12 |
           EncodeMovwImmediate(x.imm32_ & 0xffff));
      // movt: load the high half-word.
      movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
      return;
    }
  }

  // Constant pool fallback.
  RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
  ldr(rd, MemOperand(pc, 0), cond);
}
1113 |
|
1114 |
|
1115 |
// Emit a data-processing instruction (ARM addressing mode 1). When an
// immediate operand cannot be encoded as a shifter operand, the value is
// materialized (via ip or a constant pool load) and the instruction retried.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    // Note: fits_shifter may rewrite 'instr' to a complementary opcode.
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(cond, rd, LeaveCC, x);
      } else {
        if ((instr & kMovMvnMask) == kMovMvnPattern) {
          // Moves need to use a constant pool entry.
          RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
          ldr(ip, MemOperand(pc, 0), cond);
        } else if (x.must_output_reloc_info(this)) {
          // Otherwise, use most efficient form of fetching from constant pool.
          move_32_bit_immediate(cond, ip, LeaveCC, x);
        } else {
          // If this is not a mov or mvn instruction we may still be able to
          // avoid a constant pool entry by using mvn or movw.
          mov(ip, x, LeaveCC, cond);
        }
        // Retry with the materialized value as a register operand in ip.
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}
1167 |
|
1168 |
|
1169 |
// Emit a word/byte load-store (ARM addressing mode 2). Oversized immediate
// offsets are materialized into ip and the instruction retried with a
// register offset.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      // Negative offsets are encoded as a positive magnitude with U flipped.
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
1199 |
|
1200 |
|
1201 |
// Emit a halfword/signed-byte/doubleword load-store (ARM addressing mode 3).
// Offsets that cannot be encoded (8-bit immediate limit; no scaled register
// form exists) are materialized into ip and the instruction retried.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      // Negative offsets are encoded as a positive magnitude with U flipped.
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // Mode-3 immediates are split into a high and low nibble.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
1238 |
|
1239 |
|
1240 |
// Emit a load/store-multiple instruction (ARM addressing mode 4) with base
// register rn and register list rl.
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);         // the register list must name at least one register
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}
1246 |
|
1247 |
|
1248 |
// Emit a coprocessor load/store (ARM addressing mode 5) with an immediate
// word offset; the offset is stored as a word count in 8 bits.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;               // encode as a word count
  if (offset_8 < 0) {
    // Negative offsets are encoded as a positive magnitude with U flipped.
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
1271 |
|
1272 |
|
1273 |
// Compute the pc-relative offset to label L for a branch emitted at the
// current position, linking the label if it is not yet bound.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    // Extend (or start) the link chain: a linked label records the previous
    // use; an unused label starts a chain by pointing at itself.
    target_pos = L->is_linked() ? L->pos() : pc_offset();
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction
  // must be emitted at the pc offset recorded by the label.
  BlockConstPoolFor(1);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
1293 |
|
1294 |
|
1295 |
// Branch instructions.
|
1296 |
void Assembler::b(int branch_offset, Condition cond) { |
1297 |
ASSERT((branch_offset & 3) == 0); |
1298 |
int imm24 = branch_offset >> 2; |
1299 |
ASSERT(is_int24(imm24)); |
1300 |
emit(cond | B27 | B25 | (imm24 & kImm24Mask)); |
1301 |
|
1302 |
if (cond == al) {
|
1303 |
// Dead code is a good location to emit the constant pool.
|
1304 |
CheckConstPool(false, false); |
1305 |
} |
1306 |
} |
1307 |
|
1308 |
|
1309 |
void Assembler::bl(int branch_offset, Condition cond) { |
1310 |
positions_recorder()->WriteRecordedPositions(); |
1311 |
ASSERT((branch_offset & 3) == 0); |
1312 |
int imm24 = branch_offset >> 2; |
1313 |
ASSERT(is_int24(imm24)); |
1314 |
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask)); |
1315 |
} |
1316 |
|
1317 |
|
1318 |
// Branch with link and exchange to a pc-relative offset (v5 and above).
// Bit 1 of the halfword-aligned offset is carried in the H bit (B24).
void Assembler::blx(int branch_offset) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT((branch_offset & 1) == 0);       // offset must be halfword-aligned
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
1326 |
|
1327 |
|
1328 |
// Branch with link and exchange to the address held in 'target'.
void Assembler::blx(Register target, Condition cond) {  // v5 and above
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
}
1333 |
|
1334 |
|
1335 |
// Branch and exchange to the address held in 'target'.
void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
  positions_recorder()->WriteRecordedPositions();
  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
  emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
}
1340 |
|
1341 |
|
1342 |
// Data-processing instructions.
|
1343 |
|
1344 |
// Bitwise AND: dst = src1 & src2, optionally updating the condition flags.
void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  const Instr op = AND | s | cond;  // opcode, S bit and condition combined
  addrmod1(op, src1, dst, src2);
}
1348 |
|
1349 |
|
1350 |
// Bitwise exclusive OR: dst = src1 ^ src2, optionally updating the flags.
void Assembler::eor(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = EOR | s | cond;
  addrmod1(op, src1, dst, src2);
}
1354 |
|
1355 |
|
1356 |
// Subtract: dst = src1 - src2, optionally updating the condition flags.
void Assembler::sub(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = SUB | s | cond;
  addrmod1(op, src1, dst, src2);
}
1360 |
|
1361 |
|
1362 |
// Reverse subtract: dst = src2 - src1, optionally updating the flags.
void Assembler::rsb(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = RSB | s | cond;
  addrmod1(op, src1, dst, src2);
}
1366 |
|
1367 |
|
1368 |
// Add: dst = src1 + src2, optionally updating the condition flags.
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = ADD | s | cond;
  addrmod1(op, src1, dst, src2);
}
1372 |
|
1373 |
|
1374 |
// Add with carry: dst = src1 + src2 + C, optionally updating the flags.
void Assembler::adc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = ADC | s | cond;
  addrmod1(op, src1, dst, src2);
}
1378 |
|
1379 |
|
1380 |
// Subtract with carry: dst = src1 - src2 - !C, optionally updating flags.
void Assembler::sbc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = SBC | s | cond;
  addrmod1(op, src1, dst, src2);
}
1384 |
|
1385 |
|
1386 |
// Reverse subtract with carry: dst = src2 - src1 - !C.
void Assembler::rsc(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = RSC | s | cond;
  addrmod1(op, src1, dst, src2);
}
1390 |
|
1391 |
|
1392 |
// Test: set the condition flags on src1 & src2; the result is discarded
// (rd field is unused, r0 is a placeholder).
void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
  const Instr op = TST | S | cond;  // S is mandatory for compare-class ops
  addrmod1(op, src1, r0, src2);
}
1395 |
|
1396 |
|
1397 |
// Test equivalence: set the flags on src1 ^ src2; result discarded.
void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
  const Instr op = TEQ | S | cond;
  addrmod1(op, src1, r0, src2);
}
1400 |
|
1401 |
|
1402 |
// Compare: set the flags on src1 - src2; result discarded.
void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
  const Instr op = CMP | S | cond;
  addrmod1(op, src1, r0, src2);
}
1405 |
|
1406 |
|
1407 |
// Compare src against a caller-pre-encoded 12-bit shifter immediate; the
// raw 12 bits (rotate + imm8) are emitted verbatim, bypassing fits_shifter.
void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  ASSERT(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
1412 |
|
1413 |
|
1414 |
// Compare negative: set the flags on src1 + src2; result discarded.
void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
  const Instr op = CMN | S | cond;
  addrmod1(op, src1, r0, src2);
}
1417 |
|
1418 |
|
1419 |
// Bitwise OR: dst = src1 | src2, optionally updating the condition flags.
void Assembler::orr(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = ORR | s | cond;
  addrmod1(op, src1, dst, src2);
}
1423 |
|
1424 |
|
1425 |
// Move: dst = src, optionally updating the condition flags.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  if (dst.is(pc)) {
    // Writing pc transfers control; flush recorded source positions first.
    positions_recorder()->WriteRecordedPositions();
  }
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}
1435 |
|
1436 |
|
1437 |
// Load dst with the label's position measured from the start of the code
// object (including the header). For an unbound label, a raw link word plus
// nop placeholders are emitted and patched later when the label is bound.
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    // Position is known now; emit an ordinary immediate load.
    mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
  } else {
    // Emit the link to the label in the code stream followed by extra nop
    // instructions.
    // If the label is not linked, then start a new link chain by linking it to
    // itself, emitting pc_offset().
    int link = label->is_linked() ? label->pos() : pc_offset();
    label->link_to(pc_offset());

    // When the label is bound, these instructions will be patched with a
    // sequence of movw/movt or mov/orr/orr instructions. They will load the
    // destination register with the position of the label from the beginning
    // of the code.
    //
    // The link will be extracted from the first instruction and the destination
    // register from the second.
    // For ARMv7:
    //   link
    //   mov dst, dst
    // For ARMv6:
    //   link
    //   mov dst, dst
    //   mov dst, dst
    //
    // When the label gets bound: target_at extracts the link and target_at_put
    // patches the instructions.
    ASSERT(is_uint24(link));  // link must fit the bare-value encoding
    // Keep the link word and its placeholders contiguous.
    BlockConstPoolScope block_const_pool(this);
    emit(link);
    nop(dst.code());
    if (!CpuFeatures::IsSupported(ARMv7)) {
      // Pre-v7 patching needs a third instruction (mov/orr/orr).
      nop(dst.code());
    }
  }
}
1474 |
|
1475 |
|
1476 |
// Load a 16-bit immediate into reg (top half cleared by the mov semantics).
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  ASSERT(immediate < 0x10000);
  // May use movw if supported, but on unsupported platforms will try to use
  // equivalent rotated immed_8 value and other tricks before falling back to a
  // constant pool load.
  mov(reg, Operand(immediate), LeaveCC, cond);
}
1483 |
|
1484 |
|
1485 |
// movt: load a 16-bit immediate into the top half-word of reg, leaving the
// low half-word unchanged (0x34*B20 selects the movt opcode).
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1488 |
|
1489 |
|
1490 |
// Bit clear: dst = src1 & ~src2, optionally updating the condition flags.
void Assembler::bic(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  const Instr op = BIC | s | cond;
  addrmod1(op, src1, dst, src2);
}
1494 |
|
1495 |
|
1496 |
// Move negated: dst = ~src, optionally updating the condition flags.
void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
  const Instr op = MVN | s | cond;  // rn field unused; r0 is a placeholder
  addrmod1(op, r0, dst, src);
}
1499 |
|
1500 |
|
1501 |
// Multiply instructions.
|
1502 |
// Multiply-accumulate: dst = src1 * src2 + srcA.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  // Note: dst occupies bits 16-19 in the multiply encodings.
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1508 |
|
1509 |
|
1510 |
// Multiply-subtract: dst = srcA - src1 * src2.
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                    Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1516 |
|
1517 |
|
1518 |
// Signed divide: dst = src1 / src2. Requires hardware divide (SUDIV).
void Assembler::sdiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(IsEnabled(SUDIV));
  emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
       src2.code()*B8 | B4 | src1.code());
}
1525 |
|
1526 |
|
1527 |
// Multiply: dst = src1 * src2, optionally updating the condition flags.
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
1533 |
|
1534 |
|
1535 |
// Signed multiply-accumulate long: dstH:dstL += src1 * src2 (64-bit).
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // halves of the 64-bit result must be distinct
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1546 |
|
1547 |
|
1548 |
// Signed multiply long: dstH:dstL = src1 * src2 (64-bit result).
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // halves of the 64-bit result must be distinct
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1559 |
|
1560 |
|
1561 |
// Unsigned multiply-accumulate long: dstH:dstL += src1 * src2 (64-bit).
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // halves of the 64-bit result must be distinct
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1572 |
|
1573 |
|
1574 |
// Unsigned multiply long: dstH:dstL = src1 * src2 (64-bit result).
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // halves of the 64-bit result must be distinct
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1585 |
|
1586 |
|
1587 |
// Miscellaneous arithmetic instructions.
|
1588 |
// Miscellaneous arithmetic instructions.
//
// Count leading zeros: dst = number of leading zero bits of src.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | CLZ | src.code());
}
1594 |
|
1595 |
|
1596 |
// Saturating instructions.
|
1597 |
|
1598 |
// Unsigned saturate.
|
1599 |
// Unsigned saturate: dst = src (optionally shifted) clamped to the range
// [0, 2^satpos - 1].
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // v6 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
  ASSERT((satpos >= 0) && (satpos <= 31));
  // Only an immediate ASR or LSL shift of the source is encodable.
  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  ASSERT(src.rs_.is(no_reg));

  int sh = 0;  // sh bit: 0 = LSL, 1 = ASR
  if (src.shift_op_ == ASR) {
      sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
1618 |
|
1619 |
|
1620 |
// Bitfield manipulation instructions.
|
1621 |
|
1622 |
// Unsigned bit field extract.
|
1623 |
// Extracts #width adjacent bits from position #lsb in a register, and
|
1624 |
// writes them to the low bits of a destination register.
|
1625 |
// ubfx dst, src, #lsb, #width
|
1626 |
// Bitfield manipulation instructions.
//
// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register.
//   ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // width is encoded as (width - 1) in bits 16-20.
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1639 |
|
1640 |
|
1641 |
// Signed bit field extract.
|
1642 |
// Extracts #width adjacent bits from position #lsb in a register, and
|
1643 |
// writes them to the low bits of a destination register. The extracted
|
1644 |
// value is sign extended to fill the destination register.
|
1645 |
// sbfx dst, src, #lsb, #width
|
1646 |
// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
//   sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // width is encoded as (width - 1); B22 clear distinguishes sbfx from ubfx.
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1659 |
|
1660 |
|
1661 |
// Bit field clear.
|
1662 |
// Sets #width adjacent bits at position #lsb in the destination register
|
1663 |
// to zero, preserving the value of the other bits.
|
1664 |
// bfc dst, #lsb, #width
|
1665 |
// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
//   bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The encoding carries the msb of the cleared field, not its width.
  int msb = lsb + width - 1;
  // Rm == 0xf marks bfc (vs. bfi which carries a source register).
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
1674 |
|
1675 |
|
1676 |
// Bit field insert.
|
1677 |
// Inserts #width adjacent bits from the low bits of the source register
|
1678 |
// into position #lsb of the destination register.
|
1679 |
// bfi dst, src, #lsb, #width
|
1680 |
// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
//   bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  // The encoding carries the msb of the inserted field, not its width.
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
1694 |
|
1695 |
|
1696 |
// Pack halfword bottom-top: combine the bottom halfword of src1 with the
// top halfword of src2 (src2 may carry an LSL immediate shift).
void Assembler::pkhbt(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond ) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
  ASSERT(src2.shift_op() == LSL);
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       src2.shift_imm_*B7 | B4 | src2.rm().code());
}
1713 |
|
1714 |
|
1715 |
// Pack halfword top-bottom: combine the top halfword of src1 with the
// bottom halfword of src2 (src2 carries an ASR immediate shift).
void Assembler::pkhtb(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
  ASSERT(src2.shift_op() == ASR);
  // ASR #32 is encoded as a zero shift amount.
  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       asr*B7 | B6 | B4 | src2.rm().code());
}
1733 |
|
1734 |
|
1735 |
// Zero-extend byte: dst = zero-extend of the low byte of src.rm() after an
// optional ROR by 0/8/16/24.
void Assembler::uxtb(Register dst,
                     const Operand& src,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.274.
  // cond(31-28) | 01101110(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src.rm().is(pc));
  ASSERT(!src.rm().is(no_reg));
  ASSERT(src.rs().is(no_reg));
  ASSERT((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  ASSERT(src.shift_op() == ROR);
  // shift_imm / 8 goes into the 2-bit rotate field (bits 10-11).
  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
1753 |
|
1754 |
|
1755 |
// Zero-extend byte and add: dst = src1 + zero-extended low byte of
// src2.rm() after an optional ROR by 0/8/16/24.
void Assembler::uxtab(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.271.
  // cond(31-28) | 01101110(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src1.is(pc));
  ASSERT(!src2.rm().is(pc));
  ASSERT(!src2.rm().is(no_reg));
  ASSERT(src2.rs().is(no_reg));
  ASSERT((src2.shift_imm_ == 0) ||
         (src2.shift_imm_ == 8) ||
         (src2.shift_imm_ == 16) ||
         (src2.shift_imm_ == 24));
  ASSERT(src2.shift_op() == ROR);
  // shift_imm / 8 goes into the 2-bit rotate field (bits 10-11).
  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
1775 |
|
1776 |
|
1777 |
void Assembler::uxtb16(Register dst,
                       const Operand& src,
                       Condition cond) {
  // UXTB16: zero-extend bytes 0 and 2 of the (byte-rotated) src into the two
  // halfwords of dst.
  // Instruction details available in ARM DDI 0406C.b, A8.8.275.
  // cond(31-28) | 01101100(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  ASSERT(!dst.is(pc));
  ASSERT(!src.rm().is(pc));
  ASSERT(!src.rm().is(no_reg));
  ASSERT(src.rs().is(no_reg));
  // Only byte-granular rotations are encodable in the 2-bit rotate field.
  ASSERT((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  ASSERT(src.shift_op() == ROR);
  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
1795 |
|
1796 |
|
1797 |
// Status register access instructions.
|
1798 |
// Status register access instructions.

// MRS: move the selected status register (CPSR/SPSR) into dst.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
1802 |
|
1803 |
|
1804 |
// MSR: write src into the status-register byte lanes selected by fields.
// An immediate that cannot be encoded as a rotated 8-bit value is first
// materialized into ip via a constant-pool load.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // At least one field must be set.
  Instr operand_bits;
  if (!src.rm_.is_valid()) {
    // Immediate operand.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_output_reloc_info(this) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // The immediate cannot be encoded directly: load it into ip from the
      // constant pool and retry with a register operand.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);
      msr(fields, Operand(ip), cond);
      return;
    }
    operand_bits = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // Only rm allowed.
    operand_bits = src.rm_.code();
  }
  emit(cond | operand_bits | B24 | B21 | fields | 15*B12);
}
1827 |
|
1828 |
|
1829 |
// Load/Store instructions.
|
1830 |
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { |
1831 |
if (dst.is(pc)) {
|
1832 |
positions_recorder()->WriteRecordedPositions(); |
1833 |
} |
1834 |
addrmod2(cond | B26 | L, dst, src); |
1835 |
} |
1836 |
|
1837 |
|
1838 |
void Assembler::str(Register src, const MemOperand& dst, Condition cond) { |
1839 |
addrmod2(cond | B26, src, dst); |
1840 |
} |
1841 |
|
1842 |
|
1843 |
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) { |
1844 |
addrmod2(cond | B26 | B | L, dst, src); |
1845 |
} |
1846 |
|
1847 |
|
1848 |
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) { |
1849 |
addrmod2(cond | B26 | B, src, dst); |
1850 |
} |
1851 |
|
1852 |
|
1853 |
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) { |
1854 |
addrmod3(cond | L | B7 | H | B4, dst, src); |
1855 |
} |
1856 |
|
1857 |
|
1858 |
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) { |
1859 |
addrmod3(cond | B7 | H | B4, src, dst); |
1860 |
} |
1861 |
|
1862 |
|
1863 |
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) { |
1864 |
addrmod3(cond | L | B7 | S6 | B4, dst, src); |
1865 |
} |
1866 |
|
1867 |
|
1868 |
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { |
1869 |
addrmod3(cond | L | B7 | S6 | H | B4, dst, src); |
1870 |
} |
1871 |
|
1872 |
|
1873 |
// LDRD: load a doubleword into the even/odd register pair <dst1, dst2>.
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  // The architecture requires an even first register and a consecutive pair.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}
1882 |
|
1883 |
|
1884 |
// STRD: store the even/odd register pair <src1, src2> as a doubleword.
void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  // Check feature availability first, mirroring ldrd() so the two emitters
  // fail the same way on pre-ARMv7 targets.
  ASSERT(IsEnabled(ARMv7));
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  // The architecture requires an even first register and a consecutive pair.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
1893 |
|
1894 |
|
1895 |
// Preload instructions.
|
1896 |
void Assembler::pld(const MemOperand& address) { |
1897 |
// Instruction details available in ARM DDI 0406C.b, A8.8.128.
|
1898 |
// 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
|
1899 |
// 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
|
1900 |
ASSERT(address.rm().is(no_reg)); |
1901 |
ASSERT(address.am() == Offset); |
1902 |
int U = B23;
|
1903 |
int offset = address.offset();
|
1904 |
if (offset < 0) { |
1905 |
offset = -offset; |
1906 |
U = 0;
|
1907 |
} |
1908 |
ASSERT(offset < 4096);
|
1909 |
emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 | |
1910 |
0xf*B12 | offset);
|
1911 |
} |
1912 |
|
1913 |
|
1914 |
// Load/Store multiple instructions.
|
1915 |
// Load/Store multiple instructions.

// LDM: load the registers in dst from consecutive words at base.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1934 |
|
1935 |
|
1936 |
// STM: store the registers in src to consecutive words at base.
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1942 |
|
1943 |
|
1944 |
// Exception-generating instructions and debugging support.
|
1945 |
// Stops with a non-negative code less than kNumOfWatchedStops support
|
1946 |
// enabling/disabling and a counter feature. See simulator-arm.h .
|
1947 |
void Assembler::stop(const char* msg, Condition cond, int32_t code) { |
1948 |
#ifndef __arm__
|
1949 |
ASSERT(code >= kDefaultStopCode); |
1950 |
{ |
1951 |
// The Simulator will handle the stop instruction and get the message
|
1952 |
// address. It expects to find the address just after the svc instruction.
|
1953 |
BlockConstPoolScope block_const_pool(this);
|
1954 |
if (code >= 0) { |
1955 |
svc(kStopCode + code, cond); |
1956 |
} else {
|
1957 |
svc(kStopCode + kMaxStopCode, cond); |
1958 |
} |
1959 |
emit(reinterpret_cast<Instr>(msg));
|
1960 |
} |
1961 |
#else // def __arm__ |
1962 |
if (cond != al) {
|
1963 |
Label skip; |
1964 |
b(&skip, NegateCondition(cond)); |
1965 |
bkpt(0);
|
1966 |
bind(&skip); |
1967 |
} else {
|
1968 |
bkpt(0);
|
1969 |
} |
1970 |
#endif // def __arm__ |
1971 |
} |
1972 |
|
1973 |
|
1974 |
// BKPT: software breakpoint with a 16-bit immediate split across the
// encoding (bits 19-8 and 3-0).
void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
}
1978 |
|
1979 |
|
1980 |
// SVC (formerly SWI): supervisor call with a 24-bit immediate.
void Assembler::svc(uint32_t imm24, Condition cond) {
  ASSERT(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
1984 |
|
1985 |
|
1986 |
// Coprocessor instructions.
|
1987 |
void Assembler::cdp(Coprocessor coproc,
|
1988 |
int opcode_1,
|
1989 |
CRegister crd, |
1990 |
CRegister crn, |
1991 |
CRegister crm, |
1992 |
int opcode_2,
|
1993 |
Condition cond) { |
1994 |
ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2)); |
1995 |
emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
|
1996 |
crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
|
1997 |
} |
1998 |
|
1999 |
|
2000 |
void Assembler::cdp2(Coprocessor coproc,
|
2001 |
int opcode_1,
|
2002 |
CRegister crd, |
2003 |
CRegister crn, |
2004 |
CRegister crm, |
2005 |
int opcode_2) { // v5 and above |
2006 |
cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition); |
2007 |
} |
2008 |
|
2009 |
|
2010 |
void Assembler::mcr(Coprocessor coproc,
|
2011 |
int opcode_1,
|
2012 |
Register rd, |
2013 |
CRegister crn, |
2014 |
CRegister crm, |
2015 |
int opcode_2,
|
2016 |
Condition cond) { |
2017 |
ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); |
2018 |
emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
|
2019 |
rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
|
2020 |
} |
2021 |
|
2022 |
|
2023 |
void Assembler::mcr2(Coprocessor coproc,
|
2024 |
int opcode_1,
|
2025 |
Register rd, |
2026 |
CRegister crn, |
2027 |
CRegister crm, |
2028 |
int opcode_2) { // v5 and above |
2029 |
mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition); |
2030 |
} |
2031 |
|
2032 |
|
2033 |
void Assembler::mrc(Coprocessor coproc,
|
2034 |
int opcode_1,
|
2035 |
Register rd, |
2036 |
CRegister crn, |
2037 |
CRegister crm, |
2038 |
int opcode_2,
|
2039 |
Condition cond) { |
2040 |
ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2)); |
2041 |
emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
|
2042 |
rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
|
2043 |
} |
2044 |
|
2045 |
|
2046 |
void Assembler::mrc2(Coprocessor coproc,
|
2047 |
int opcode_1,
|
2048 |
Register rd, |
2049 |
CRegister crn, |
2050 |
CRegister crm, |
2051 |
int opcode_2) { // v5 and above |
2052 |
mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition); |
2053 |
} |
2054 |
|
2055 |
|
2056 |
void Assembler::ldc(Coprocessor coproc,
|
2057 |
CRegister crd, |
2058 |
const MemOperand& src,
|
2059 |
LFlag l, |
2060 |
Condition cond) { |
2061 |
addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src); |
2062 |
} |
2063 |
|
2064 |
|
2065 |
void Assembler::ldc(Coprocessor coproc,
|
2066 |
CRegister crd, |
2067 |
Register rn, |
2068 |
int option,
|
2069 |
LFlag l, |
2070 |
Condition cond) { |
2071 |
// Unindexed addressing.
|
2072 |
ASSERT(is_uint8(option)); |
2073 |
emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 | |
2074 |
coproc*B8 | (option & 255));
|
2075 |
} |
2076 |
|
2077 |
|
2078 |
void Assembler::ldc2(Coprocessor coproc,
|
2079 |
CRegister crd, |
2080 |
const MemOperand& src,
|
2081 |
LFlag l) { // v5 and above
|
2082 |
ldc(coproc, crd, src, l, kSpecialCondition); |
2083 |
} |
2084 |
|
2085 |
|
2086 |
void Assembler::ldc2(Coprocessor coproc,
|
2087 |
CRegister crd, |
2088 |
Register rn, |
2089 |
int option,
|
2090 |
LFlag l) { // v5 and above
|
2091 |
ldc(coproc, crd, rn, option, l, kSpecialCondition); |
2092 |
} |
2093 |
|
2094 |
|
2095 |
// Support for VFP.
|
2096 |
|
2097 |
void Assembler::vldr(const DwVfpRegister dst, |
2098 |
const Register base,
|
2099 |
int offset,
|
2100 |
const Condition cond) {
|
2101 |
// Ddst = MEM(Rbase + offset).
|
2102 |
// Instruction details available in ARM DDI 0406C.b, A8-924.
|
2103 |
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
|
2104 |
// Vd(15-12) | 1011(11-8) | offset
|
2105 |
int u = 1; |
2106 |
if (offset < 0) { |
2107 |
offset = -offset; |
2108 |
u = 0;
|
2109 |
} |
2110 |
int vd, d;
|
2111 |
dst.split_code(&vd, &d); |
2112 |
|
2113 |
ASSERT(offset >= 0);
|
2114 |
if ((offset % 4) == 0 && (offset / 4) < 256) { |
2115 |
emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
|
2116 |
0xB*B8 | ((offset / 4) & 255)); |
2117 |
} else {
|
2118 |
// Larger offsets must be handled by computing the correct address
|
2119 |
// in the ip register.
|
2120 |
ASSERT(!base.is(ip)); |
2121 |
if (u == 1) { |
2122 |
add(ip, base, Operand(offset)); |
2123 |
} else {
|
2124 |
sub(ip, base, Operand(offset)); |
2125 |
} |
2126 |
emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8); |
2127 |
} |
2128 |
} |
2129 |
|
2130 |
|
2131 |
void Assembler::vldr(const DwVfpRegister dst, |
2132 |
const MemOperand& operand,
|
2133 |
const Condition cond) {
|
2134 |
ASSERT(!operand.rm().is_valid()); |
2135 |
ASSERT(operand.am_ == Offset); |
2136 |
vldr(dst, operand.rn(), operand.offset(), cond); |
2137 |
} |
2138 |
|
2139 |
|
2140 |
void Assembler::vldr(const SwVfpRegister dst, |
2141 |
const Register base,
|
2142 |
int offset,
|
2143 |
const Condition cond) {
|
2144 |
// Sdst = MEM(Rbase + offset).
|
2145 |
// Instruction details available in ARM DDI 0406A, A8-628.
|
2146 |
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
|
2147 |
// Vdst(15-12) | 1010(11-8) | offset
|
2148 |
int u = 1; |
2149 |
if (offset < 0) { |
2150 |
offset = -offset; |
2151 |
u = 0;
|
2152 |
} |
2153 |
int sd, d;
|
2154 |
dst.split_code(&sd, &d); |
2155 |
ASSERT(offset >= 0);
|
2156 |
|
2157 |
if ((offset % 4) == 0 && (offset / 4) < 256) { |
2158 |
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
|
2159 |
0xA*B8 | ((offset / 4) & 255)); |
2160 |
} else {
|
2161 |
// Larger offsets must be handled by computing the correct address
|
2162 |
// in the ip register.
|
2163 |
ASSERT(!base.is(ip)); |
2164 |
if (u == 1) { |
2165 |
add(ip, base, Operand(offset)); |
2166 |
} else {
|
2167 |
sub(ip, base, Operand(offset)); |
2168 |
} |
2169 |
emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); |
2170 |
} |
2171 |
} |
2172 |
|
2173 |
|
2174 |
void Assembler::vldr(const SwVfpRegister dst, |
2175 |
const MemOperand& operand,
|
2176 |
const Condition cond) {
|
2177 |
ASSERT(!operand.rm().is_valid()); |
2178 |
ASSERT(operand.am_ == Offset); |
2179 |
vldr(dst, operand.rn(), operand.offset(), cond); |
2180 |
} |
2181 |
|
2182 |
|
2183 |
void Assembler::vstr(const DwVfpRegister src, |
2184 |
const Register base,
|
2185 |
int offset,
|
2186 |
const Condition cond) {
|
2187 |
// MEM(Rbase + offset) = Dsrc.
|
2188 |
// Instruction details available in ARM DDI 0406C.b, A8-1082.
|
2189 |
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
|
2190 |
// Vd(15-12) | 1011(11-8) | (offset/4)
|
2191 |
int u = 1; |
2192 |
if (offset < 0) { |
2193 |
offset = -offset; |
2194 |
u = 0;
|
2195 |
} |
2196 |
ASSERT(offset >= 0);
|
2197 |
int vd, d;
|
2198 |
src.split_code(&vd, &d); |
2199 |
|
2200 |
if ((offset % 4) == 0 && (offset / 4) < 256) { |
2201 |
emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 | |
2202 |
((offset / 4) & 255)); |
2203 |
} else {
|
2204 |
// Larger offsets must be handled by computing the correct address
|
2205 |
// in the ip register.
|
2206 |
ASSERT(!base.is(ip)); |
2207 |
if (u == 1) { |
2208 |
add(ip, base, Operand(offset)); |
2209 |
} else {
|
2210 |
sub(ip, base, Operand(offset)); |
2211 |
} |
2212 |
emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8); |
2213 |
} |
2214 |
} |
2215 |
|
2216 |
|
2217 |
void Assembler::vstr(const DwVfpRegister src, |
2218 |
const MemOperand& operand,
|
2219 |
const Condition cond) {
|
2220 |
ASSERT(!operand.rm().is_valid()); |
2221 |
ASSERT(operand.am_ == Offset); |
2222 |
vstr(src, operand.rn(), operand.offset(), cond); |
2223 |
} |
2224 |
|
2225 |
|
2226 |
void Assembler::vstr(const SwVfpRegister src, |
2227 |
const Register base,
|
2228 |
int offset,
|
2229 |
const Condition cond) {
|
2230 |
// MEM(Rbase + offset) = SSrc.
|
2231 |
// Instruction details available in ARM DDI 0406A, A8-786.
|
2232 |
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
|
2233 |
// Vdst(15-12) | 1010(11-8) | (offset/4)
|
2234 |
int u = 1; |
2235 |
if (offset < 0) { |
2236 |
offset = -offset; |
2237 |
u = 0;
|
2238 |
} |
2239 |
int sd, d;
|
2240 |
src.split_code(&sd, &d); |
2241 |
ASSERT(offset >= 0);
|
2242 |
if ((offset % 4) == 0 && (offset / 4) < 256) { |
2243 |
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
|
2244 |
0xA*B8 | ((offset / 4) & 255)); |
2245 |
} else {
|
2246 |
// Larger offsets must be handled by computing the correct address
|
2247 |
// in the ip register.
|
2248 |
ASSERT(!base.is(ip)); |
2249 |
if (u == 1) { |
2250 |
add(ip, base, Operand(offset)); |
2251 |
} else {
|
2252 |
sub(ip, base, Operand(offset)); |
2253 |
} |
2254 |
emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8); |
2255 |
} |
2256 |
} |
2257 |
|
2258 |
|
2259 |
void Assembler::vstr(const SwVfpRegister src, |
2260 |
const MemOperand& operand,
|
2261 |
const Condition cond) {
|
2262 |
ASSERT(!operand.rm().is_valid()); |
2263 |
ASSERT(operand.am_ == Offset); |
2264 |
vstr(src, operand.rn(), operand.offset(), cond); |
2265 |
} |
2266 |
|
2267 |
|
2268 |
void Assembler::vldm(BlockAddrMode am,
|
2269 |
Register base, |
2270 |
DwVfpRegister first, |
2271 |
DwVfpRegister last, |
2272 |
Condition cond) { |
2273 |
// Instruction details available in ARM DDI 0406C.b, A8-922.
|
2274 |
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
|
2275 |
// first(15-12) | 1011(11-8) | (count * 2)
|
2276 |
ASSERT_LE(first.code(), last.code()); |
2277 |
ASSERT(am == ia || am == ia_w || am == db_w); |
2278 |
ASSERT(!base.is(pc)); |
2279 |
|
2280 |
int sd, d;
|
2281 |
first.split_code(&sd, &d); |
2282 |
int count = last.code() - first.code() + 1; |
2283 |
ASSERT(count <= 16);
|
2284 |
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 | |
2285 |
0xB*B8 | count*2); |
2286 |
} |
2287 |
|
2288 |
|
2289 |
void Assembler::vstm(BlockAddrMode am,
|
2290 |
Register base, |
2291 |
DwVfpRegister first, |
2292 |
DwVfpRegister last, |
2293 |
Condition cond) { |
2294 |
// Instruction details available in ARM DDI 0406C.b, A8-1080.
|
2295 |
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
|
2296 |
// first(15-12) | 1011(11-8) | (count * 2)
|
2297 |
ASSERT_LE(first.code(), last.code()); |
2298 |
ASSERT(am == ia || am == ia_w || am == db_w); |
2299 |
ASSERT(!base.is(pc)); |
2300 |
|
2301 |
int sd, d;
|
2302 |
first.split_code(&sd, &d); |
2303 |
int count = last.code() - first.code() + 1; |
2304 |
ASSERT(count <= 16);
|
2305 |
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 | |
2306 |
0xB*B8 | count*2); |
2307 |
} |
2308 |
|
2309 |
void Assembler::vldm(BlockAddrMode am,
|
2310 |
Register base, |
2311 |
SwVfpRegister first, |
2312 |
SwVfpRegister last, |
2313 |
Condition cond) { |
2314 |
// Instruction details available in ARM DDI 0406A, A8-626.
|
2315 |
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
|
2316 |
// first(15-12) | 1010(11-8) | (count/2)
|
2317 |
ASSERT_LE(first.code(), last.code()); |
2318 |
ASSERT(am == ia || am == ia_w || am == db_w); |
2319 |
ASSERT(!base.is(pc)); |
2320 |
|
2321 |
int sd, d;
|
2322 |
first.split_code(&sd, &d); |
2323 |
int count = last.code() - first.code() + 1; |
2324 |
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 | |
2325 |
0xA*B8 | count);
|
2326 |
} |
2327 |
|
2328 |
|
2329 |
void Assembler::vstm(BlockAddrMode am,
|
2330 |
Register base, |
2331 |
SwVfpRegister first, |
2332 |
SwVfpRegister last, |
2333 |
Condition cond) { |
2334 |
// Instruction details available in ARM DDI 0406A, A8-784.
|
2335 |
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
|
2336 |
// first(15-12) | 1011(11-8) | (count/2)
|
2337 |
ASSERT_LE(first.code(), last.code()); |
2338 |
ASSERT(am == ia || am == ia_w || am == db_w); |
2339 |
ASSERT(!base.is(pc)); |
2340 |
|
2341 |
int sd, d;
|
2342 |
first.split_code(&sd, &d); |
2343 |
int count = last.code() - first.code() + 1; |
2344 |
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 | |
2345 |
0xA*B8 | count);
|
2346 |
} |
2347 |
|
2348 |
|
2349 |
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) { |
2350 |
uint64_t i; |
2351 |
OS::MemCopy(&i, &d, 8);
|
2352 |
|
2353 |
*lo = i & 0xffffffff;
|
2354 |
*hi = i >> 32;
|
2355 |
} |
2356 |
|
2357 |
|
2358 |
// Only works for little endian floating point formats.
|
2359 |
// We don't support VFP on the mixed endian floating point platform.
|
2360 |
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
  ASSERT(CpuFeatures::IsSupported(VFP3));

  // VMOV can accept an immediate of the form:
  //
  //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // The immediate is encoded using an 8-bit quantity, comprised of two
  // 4-bit fields. For an 8-bit immediate of the form:
  //
  //  [abcdefgh]
  //
  // where a is the MSB and h is the LSB, an immediate 64-bit double can be
  // created of the form:
  //
  //  [aBbbbbbb,bbcdefgh,00000000,00000000,
  //      00000000,00000000,00000000,00000000]
  //
  // where B = ~b.
  //

  uint32_t lo, hi;
  DoubleAsTwoUInt32(d, &lo, &hi);

  // The most obvious constraint is the long block of zeroes.
  if ((lo != 0) || ((hi & 0xffff) != 0)) {
    return false;
  }

  // Bits 62:55 must be all clear or all set.
  if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
    return false;
  }

  // Bit 63 must be NOT bit 62.
  if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
    return false;
  }

  // Create the encoded immediate in the form:
  //  [00000000,0000abcd,00000000,0000efgh]
  *encoding  = (hi >> 16) & 0xf;      // Low nybble.
  *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
  *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.

  return true;
}
2407 |
|
2408 |
|
2409 |
void Assembler::vmov(const DwVfpRegister dst, |
2410 |
double imm,
|
2411 |
const Register scratch) {
|
2412 |
uint32_t enc; |
2413 |
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
|
2414 |
// The double can be encoded in the instruction.
|
2415 |
//
|
2416 |
// Dd = immediate
|
2417 |
// Instruction details available in ARM DDI 0406C.b, A8-936.
|
2418 |
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
|
2419 |
// Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
|
2420 |
int vd, d;
|
2421 |
dst.split_code(&vd, &d); |
2422 |
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc); |
2423 |
} else if (FLAG_enable_vldr_imm) { |
2424 |
// TODO(jfb) Temporarily turned off until we have constant blinding or
|
2425 |
// some equivalent mitigation: an attacker can otherwise control
|
2426 |
// generated data which also happens to be executable, a Very Bad
|
2427 |
// Thing indeed.
|
2428 |
// Blinding gets tricky because we don't have xor, we probably
|
2429 |
// need to add/subtract without losing precision, which requires a
|
2430 |
// cookie value that Lithium is probably better positioned to
|
2431 |
// choose.
|
2432 |
// We could also add a few peepholes here like detecting 0.0 and
|
2433 |
// -0.0 and doing a vmov from the sequestered d14, forcing denorms
|
2434 |
// to zero (we set flush-to-zero), and normalizing NaN values.
|
2435 |
// We could also detect redundant values.
|
2436 |
// The code could also randomize the order of values, though
|
2437 |
// that's tricky because vldr has a limited reach. Furthermore
|
2438 |
// it breaks load locality.
|
2439 |
RecordRelocInfo(imm); |
2440 |
vldr(dst, MemOperand(pc, 0));
|
2441 |
} else {
|
2442 |
// Synthesise the double from ARM immediates.
|
2443 |
uint32_t lo, hi; |
2444 |
DoubleAsTwoUInt32(imm, &lo, &hi); |
2445 |
|
2446 |
if (scratch.is(no_reg)) {
|
2447 |
if (dst.code() < 16) { |
2448 |
const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
|
2449 |
// Move the low part of the double into the lower of the corresponsing S
|
2450 |
// registers of D register dst.
|
2451 |
mov(ip, Operand(lo)); |
2452 |
vmov(loc.low(), ip); |
2453 |
|
2454 |
// Move the high part of the double into the higher of the
|
2455 |
// corresponsing S registers of D register dst.
|
2456 |
mov(ip, Operand(hi)); |
2457 |
vmov(loc.high(), ip); |
2458 |
} else {
|
2459 |
// D16-D31 does not have S registers, so move the low and high parts
|
2460 |
// directly to the D register using vmov.32.
|
2461 |
// Note: This may be slower, so we only do this when we have to.
|
2462 |
mov(ip, Operand(lo)); |
2463 |
vmov(dst, VmovIndexLo, ip); |
2464 |
mov(ip, Operand(hi)); |
2465 |
vmov(dst, VmovIndexHi, ip); |
2466 |
} |
2467 |
} else {
|
2468 |
// Move the low and high parts of the double to a D register in one
|
2469 |
// instruction.
|
2470 |
mov(ip, Operand(lo)); |
2471 |
mov(scratch, Operand(hi)); |
2472 |
vmov(dst, ip, scratch); |
2473 |
} |
2474 |
} |
2475 |
} |
2476 |
|
2477 |
|
2478 |
void Assembler::vmov(const SwVfpRegister dst, |
2479 |
const SwVfpRegister src,
|
2480 |
const Condition cond) {
|
2481 |
// Sd = Sm
|
2482 |
// Instruction details available in ARM DDI 0406B, A8-642.
|
2483 |
int sd, d, sm, m;
|
2484 |
dst.split_code(&sd, &d); |
2485 |
src.split_code(&sm, &m); |
2486 |
emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm); |
2487 |
} |
2488 |
|
2489 |
|
2490 |
void Assembler::vmov(const DwVfpRegister dst, |
2491 |
const DwVfpRegister src,
|
2492 |
const Condition cond) {
|
2493 |
// Dd = Dm
|
2494 |
// Instruction details available in ARM DDI 0406C.b, A8-938.
|
2495 |
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
|
2496 |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
|
2497 |
int vd, d;
|
2498 |
dst.split_code(&vd, &d); |
2499 |
int vm, m;
|
2500 |
src.split_code(&vm, &m); |
2501 |
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 | |
2502 |
vm); |
2503 |
} |
2504 |
|
2505 |
|
2506 |
void Assembler::vmov(const DwVfpRegister dst, |
2507 |
const VmovIndex index,
|
2508 |
const Register src,
|
2509 |
const Condition cond) {
|
2510 |
// Dd[index] = Rt
|
2511 |
// Instruction details available in ARM DDI 0406C.b, A8-940.
|
2512 |
// cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
|
2513 |
// Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
|
2514 |
ASSERT(index.index == 0 || index.index == 1); |
2515 |
int vd, d;
|
2516 |
dst.split_code(&vd, &d); |
2517 |
emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 | |
2518 |
d*B7 | B4); |
2519 |
} |
2520 |
|
2521 |
|
2522 |
void Assembler::vmov(const Register dst, |
2523 |
const VmovIndex index,
|
2524 |
const DwVfpRegister src,
|
2525 |
const Condition cond) {
|
2526 |
// Dd[index] = Rt
|
2527 |
// Instruction details available in ARM DDI 0406C.b, A8.8.342.
|
2528 |
// cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
|
2529 |
// Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
|
2530 |
ASSERT(index.index == 0 || index.index == 1); |
2531 |
int vn, n;
|
2532 |
src.split_code(&vn, &n); |
2533 |
emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
|
2534 |
0xB*B8 | n*B7 | B4);
|
2535 |
} |
2536 |
|
2537 |
|
2538 |
void Assembler::vmov(const DwVfpRegister dst, |
2539 |
const Register src1,
|
2540 |
const Register src2,
|
2541 |
const Condition cond) {
|
2542 |
// Dm = <Rt,Rt2>.
|
2543 |
// Instruction details available in ARM DDI 0406C.b, A8-948.
|
2544 |
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
|
2545 |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
|
2546 |
ASSERT(!src1.is(pc) && !src2.is(pc)); |
2547 |
int vm, m;
|
2548 |
dst.split_code(&vm, &m); |
2549 |
emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
|
2550 |
src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
|
2551 |
} |
2552 |
|
2553 |
|
2554 |
void Assembler::vmov(const Register dst1, |
2555 |
const Register dst2,
|
2556 |
const DwVfpRegister src,
|
2557 |
const Condition cond) {
|
2558 |
// <Rt,Rt2> = Dm.
|
2559 |
// Instruction details available in ARM DDI 0406C.b, A8-948.
|
2560 |
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
|
2561 |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
|
2562 |
ASSERT(!dst1.is(pc) && !dst2.is(pc)); |
2563 |
int vm, m;
|
2564 |
src.split_code(&vm, &m); |
2565 |
emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
|
2566 |
dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
|
2567 |
} |
2568 |
|
2569 |
|
2570 |
void Assembler::vmov(const SwVfpRegister dst, |
2571 |
const Register src,
|
2572 |
const Condition cond) {
|
2573 |
// Sn = Rt.
|
2574 |
// Instruction details available in ARM DDI 0406A, A8-642.
|
2575 |
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
|
2576 |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
|
2577 |
ASSERT(!src.is(pc)); |
2578 |
int sn, n;
|
2579 |
dst.split_code(&sn, &n); |
2580 |
emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4); |
2581 |
} |
2582 |
|
2583 |
|
2584 |
void Assembler::vmov(const Register dst, |
2585 |
const SwVfpRegister src,
|
2586 |
const Condition cond) {
|
2587 |
// Rt = Sn.
|
2588 |
// Instruction details available in ARM DDI 0406A, A8-642.
|
2589 |
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
|
2590 |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
|
2591 |
ASSERT(!dst.is(pc)); |
2592 |
int sn, n;
|
2593 |
src.split_code(&sn, &n); |
2594 |
emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4); |
2595 |
} |
2596 |
|
2597 |
|
2598 |
// Type of data to read from or write to VFP register.
|
2599 |
// Used as specifier in generic vcvt instruction.
|
2600 |
enum VFPType { S32, U32, F32, F64 };
|
2601 |
|
2602 |
|
2603 |
static bool IsSignedVFPType(VFPType type) { |
2604 |
switch (type) {
|
2605 |
case S32:
|
2606 |
return true; |
2607 |
case U32:
|
2608 |
return false; |
2609 |
default:
|
2610 |
UNREACHABLE(); |
2611 |
return false; |
2612 |
} |
2613 |
} |
2614 |
|
2615 |
|
2616 |
static bool IsIntegerVFPType(VFPType type) { |
2617 |
switch (type) {
|
2618 |
case S32:
|
2619 |
case U32:
|
2620 |
return true; |
2621 |
case F32:
|
2622 |
case F64:
|
2623 |
return false; |
2624 |
default:
|
2625 |
UNREACHABLE(); |
2626 |
return false; |
2627 |
} |
2628 |
} |
2629 |
|
2630 |
|
2631 |
static bool IsDoubleVFPType(VFPType type) { |
2632 |
switch (type) {
|
2633 |
case F32:
|
2634 |
return false; |
2635 |
case F64:
|
2636 |
return true; |
2637 |
default:
|
2638 |
UNREACHABLE(); |
2639 |
return false; |
2640 |
} |
2641 |
} |
2642 |
|
2643 |
|
2644 |
// Split five bit reg_code based on size of reg_type.
|
2645 |
// 32-bit register codes are Vm:M
|
2646 |
// 64-bit register codes are M:Vm
|
2647 |
// where Vm is four bits, and M is a single bit.
|
2648 |
static void SplitRegCode(VFPType reg_type, |
2649 |
int reg_code,
|
2650 |
int* vm,
|
2651 |
int* m) {
|
2652 |
ASSERT((reg_code >= 0) && (reg_code <= 31)); |
2653 |
if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
|
2654 |
// 32 bit type.
|
2655 |
*m = reg_code & 0x1;
|
2656 |
*vm = reg_code >> 1;
|
2657 |
} else {
|
2658 |
// 64 bit type.
|
2659 |
*m = (reg_code & 0x10) >> 4; |
2660 |
*vm = reg_code & 0x0F;
|
2661 |
} |
2662 |
} |
2663 |
|
2664 |
|
2665 |
// Encode vcvt.src_type.dst_type instruction.
|
2666 |
// Encode vcvt.src_type.dst_type instruction: either float<->integer or
// single<->double conversion, selected by the operand types.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  ASSERT(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      // Float-to-integer: opc2 selects signedness, sz the source width, and
      // op the rounding behavior.
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      // Integer-to-float.
      ASSERT(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
2709 |
|
2710 |
|
2711 |
// Dd = double from the signed int32 held in Sm.
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
2717 |
|
2718 |
|
2719 |
// Sd = single-precision float from the signed int32 held in Sm.
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
2725 |
|
2726 |
|
2727 |
// Dd = double from the unsigned int32 held in Sm.
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
2733 |
|
2734 |
|
2735 |
// Sd = signed int32 from the double in Dm; |mode| sets the conversion op bit
// (see EncodeVCVT).
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
2741 |
|
2742 |
|
2743 |
// Sd = unsigned int32 from the double in Dm; |mode| sets the conversion op
// bit (see EncodeVCVT).
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
2749 |
|
2750 |
|
2751 |
// Dd = double-precision widening of the single-precision float in Sm.
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
2757 |
|
2758 |
|
2759 |
// Sd = single-precision narrowing of the double in Dm.
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
2765 |
|
2766 |
|
2767 |
// Dd = double from the fixed-point value in Dd itself, with the binary point
// |fraction_bits| from the right (signed 32-bit fixed-point source).
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             int fraction_bits,
                             const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-874.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
  // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
  ASSERT(fraction_bits > 0 && fraction_bits <= 32);
  ASSERT(CpuFeatures::IsSupported(VFP3));
  int vd, d;
  dst.split_code(&vd, &d);
  // The instruction encodes (32 - fraction_bits) split as i:imm4.
  int i = ((32 - fraction_bits) >> 4) & 1;
  int imm4 = (32 - fraction_bits) & 0xf;
  emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
       vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
}
2782 |
|
2783 |
|
2784 |
void Assembler::vneg(const DwVfpRegister dst, |
2785 |
const DwVfpRegister src,
|
2786 |
const Condition cond) {
|
2787 |
// Instruction details available in ARM DDI 0406C.b, A8-968.
|
2788 |
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
|
2789 |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
|
2790 |
int vd, d;
|
2791 |
dst.split_code(&vd, &d); |
2792 |
int vm, m;
|
2793 |
src.split_code(&vm, &m); |
2794 |
|
2795 |
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 | |
2796 |
m*B5 | vm); |
2797 |
} |
2798 |
|
2799 |
|
2800 |
void Assembler::vabs(const DwVfpRegister dst, |
2801 |
const DwVfpRegister src,
|
2802 |
const Condition cond) {
|
2803 |
// Instruction details available in ARM DDI 0406C.b, A8-524.
|
2804 |
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
|
2805 |
// 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
|
2806 |
int vd, d;
|
2807 |
dst.split_code(&vd, &d); |
2808 |
int vm, m;
|
2809 |
src.split_code(&vm, &m); |
2810 |
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 | |
2811 |
m*B5 | vm); |
2812 |
} |
2813 |
|
2814 |
|
2815 |
void Assembler::vadd(const DwVfpRegister dst, |
2816 |
const DwVfpRegister src1,
|
2817 |
const DwVfpRegister src2,
|
2818 |
const Condition cond) {
|
2819 |
// Dd = vadd(Dn, Dm) double precision floating point addition.
|
2820 |
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
|
2821 |
// Instruction details available in ARM DDI 0406C.b, A8-830.
|
2822 |
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
|
2823 |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
|
2824 |
int vd, d;
|
2825 |
dst.split_code(&vd, &d); |
2826 |
int vn, n;
|
2827 |
src1.split_code(&vn, &n); |
2828 |
int vm, m;
|
2829 |
src2.split_code(&vm, &m); |
2830 |
emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 | |
2831 |
n*B7 | m*B5 | vm); |
2832 |
} |
2833 |
|
2834 |
|
2835 |
void Assembler::vsub(const DwVfpRegister dst, |
2836 |
const DwVfpRegister src1,
|
2837 |
const DwVfpRegister src2,
|
2838 |
const Condition cond) {
|
2839 |
// Dd = vsub(Dn, Dm) double precision floating point subtraction.
|
2840 |
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
|
2841 |
// Instruction details available in ARM DDI 0406C.b, A8-1086.
|
2842 |
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
|
2843 |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
|
2844 |
int vd, d;
|
2845 |
dst.split_code(&vd, &d); |
2846 |
int vn, n;
|
2847 |
src1.split_code(&vn, &n); |
2848 |
int vm, m;
|
2849 |
src2.split_code(&vm, &m); |
2850 |
emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 | |
2851 |
n*B7 | B6 | m*B5 | vm); |
2852 |
} |
2853 |
|
2854 |
|
2855 |
void Assembler::vmul(const DwVfpRegister dst, |
2856 |
const DwVfpRegister src1,
|
2857 |
const DwVfpRegister src2,
|
2858 |
const Condition cond) {
|
2859 |
// Dd = vmul(Dn, Dm) double precision floating point multiplication.
|
2860 |
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
|
2861 |
// Instruction details available in ARM DDI 0406C.b, A8-960.
|
2862 |
// cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
|
2863 |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
|
2864 |
int vd, d;
|
2865 |
dst.split_code(&vd, &d); |
2866 |
int vn, n;
|
2867 |
src1.split_code(&vn, &n); |
2868 |
int vm, m;
|
2869 |
src2.split_code(&vm, &m); |
2870 |
emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 | |
2871 |
n*B7 | m*B5 | vm); |
2872 |
} |
2873 |
|
2874 |
|
2875 |
void Assembler::vmla(const DwVfpRegister dst, |
2876 |
const DwVfpRegister src1,
|
2877 |
const DwVfpRegister src2,
|
2878 |
const Condition cond) {
|
2879 |
// Instruction details available in ARM DDI 0406C.b, A8-932.
|
2880 |
// cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
|
2881 |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
|
2882 |
int vd, d;
|
2883 |
dst.split_code(&vd, &d); |
2884 |
int vn, n;
|
2885 |
src1.split_code(&vn, &n); |
2886 |
int vm, m;
|
2887 |
src2.split_code(&vm, &m); |
2888 |
emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 | |
2889 |
vm); |
2890 |
} |
2891 |
|
2892 |
|
2893 |
void Assembler::vmls(const DwVfpRegister dst, |
2894 |
const DwVfpRegister src1,
|
2895 |
const DwVfpRegister src2,
|
2896 |
const Condition cond) {
|
2897 |
// Instruction details available in ARM DDI 0406C.b, A8-932.
|
2898 |
// cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
|
2899 |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
|
2900 |
int vd, d;
|
2901 |
dst.split_code(&vd, &d); |
2902 |
int vn, n;
|
2903 |
src1.split_code(&vn, &n); |
2904 |
int vm, m;
|
2905 |
src2.split_code(&vm, &m); |
2906 |
emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 | |
2907 |
m*B5 | vm); |
2908 |
} |
2909 |
|
2910 |
|
2911 |
void Assembler::vdiv(const DwVfpRegister dst, |
2912 |
const DwVfpRegister src1,
|
2913 |
const DwVfpRegister src2,
|
2914 |
const Condition cond) {
|
2915 |
// Dd = vdiv(Dn, Dm) double precision floating point division.
|
2916 |
// Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
|
2917 |
// Instruction details available in ARM DDI 0406C.b, A8-882.
|
2918 |
// cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
|
2919 |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
|
2920 |
int vd, d;
|
2921 |
dst.split_code(&vd, &d); |
2922 |
int vn, n;
|
2923 |
src1.split_code(&vn, &n); |
2924 |
int vm, m;
|
2925 |
src2.split_code(&vm, &m); |
2926 |
emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 | |
2927 |
vm); |
2928 |
} |
2929 |
|
2930 |
|
2931 |
void Assembler::vcmp(const DwVfpRegister src1, |
2932 |
const DwVfpRegister src2,
|
2933 |
const Condition cond) {
|
2934 |
// vcmp(Dd, Dm) double precision floating point comparison.
|
2935 |
// Instruction details available in ARM DDI 0406C.b, A8-864.
|
2936 |
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
|
2937 |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
|
2938 |
int vd, d;
|
2939 |
src1.split_code(&vd, &d); |
2940 |
int vm, m;
|
2941 |
src2.split_code(&vm, &m); |
2942 |
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 | |
2943 |
m*B5 | vm); |
2944 |
} |
2945 |
|
2946 |
|
2947 |
void Assembler::vcmp(const DwVfpRegister src1, |
2948 |
const double src2, |
2949 |
const Condition cond) {
|
2950 |
// vcmp(Dd, #0.0) double precision floating point comparison.
|
2951 |
// Instruction details available in ARM DDI 0406C.b, A8-864.
|
2952 |
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
|
2953 |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
|
2954 |
ASSERT(src2 == 0.0); |
2955 |
int vd, d;
|
2956 |
src1.split_code(&vd, &d); |
2957 |
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6); |
2958 |
} |
2959 |
|
2960 |
|
2961 |
// Move the contents of core register |dst| into the FPSCR (VMSR).
void Assembler::vmsr(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE*B24 | 0xE*B20 |  B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
2968 |
|
2969 |
|
2970 |
// Move the contents of the FPSCR into core register |dst| (VMRS).
void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
2977 |
|
2978 |
|
2979 |
void Assembler::vsqrt(const DwVfpRegister dst, |
2980 |
const DwVfpRegister src,
|
2981 |
const Condition cond) {
|
2982 |
// Instruction details available in ARM DDI 0406C.b, A8-1058.
|
2983 |
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
|
2984 |
// Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
|
2985 |
int vd, d;
|
2986 |
dst.split_code(&vd, &d); |
2987 |
int vm, m;
|
2988 |
src.split_code(&vm, &m); |
2989 |
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 | |
2990 |
m*B5 | vm); |
2991 |
} |
2992 |
|
2993 |
|
2994 |
// Support for NEON.
|
2995 |
|
2996 |
void Assembler::vld1(NeonSize size,
|
2997 |
const NeonListOperand& dst,
|
2998 |
const NeonMemOperand& src) {
|
2999 |
// Instruction details available in ARM DDI 0406C.b, A8.8.320.
|
3000 |
// 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
|
3001 |
// Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
|
3002 |
ASSERT(CpuFeatures::IsSupported(NEON)); |
3003 |
int vd, d;
|
3004 |
dst.base().split_code(&vd, &d); |
3005 |
emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 | |
3006 |
dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code()); |
3007 |
} |
3008 |
|
3009 |
|
3010 |
// Store multiple single elements from the register list described by |src|
// to memory at |dst| (VST1).
void Assembler::vst1(NeonSize size,
                     const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
  // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int Vd, D;
  src.base().split_code(&Vd, &D);
  emit(0xFU*B28 | 4*B24 | D*B22 | dst.rn().code()*B16 | Vd*B12 | src.type()*B8 |
       size*B6 | dst.align()*B4 | dst.rm().code());
}
3022 |
|
3023 |
|
3024 |
// Widen each lane of Dm into Qd according to |dt| (VMOVL): the data type
// supplies both the U (signedness) bit and the imm3 size field via masking.
void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
  // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
  // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
        (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
3036 |
|
3037 |
|
3038 |
// Pseudo instructions.

// Emit a nop encoded as "mov r<type>, r<type>" so the register number can
// carry a marker value recognizable by IsNop().
void Assembler::nop(int type) {
  // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
  // some of the CPU's pipeline and has to issue. Older ARM chips simply used
  // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
  // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
  // a type.
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  emit(al | 13*B21 | type*B12 | type);
}
3048 |
|
3049 |
|
3050 |
// Returns true when |instr| is a movt, for any condition, destination
// register and immediate.
bool Assembler::IsMovT(Instr instr) {
  // Strip the variable fields; the remaining opcode bits identify movt.
  const Instr variable_fields =
      (((kNumberOfConditions - 1) << 28) |   // condition
       ((kNumRegisters-1)*B12) |             // destination register
       EncodeMovwImmediate(0xFFFF));         // immediate value
  return (instr & ~variable_fields) == 0x34*B20;
}
3056 |
|
3057 |
|
3058 |
// Returns true when |instr| is a movw, for any condition, destination
// register and immediate.
bool Assembler::IsMovW(Instr instr) {
  // Strip the variable fields; the remaining opcode bits identify movw.
  const Instr variable_fields =
      (((kNumberOfConditions - 1) << 28) |   // condition
       ((kNumRegisters-1)*B12) |             // destination register
       EncodeMovwImmediate(0xFFFF));         // immediate value
  return (instr & ~variable_fields) == 0x30*B20;
}
3064 |
|
3065 |
|
3066 |
// Returns true when |instr| is the type-encoded nop emitted by nop(type).
bool Assembler::IsNop(Instr instr, int type) {
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  // A type-encoded nop is "mov rx, rx" where x == type.
  const Instr expected = al | 13*B21 | type*B12 | type;
  return instr == expected;
}
3071 |
|
3072 |
|
3073 |
// Returns true when |imm32| can be encoded as an addressing mode 1
// (data-processing) immediate operand; the encoding itself is discarded.
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t ignored1;
  uint32_t ignored2;
  return fits_shifter(imm32, &ignored1, &ignored2, NULL);
}
3078 |
|
3079 |
|
3080 |
// Debugging.

// Record a JS return site: flushes buffered source positions, then emits a
// JS_RETURN reloc entry at the current pc.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
3086 |
|
3087 |
|
3088 |
// Record a debug break slot: flushes buffered source positions, then emits a
// DEBUG_BREAK_SLOT reloc entry at the current pc.
void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}
3093 |
|
3094 |
|
3095 |
// Attach a code comment at the current pc via a COMMENT reloc entry.
// No-op unless --code-comments is enabled. |msg| must outlive the reloc
// info, since only its pointer is recorded.
void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
3101 |
|
3102 |
|
3103 |
// Record that a constant pool of |size| bytes starts at the current pc.
void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
#ifdef ENABLE_DEBUGGER_SUPPORT
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
#endif
}
3110 |
|
3111 |
|
3112 |
// Grow the code buffer, copying emitted instructions (which grow up from the
// start) and relocation info (which grows down from the end), then rebase
// all buffer-relative state by the move deltas.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: 4KB minimum, doubling up to 1MB, then growing
  // linearly by 1MB to bound worst-case waste on large functions.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data. Instructions keep their offset from the buffer start;
  // reloc info keeps its offset from the buffer end (hence rc_delta).
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
  OS::MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate pending relocation entries: their pc fields are absolute
  // addresses into the old buffer.
  for (int i = 0; i < num_pending_reloc_info_; i++) {
    RelocInfo& rinfo = pending_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
3161 |
|
3162 |
|
3163 |
// Emit a raw data byte at the current pc.
void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using db.
  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
3173 |
|
3174 |
|
3175 |
// Emit a raw 32-bit data word at the current pc.
void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using dd.
  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
3185 |
|
3186 |
|
3187 |
// Record relocation info for the instruction at the current pc, queuing a
// constant pool entry when the mode needs one, and writing the reloc stream
// entry unless it can be skipped (e.g. external refs outside serialization).
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
                                UseConstantPoolMode mode) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL) ||
      mode == DONT_USE_CONSTANT_POOL) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode)
           || mode == DONT_USE_CONSTANT_POOL);
    // These modes do not need an entry in the constant pool.
  } else {
    RecordRelocInfoConstantPoolEntryHelper(rinfo);
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      // Code targets with an id carry the recorded AST id as their data;
      // consume it so the next target starts fresh.
      RelocInfo reloc_info_with_ast_id(pc_,
                                       rmode,
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}
3231 |
|
3232 |
|
3233 |
// Queue a 64-bit (double) constant pool entry for the instruction at the
// current pc.
void Assembler::RecordRelocInfo(double data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, data);
  RecordRelocInfoConstantPoolEntryHelper(rinfo);
}
3238 |
|
3239 |
|
3240 |
// Append |rinfo| to the pending constant pool, tracking the first use
// offset (which bounds pool placement) and the count of 64-bit entries.
void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
  ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
  if (num_pending_reloc_info_ == 0) {
    first_const_pool_use_ = pc_offset();
  }
  pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
  if (rinfo.rmode() == RelocInfo::NONE64) {
    // NONE64 marks a 64-bit (double) entry.
    ++num_pending_64_bit_reloc_info_;
  }
  ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
  // Make sure the constant pool is not emitted in place of the next
  // instruction for which we just recorded relocation info.
  BlockConstPoolFor(1);
}
3254 |
|
3255 |
|
3256 |
// Prevent the constant pool from being emitted for the next |instructions|
// instructions, and defer the next pool check accordingly.
void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    // If there are some pending entries, the constant pool cannot be blocked
    // further than constant pool instruction's reach.
    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
    // TODO(jfb) Also check 64-bit entries are in range (requires splitting
    //           them up from 32-bit entries).
    no_const_pool_before_ = pc_limit;
  }

  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}
3272 |
|
3273 |
|
3274 |
// Decide whether the pending constant pool must be emitted now and, if so,
// emit it: optional jump over the pool, pool marker, optional alignment
// word, then 64-bit entries followed by 32-bit entries, patching each
// referencing load with its final pc-relative offset.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequence of instruction mustn't be broken up by constant pool
  // emission, such sequences are protected by calls to BlockConstPoolFor and
  // BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (num_pending_reloc_info_ == 0)  {
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (include the jump over the pool and the constant pool marker and
  // the gap to the relocation information).
  // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  int size_after_marker = num_pending_reloc_info_ * kPointerSize;
  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
  // 64-bit values must be 64-bit aligned.
  // We'll start emitting at PC: branch+marker, then 32-bit values, then
  // 64-bit values which might need to be aligned.
  bool require_64_bit_align = has_fp_values &&
      (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
  if (require_64_bit_align) {
    size_after_marker += kInstrSize;
  }
  // num_pending_reloc_info_ also contains 64-bit entries, the above code
  // therefore already counted half of the size for 64-bit entries. Add the
  // remaining size.
  STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
  size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);

  int size = size_up_to_marker + size_after_marker;

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next
  //    time the pool is checked. This is overly restrictive, but we don't emit
  //    constant pool entries in-order so it's conservatively correct.
  //  * the instruction doesn't require a jump after itself to jump over the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
    int dist = pc_offset() + size - first_const_pool_use_;
    if (has_fp_values) {
      // FP (vldr) loads have a shorter reach than integer (ldr) loads.
      if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
          (require_jump || (dist < kMaxDistToFPPool / 2))) {
        return;
      }
    } else {
      if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
          (require_jump || (dist < kMaxDistToIntPool / 2))) {
        return;
      }
    }
  }

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down constant pool marker "Undefined instruction".
    // The data size helps disassembly know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    if (require_64_bit_align) {
      // Padding word so the 64-bit entries start 64-bit aligned.
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // 32-bit entries.
    for (int i = 0; i < num_pending_reloc_info_; i++) {
      RelocInfo& rinfo = pending_reloc_info_[i];

      if (rinfo.rmode() != RelocInfo::NONE64) {
        // 32-bit values emitted later.
        continue;
      }

      ASSERT(!((uintptr_t)pc_ & 0x3));  // Check 64-bit alignment.

      Instr instr = instr_at(rinfo.pc());
      // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
      ASSERT((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      ASSERT(is_uint10(delta));

      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));

      const double double_data = rinfo.data64();
      uint64_t uint_data = 0;
      OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
      emit(uint_data & 0xFFFFFFFF);
      emit(uint_data >> 32);
    }

    // Emit 32-bit constant pool entries.
    for (int i = 0; i < num_pending_reloc_info_; i++) {
      RelocInfo& rinfo = pending_reloc_info_[i];
      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL);

      if (rinfo.rmode() == RelocInfo::NONE64) {
        // 64-bit values emitted earlier.
        continue;
      }

      Instr instr = instr_at(rinfo.pc());

      // 64-bit loads shouldn't get here.
      ASSERT(!IsVldrDPcImmediateOffset(instr));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      // 0 is the smallest delta:
      //   ldr rd, [pc, #0]
      //   constant pool marker
      //   data

      if (IsLdrPcImmediateOffset(instr) &&
          GetLdrRegisterImmediateOffset(instr) == 0) {
        ASSERT(is_uint12(delta));
        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
        emit(rinfo.data());
      } else {
        ASSERT(IsMovW(instr));
        emit(rinfo.data());
      }
    }

    num_pending_reloc_info_ = 0;
    num_pending_64_bit_reloc_info_ = 0;
    first_const_pool_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
3443 |
|
3444 |
|
3445 |
} } // namespace v8::internal
|
3446 |
|
3447 |
#endif // V8_TARGET_ARCH_ARM |