The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.
Please select the desired protocol below to get the URL.
This URL has Read-Only access.
main_repo / deps / v8 / src / assembler.cc @ f230a1cf
History | View | Annotate | Download (52.1 KB)
1 |
// Copyright (c) 1994-2006 Sun Microsystems Inc.
|
---|---|
2 |
// All Rights Reserved.
|
3 |
//
|
4 |
// Redistribution and use in source and binary forms, with or without
|
5 |
// modification, are permitted provided that the following conditions are
|
6 |
// met:
|
7 |
//
|
8 |
// - Redistributions of source code must retain the above copyright notice,
|
9 |
// this list of conditions and the following disclaimer.
|
10 |
//
|
11 |
// - Redistribution in binary form must reproduce the above copyright
|
12 |
// notice, this list of conditions and the following disclaimer in the
|
13 |
// documentation and/or other materials provided with the distribution.
|
14 |
//
|
15 |
// - Neither the name of Sun Microsystems or the names of contributors may
|
16 |
// be used to endorse or promote products derived from this software without
|
17 |
// specific prior written permission.
|
18 |
//
|
19 |
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
|
20 |
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
|
21 |
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
22 |
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
|
23 |
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
24 |
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
25 |
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
26 |
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
27 |
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
28 |
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
29 |
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
30 |
|
31 |
// The original source code covered by the above license has been
|
32 |
// modified significantly by Google Inc.
|
33 |
// Copyright 2012 the V8 project authors. All rights reserved.
|
34 |
|
35 |
#include "assembler.h" |
36 |
|
37 |
#include <cmath> |
38 |
#include "api.h" |
39 |
#include "builtins.h" |
40 |
#include "counters.h" |
41 |
#include "cpu.h" |
42 |
#include "debug.h" |
43 |
#include "deoptimizer.h" |
44 |
#include "execution.h" |
45 |
#include "ic.h" |
46 |
#include "isolate-inl.h" |
47 |
#include "jsregexp.h" |
48 |
#include "lazy-instance.h" |
49 |
#include "platform.h" |
50 |
#include "regexp-macro-assembler.h" |
51 |
#include "regexp-stack.h" |
52 |
#include "runtime.h" |
53 |
#include "serialize.h" |
54 |
#include "store-buffer-inl.h" |
55 |
#include "stub-cache.h" |
56 |
#include "token.h" |
57 |
|
58 |
#if V8_TARGET_ARCH_IA32
|
59 |
#include "ia32/assembler-ia32-inl.h" |
60 |
#elif V8_TARGET_ARCH_X64
|
61 |
#include "x64/assembler-x64-inl.h" |
62 |
#elif V8_TARGET_ARCH_ARM
|
63 |
#include "arm/assembler-arm-inl.h" |
64 |
#elif V8_TARGET_ARCH_MIPS
|
65 |
#include "mips/assembler-mips-inl.h" |
66 |
#else
|
67 |
#error "Unknown architecture." |
68 |
#endif
|
69 |
|
70 |
// Include native regexp-macro-assembler.
|
71 |
#ifndef V8_INTERPRETED_REGEXP
|
72 |
#if V8_TARGET_ARCH_IA32
|
73 |
#include "ia32/regexp-macro-assembler-ia32.h" |
74 |
#elif V8_TARGET_ARCH_X64
|
75 |
#include "x64/regexp-macro-assembler-x64.h" |
76 |
#elif V8_TARGET_ARCH_ARM
|
77 |
#include "arm/regexp-macro-assembler-arm.h" |
78 |
#elif V8_TARGET_ARCH_MIPS
|
79 |
#include "mips/regexp-macro-assembler-mips.h" |
80 |
#else // Unknown architecture. |
81 |
#error "Unknown architecture." |
82 |
#endif // Target architecture. |
83 |
#endif // V8_INTERPRETED_REGEXP |
84 |
|
85 |
namespace v8 {
|
86 |
namespace internal {
|
87 |
|
88 |
// -----------------------------------------------------------------------------
|
89 |
// Common double constants.
|
90 |
|
91 |
struct DoubleConstant BASE_EMBEDDED {
|
92 |
double min_int;
|
93 |
double one_half;
|
94 |
double minus_one_half;
|
95 |
double minus_zero;
|
96 |
double zero;
|
97 |
double uint8_max_value;
|
98 |
double negative_infinity;
|
99 |
double canonical_non_hole_nan;
|
100 |
double the_hole_nan;
|
101 |
double uint32_bias;
|
102 |
}; |
103 |
|
104 |
static DoubleConstant double_constants;
|
105 |
|
106 |
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; |
107 |
|
108 |
static bool math_exp_data_initialized = false; |
109 |
static Mutex* math_exp_data_mutex = NULL; |
110 |
static double* math_exp_constants_array = NULL; |
111 |
static double* math_exp_log_table_array = NULL; |
112 |
|
113 |
// -----------------------------------------------------------------------------
|
114 |
// Implementation of AssemblerBase
|
115 |
|
116 |
AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) |
117 |
: isolate_(isolate), |
118 |
jit_cookie_(0),
|
119 |
enabled_cpu_features_(0),
|
120 |
emit_debug_code_(FLAG_debug_code), |
121 |
predictable_code_size_(false) {
|
122 |
if (FLAG_mask_constants_with_cookie && isolate != NULL) { |
123 |
jit_cookie_ = isolate->random_number_generator()->NextInt(); |
124 |
} |
125 |
|
126 |
if (buffer == NULL) { |
127 |
// Do our own buffer management.
|
128 |
if (buffer_size <= kMinimalBufferSize) {
|
129 |
buffer_size = kMinimalBufferSize; |
130 |
if (isolate->assembler_spare_buffer() != NULL) { |
131 |
buffer = isolate->assembler_spare_buffer(); |
132 |
isolate->set_assembler_spare_buffer(NULL);
|
133 |
} |
134 |
} |
135 |
if (buffer == NULL) buffer = NewArray<byte>(buffer_size); |
136 |
own_buffer_ = true;
|
137 |
} else {
|
138 |
// Use externally provided buffer instead.
|
139 |
ASSERT(buffer_size > 0);
|
140 |
own_buffer_ = false;
|
141 |
} |
142 |
buffer_ = static_cast<byte*>(buffer);
|
143 |
buffer_size_ = buffer_size; |
144 |
|
145 |
pc_ = buffer_; |
146 |
} |
147 |
|
148 |
|
149 |
// Release the code buffer if this assembler owns it. A minimal-size buffer
// is stashed on the isolate as the spare buffer (if that slot is free) so
// the next assembler can reuse it without reallocating.
AssemblerBase::~AssemblerBase() {
  if (!own_buffer_) return;  // Caller-provided storage; nothing to free.

  const bool stash_as_spare =
      isolate() != NULL &&
      isolate()->assembler_spare_buffer() == NULL &&
      buffer_size_ == kMinimalBufferSize;

  if (stash_as_spare) {
    isolate()->set_assembler_spare_buffer(buffer_);
  } else {
    DeleteArray(buffer_);
  }
}
160 |
|
161 |
|
162 |
// -----------------------------------------------------------------------------
|
163 |
// Implementation of PredictableCodeSizeScope
|
164 |
|
165 |
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler, |
166 |
int expected_size)
|
167 |
: assembler_(assembler), |
168 |
expected_size_(expected_size), |
169 |
start_offset_(assembler->pc_offset()), |
170 |
old_value_(assembler->predictable_code_size()) { |
171 |
assembler_->set_predictable_code_size(true);
|
172 |
} |
173 |
|
174 |
|
175 |
PredictableCodeSizeScope::~PredictableCodeSizeScope() { |
176 |
// TODO(svenpanne) Remove the 'if' when everything works.
|
177 |
if (expected_size_ >= 0) { |
178 |
CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_); |
179 |
} |
180 |
assembler_->set_predictable_code_size(old_value_); |
181 |
} |
182 |
|
183 |
|
184 |
// -----------------------------------------------------------------------------
|
185 |
// Implementation of CpuFeatureScope
|
186 |
|
187 |
#ifdef DEBUG
|
188 |
CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) |
189 |
: assembler_(assembler) { |
190 |
ASSERT(CpuFeatures::IsSafeForSnapshot(f)); |
191 |
old_enabled_ = assembler_->enabled_cpu_features(); |
192 |
uint64_t mask = static_cast<uint64_t>(1) << f; |
193 |
// TODO(svenpanne) This special case below doesn't belong here!
|
194 |
#if V8_TARGET_ARCH_ARM
|
195 |
// ARMv7 is implied by VFP3.
|
196 |
if (f == VFP3) {
|
197 |
mask |= static_cast<uint64_t>(1) << ARMv7; |
198 |
} |
199 |
#endif
|
200 |
assembler_->set_enabled_cpu_features(old_enabled_ | mask); |
201 |
} |
202 |
|
203 |
|
204 |
CpuFeatureScope::~CpuFeatureScope() { |
205 |
assembler_->set_enabled_cpu_features(old_enabled_); |
206 |
} |
207 |
#endif
|
208 |
|
209 |
|
210 |
// -----------------------------------------------------------------------------
|
211 |
// Implementation of PlatformFeatureScope
|
212 |
|
213 |
PlatformFeatureScope::PlatformFeatureScope(CpuFeature f) |
214 |
: old_cross_compile_(CpuFeatures::cross_compile_) { |
215 |
// CpuFeatures is a global singleton, therefore this is only safe in
|
216 |
// single threaded code.
|
217 |
ASSERT(Serializer::enabled()); |
218 |
uint64_t mask = static_cast<uint64_t>(1) << f; |
219 |
CpuFeatures::cross_compile_ |= mask; |
220 |
} |
221 |
|
222 |
|
223 |
PlatformFeatureScope::~PlatformFeatureScope() { |
224 |
CpuFeatures::cross_compile_ = old_cross_compile_; |
225 |
} |
226 |
|
227 |
|
228 |
// -----------------------------------------------------------------------------
|
229 |
// Implementation of Label
|
230 |
|
231 |
int Label::pos() const { |
232 |
if (pos_ < 0) return -pos_ - 1; |
233 |
if (pos_ > 0) return pos_ - 1; |
234 |
UNREACHABLE(); |
235 |
return 0; |
236 |
} |
237 |
|
238 |
|
239 |
// -----------------------------------------------------------------------------
|
240 |
// Implementation of RelocInfoWriter and RelocIterator
|
241 |
//
|
242 |
// Relocation information is written backwards in memory, from high addresses
|
243 |
// towards low addresses, byte by byte. Therefore, in the encodings listed
|
244 |
// below, the first byte listed is at the highest address, and successive
|
245 |
// bytes in the record are at progressively lower addresses.
|
246 |
//
|
247 |
// Encoding
|
248 |
//
|
249 |
// The most common modes are given single-byte encodings. Also, it is
|
250 |
// easy to identify the type of reloc info and skip unwanted modes in
|
251 |
// an iteration.
|
252 |
//
|
253 |
// The encoding relies on the fact that there are fewer than 14
|
254 |
// different relocation modes using standard non-compact encoding.
|
255 |
//
|
256 |
// The first byte of a relocation record has a tag in its low 2 bits:
|
257 |
// Here are the record schemes, depending on the low tag and optional higher
|
258 |
// tags.
|
259 |
//
|
260 |
// Low tag:
|
261 |
// 00: embedded_object: [6-bit pc delta] 00
|
262 |
//
|
263 |
// 01: code_target: [6-bit pc delta] 01
|
264 |
//
|
265 |
// 10: short_data_record: [6-bit pc delta] 10 followed by
|
266 |
// [6-bit data delta] [2-bit data type tag]
|
267 |
//
|
268 |
// 11: long_record [2-bit high tag][4 bit middle_tag] 11
|
269 |
// followed by variable data depending on type.
|
270 |
//
|
271 |
// 2-bit data type tags, used in short_data_record and data_jump long_record:
|
272 |
// code_target_with_id: 00
|
273 |
// position: 01
|
274 |
// statement_position: 10
|
275 |
// comment: 11 (not used in short_data_record)
|
276 |
//
|
277 |
// Long record format:
|
278 |
// 4-bit middle_tag:
|
279 |
// 0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2
|
280 |
// (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
|
281 |
// and is between 0000 and 1100)
|
282 |
// The format is:
|
283 |
// 00 [4 bit middle_tag] 11 followed by
|
284 |
// 00 [6 bit pc delta]
|
285 |
//
|
286 |
// 1101: constant pool. Used on ARM only for now.
|
287 |
// The format is: 11 1101 11
|
288 |
// signed int (size of the constant pool).
|
289 |
// 1110: long_data_record
|
290 |
// The format is: [2-bit data_type_tag] 1110 11
|
291 |
// signed intptr_t, lowest byte written first
|
292 |
// (except data_type code_target_with_id, which
|
293 |
// is followed by a signed int, not intptr_t.)
|
294 |
//
|
295 |
// 1111: long_pc_jump
|
296 |
// The format is:
|
297 |
// pc-jump: 00 1111 11,
|
298 |
// 00 [6 bits pc delta]
|
299 |
// or
|
300 |
// pc-jump (variable length):
|
301 |
// 01 1111 11,
|
302 |
// [7 bits data] 0
|
303 |
// ...
|
304 |
// [7 bits data] 1
|
305 |
// (Bits 6..31 of pc delta, with leading zeroes
|
306 |
// dropped, and last non-zero chunk tagged with 1.)
|
307 |
|
308 |
|
309 |
const int kMaxStandardNonCompactModes = 14; |
310 |
|
311 |
const int kTagBits = 2; |
312 |
const int kTagMask = (1 << kTagBits) - 1; |
313 |
const int kExtraTagBits = 4; |
314 |
const int kLocatableTypeTagBits = 2; |
315 |
const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits; |
316 |
|
317 |
const int kEmbeddedObjectTag = 0; |
318 |
const int kCodeTargetTag = 1; |
319 |
const int kLocatableTag = 2; |
320 |
const int kDefaultTag = 3; |
321 |
|
322 |
const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1; |
323 |
|
324 |
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits; |
325 |
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1; |
326 |
const int RelocInfo::kMaxSmallPCDelta = kSmallPCDeltaMask; |
327 |
|
328 |
const int kVariableLengthPCJumpTopTag = 1; |
329 |
const int kChunkBits = 7; |
330 |
const int kChunkMask = (1 << kChunkBits) - 1; |
331 |
const int kLastChunkTagBits = 1; |
332 |
const int kLastChunkTagMask = 1; |
333 |
const int kLastChunkTag = 1; |
334 |
|
335 |
|
336 |
const int kDataJumpExtraTag = kPCJumpExtraTag - 1; |
337 |
|
338 |
const int kCodeWithIdTag = 0; |
339 |
const int kNonstatementPositionTag = 1; |
340 |
const int kStatementPositionTag = 2; |
341 |
const int kCommentTag = 3; |
342 |
|
343 |
const int kConstPoolExtraTag = kPCJumpExtraTag - 2; |
344 |
const int kConstPoolTag = 3; |
345 |
|
346 |
|
347 |
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) { |
348 |
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
|
349 |
// Otherwise write a variable length PC jump for the bits that do
|
350 |
// not fit in the kSmallPCDeltaBits bits.
|
351 |
if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta; |
352 |
WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag); |
353 |
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits; |
354 |
ASSERT(pc_jump > 0);
|
355 |
// Write kChunkBits size chunks of the pc_jump.
|
356 |
for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) { |
357 |
byte b = pc_jump & kChunkMask; |
358 |
*--pos_ = b << kLastChunkTagBits; |
359 |
} |
360 |
// Tag the last chunk so it can be identified.
|
361 |
*pos_ = *pos_ | kLastChunkTag; |
362 |
// Return the remaining kSmallPCDeltaBits of the pc_delta.
|
363 |
return pc_delta & kSmallPCDeltaMask;
|
364 |
} |
365 |
|
366 |
|
367 |
void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) { |
368 |
// Write a byte of tagged pc-delta, possibly preceded by var. length pc-jump.
|
369 |
pc_delta = WriteVariableLengthPCJump(pc_delta); |
370 |
*--pos_ = pc_delta << kTagBits | tag; |
371 |
} |
372 |
|
373 |
|
374 |
void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) { |
375 |
*--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
|
376 |
} |
377 |
|
378 |
|
379 |
void RelocInfoWriter::WriteExtraTag(int extra_tag, int top_tag) { |
380 |
*--pos_ = static_cast<int>(top_tag << (kTagBits + kExtraTagBits) | |
381 |
extra_tag << kTagBits | |
382 |
kDefaultTag); |
383 |
} |
384 |
|
385 |
|
386 |
void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) { |
387 |
// Write two-byte tagged pc-delta, possibly preceded by var. length pc-jump.
|
388 |
pc_delta = WriteVariableLengthPCJump(pc_delta); |
389 |
WriteExtraTag(extra_tag, 0);
|
390 |
*--pos_ = pc_delta; |
391 |
} |
392 |
|
393 |
|
394 |
void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) { |
395 |
WriteExtraTag(kDataJumpExtraTag, top_tag); |
396 |
for (int i = 0; i < kIntSize; i++) { |
397 |
*--pos_ = static_cast<byte>(data_delta);
|
398 |
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
|
399 |
data_delta = data_delta >> kBitsPerByte; |
400 |
} |
401 |
} |
402 |
|
403 |
|
404 |
void RelocInfoWriter::WriteExtraTaggedConstPoolData(int data) { |
405 |
WriteExtraTag(kConstPoolExtraTag, kConstPoolTag); |
406 |
for (int i = 0; i < kIntSize; i++) { |
407 |
*--pos_ = static_cast<byte>(data);
|
408 |
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
|
409 |
data = data >> kBitsPerByte; |
410 |
} |
411 |
} |
412 |
|
413 |
|
414 |
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) { |
415 |
WriteExtraTag(kDataJumpExtraTag, top_tag); |
416 |
for (int i = 0; i < kIntptrSize; i++) { |
417 |
*--pos_ = static_cast<byte>(data_delta);
|
418 |
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
|
419 |
data_delta = data_delta >> kBitsPerByte; |
420 |
} |
421 |
} |
422 |
|
423 |
|
424 |
void RelocInfoWriter::Write(const RelocInfo* rinfo) { |
425 |
#ifdef DEBUG
|
426 |
byte* begin_pos = pos_; |
427 |
#endif
|
428 |
ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES); |
429 |
ASSERT(rinfo->pc() - last_pc_ >= 0);
|
430 |
ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM |
431 |
<= kMaxStandardNonCompactModes); |
432 |
// Use unsigned delta-encoding for pc.
|
433 |
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
|
434 |
RelocInfo::Mode rmode = rinfo->rmode(); |
435 |
|
436 |
// The two most common modes are given small tags, and usually fit in a byte.
|
437 |
if (rmode == RelocInfo::EMBEDDED_OBJECT) {
|
438 |
WriteTaggedPC(pc_delta, kEmbeddedObjectTag); |
439 |
} else if (rmode == RelocInfo::CODE_TARGET) { |
440 |
WriteTaggedPC(pc_delta, kCodeTargetTag); |
441 |
ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize); |
442 |
} else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { |
443 |
// Use signed delta-encoding for id.
|
444 |
ASSERT(static_cast<int>(rinfo->data()) == rinfo->data()); |
445 |
int id_delta = static_cast<int>(rinfo->data()) - last_id_; |
446 |
// Check if delta is small enough to fit in a tagged byte.
|
447 |
if (is_intn(id_delta, kSmallDataBits)) {
|
448 |
WriteTaggedPC(pc_delta, kLocatableTag); |
449 |
WriteTaggedData(id_delta, kCodeWithIdTag); |
450 |
} else {
|
451 |
// Otherwise, use costly encoding.
|
452 |
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag); |
453 |
WriteExtraTaggedIntData(id_delta, kCodeWithIdTag); |
454 |
} |
455 |
last_id_ = static_cast<int>(rinfo->data()); |
456 |
} else if (RelocInfo::IsPosition(rmode)) { |
457 |
// Use signed delta-encoding for position.
|
458 |
ASSERT(static_cast<int>(rinfo->data()) == rinfo->data()); |
459 |
int pos_delta = static_cast<int>(rinfo->data()) - last_position_; |
460 |
int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
|
461 |
: kStatementPositionTag; |
462 |
// Check if delta is small enough to fit in a tagged byte.
|
463 |
if (is_intn(pos_delta, kSmallDataBits)) {
|
464 |
WriteTaggedPC(pc_delta, kLocatableTag); |
465 |
WriteTaggedData(pos_delta, pos_type_tag); |
466 |
} else {
|
467 |
// Otherwise, use costly encoding.
|
468 |
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag); |
469 |
WriteExtraTaggedIntData(pos_delta, pos_type_tag); |
470 |
} |
471 |
last_position_ = static_cast<int>(rinfo->data()); |
472 |
} else if (RelocInfo::IsComment(rmode)) { |
473 |
// Comments are normally not generated, so we use the costly encoding.
|
474 |
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag); |
475 |
WriteExtraTaggedData(rinfo->data(), kCommentTag); |
476 |
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize); |
477 |
} else if (RelocInfo::IsConstPool(rmode)) { |
478 |
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag); |
479 |
WriteExtraTaggedConstPoolData(static_cast<int>(rinfo->data())); |
480 |
} else {
|
481 |
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM); |
482 |
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
|
483 |
// For all other modes we simply use the mode as the extra tag.
|
484 |
// None of these modes need a data component.
|
485 |
ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag); |
486 |
WriteExtraTaggedPC(pc_delta, saved_mode); |
487 |
} |
488 |
last_pc_ = rinfo->pc(); |
489 |
#ifdef DEBUG
|
490 |
ASSERT(begin_pos - pos_ <= kMaxSize); |
491 |
#endif
|
492 |
} |
493 |
|
494 |
|
495 |
inline int RelocIterator::AdvanceGetTag() { |
496 |
return *--pos_ & kTagMask;
|
497 |
} |
498 |
|
499 |
|
500 |
inline int RelocIterator::GetExtraTag() { |
501 |
return (*pos_ >> kTagBits) & ((1 << kExtraTagBits) - 1); |
502 |
} |
503 |
|
504 |
|
505 |
inline int RelocIterator::GetTopTag() { |
506 |
return *pos_ >> (kTagBits + kExtraTagBits);
|
507 |
} |
508 |
|
509 |
|
510 |
inline void RelocIterator::ReadTaggedPC() { |
511 |
rinfo_.pc_ += *pos_ >> kTagBits; |
512 |
} |
513 |
|
514 |
|
515 |
inline void RelocIterator::AdvanceReadPC() { |
516 |
rinfo_.pc_ += *--pos_; |
517 |
} |
518 |
|
519 |
|
520 |
void RelocIterator::AdvanceReadId() {
|
521 |
int x = 0; |
522 |
for (int i = 0; i < kIntSize; i++) { |
523 |
x |= static_cast<int>(*--pos_) << i * kBitsPerByte; |
524 |
} |
525 |
last_id_ += x; |
526 |
rinfo_.data_ = last_id_; |
527 |
} |
528 |
|
529 |
|
530 |
void RelocIterator::AdvanceReadConstPoolData() {
|
531 |
int x = 0; |
532 |
for (int i = 0; i < kIntSize; i++) { |
533 |
x |= static_cast<int>(*--pos_) << i * kBitsPerByte; |
534 |
} |
535 |
rinfo_.data_ = x; |
536 |
} |
537 |
|
538 |
|
539 |
void RelocIterator::AdvanceReadPosition() {
|
540 |
int x = 0; |
541 |
for (int i = 0; i < kIntSize; i++) { |
542 |
x |= static_cast<int>(*--pos_) << i * kBitsPerByte; |
543 |
} |
544 |
last_position_ += x; |
545 |
rinfo_.data_ = last_position_; |
546 |
} |
547 |
|
548 |
|
549 |
void RelocIterator::AdvanceReadData() {
|
550 |
intptr_t x = 0;
|
551 |
for (int i = 0; i < kIntptrSize; i++) { |
552 |
x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
|
553 |
} |
554 |
rinfo_.data_ = x; |
555 |
} |
556 |
|
557 |
|
558 |
void RelocIterator::AdvanceReadVariableLengthPCJump() {
|
559 |
// Read the 32-kSmallPCDeltaBits most significant bits of the
|
560 |
// pc jump in kChunkBits bit chunks and shift them into place.
|
561 |
// Stop when the last chunk is encountered.
|
562 |
uint32_t pc_jump = 0;
|
563 |
for (int i = 0; i < kIntSize; i++) { |
564 |
byte pc_jump_part = *--pos_; |
565 |
pc_jump |= (pc_jump_part >> kLastChunkTagBits) << i * kChunkBits; |
566 |
if ((pc_jump_part & kLastChunkTagMask) == 1) break; |
567 |
} |
568 |
// The least significant kSmallPCDeltaBits bits will be added
|
569 |
// later.
|
570 |
rinfo_.pc_ += pc_jump << kSmallPCDeltaBits; |
571 |
} |
572 |
|
573 |
|
574 |
inline int RelocIterator::GetLocatableTypeTag() { |
575 |
return *pos_ & ((1 << kLocatableTypeTagBits) - 1); |
576 |
} |
577 |
|
578 |
|
579 |
inline void RelocIterator::ReadTaggedId() { |
580 |
int8_t signed_b = *pos_; |
581 |
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
|
582 |
last_id_ += signed_b >> kLocatableTypeTagBits; |
583 |
rinfo_.data_ = last_id_; |
584 |
} |
585 |
|
586 |
|
587 |
inline void RelocIterator::ReadTaggedPosition() { |
588 |
int8_t signed_b = *pos_; |
589 |
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
|
590 |
last_position_ += signed_b >> kLocatableTypeTagBits; |
591 |
rinfo_.data_ = last_position_; |
592 |
} |
593 |
|
594 |
|
595 |
// Map a 2-bit position type tag back to its RelocInfo mode.
static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
  ASSERT(tag == kNonstatementPositionTag ||
         tag == kStatementPositionTag);
  if (tag == kNonstatementPositionTag) {
    return RelocInfo::POSITION;
  }
  return RelocInfo::STATEMENT_POSITION;
}
602 |
|
603 |
|
604 |
void RelocIterator::next() {
|
605 |
ASSERT(!done()); |
606 |
// Basically, do the opposite of RelocInfoWriter::Write.
|
607 |
// Reading of data is as far as possible avoided for unwanted modes,
|
608 |
// but we must always update the pc.
|
609 |
//
|
610 |
// We exit this loop by returning when we find a mode we want.
|
611 |
while (pos_ > end_) {
|
612 |
int tag = AdvanceGetTag();
|
613 |
if (tag == kEmbeddedObjectTag) {
|
614 |
ReadTaggedPC(); |
615 |
if (SetMode(RelocInfo::EMBEDDED_OBJECT)) return; |
616 |
} else if (tag == kCodeTargetTag) { |
617 |
ReadTaggedPC(); |
618 |
if (SetMode(RelocInfo::CODE_TARGET)) return; |
619 |
} else if (tag == kLocatableTag) { |
620 |
ReadTaggedPC(); |
621 |
Advance(); |
622 |
int locatable_tag = GetLocatableTypeTag();
|
623 |
if (locatable_tag == kCodeWithIdTag) {
|
624 |
if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
|
625 |
ReadTaggedId(); |
626 |
return;
|
627 |
} |
628 |
} else {
|
629 |
// Compact encoding is never used for comments,
|
630 |
// so it must be a position.
|
631 |
ASSERT(locatable_tag == kNonstatementPositionTag || |
632 |
locatable_tag == kStatementPositionTag); |
633 |
if (mode_mask_ & RelocInfo::kPositionMask) {
|
634 |
ReadTaggedPosition(); |
635 |
if (SetMode(GetPositionModeFromTag(locatable_tag))) return; |
636 |
} |
637 |
} |
638 |
} else {
|
639 |
ASSERT(tag == kDefaultTag); |
640 |
int extra_tag = GetExtraTag();
|
641 |
if (extra_tag == kPCJumpExtraTag) {
|
642 |
if (GetTopTag() == kVariableLengthPCJumpTopTag) {
|
643 |
AdvanceReadVariableLengthPCJump(); |
644 |
} else {
|
645 |
AdvanceReadPC(); |
646 |
} |
647 |
} else if (extra_tag == kDataJumpExtraTag) { |
648 |
int locatable_tag = GetTopTag();
|
649 |
if (locatable_tag == kCodeWithIdTag) {
|
650 |
if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
|
651 |
AdvanceReadId(); |
652 |
return;
|
653 |
} |
654 |
Advance(kIntSize); |
655 |
} else if (locatable_tag != kCommentTag) { |
656 |
ASSERT(locatable_tag == kNonstatementPositionTag || |
657 |
locatable_tag == kStatementPositionTag); |
658 |
if (mode_mask_ & RelocInfo::kPositionMask) {
|
659 |
AdvanceReadPosition(); |
660 |
if (SetMode(GetPositionModeFromTag(locatable_tag))) return; |
661 |
} else {
|
662 |
Advance(kIntSize); |
663 |
} |
664 |
} else {
|
665 |
ASSERT(locatable_tag == kCommentTag); |
666 |
if (SetMode(RelocInfo::COMMENT)) {
|
667 |
AdvanceReadData(); |
668 |
return;
|
669 |
} |
670 |
Advance(kIntptrSize); |
671 |
} |
672 |
} else if ((extra_tag == kConstPoolExtraTag) && |
673 |
(GetTopTag() == kConstPoolTag)) { |
674 |
if (SetMode(RelocInfo::CONST_POOL)) {
|
675 |
AdvanceReadConstPoolData(); |
676 |
return;
|
677 |
} |
678 |
Advance(kIntSize); |
679 |
} else {
|
680 |
AdvanceReadPC(); |
681 |
int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
|
682 |
if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return; |
683 |
} |
684 |
} |
685 |
} |
686 |
if (code_age_sequence_ != NULL) { |
687 |
byte* old_code_age_sequence = code_age_sequence_; |
688 |
code_age_sequence_ = NULL;
|
689 |
if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
|
690 |
rinfo_.data_ = 0;
|
691 |
rinfo_.pc_ = old_code_age_sequence; |
692 |
return;
|
693 |
} |
694 |
} |
695 |
done_ = true;
|
696 |
} |
697 |
|
698 |
|
699 |
RelocIterator::RelocIterator(Code* code, int mode_mask) {
|
700 |
rinfo_.host_ = code; |
701 |
rinfo_.pc_ = code->instruction_start(); |
702 |
rinfo_.data_ = 0;
|
703 |
// Relocation info is read backwards.
|
704 |
pos_ = code->relocation_start() + code->relocation_size(); |
705 |
end_ = code->relocation_start(); |
706 |
done_ = false;
|
707 |
mode_mask_ = mode_mask; |
708 |
last_id_ = 0;
|
709 |
last_position_ = 0;
|
710 |
byte* sequence = code->FindCodeAgeSequence(); |
711 |
if (sequence != NULL && !Code::IsYoungSequence(sequence)) { |
712 |
code_age_sequence_ = sequence; |
713 |
} else {
|
714 |
code_age_sequence_ = NULL;
|
715 |
} |
716 |
if (mode_mask_ == 0) pos_ = end_; |
717 |
next(); |
718 |
} |
719 |
|
720 |
|
721 |
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) { |
722 |
rinfo_.pc_ = desc.buffer; |
723 |
rinfo_.data_ = 0;
|
724 |
// Relocation info is read backwards.
|
725 |
pos_ = desc.buffer + desc.buffer_size; |
726 |
end_ = pos_ - desc.reloc_size; |
727 |
done_ = false;
|
728 |
mode_mask_ = mode_mask; |
729 |
last_id_ = 0;
|
730 |
last_position_ = 0;
|
731 |
code_age_sequence_ = NULL;
|
732 |
if (mode_mask_ == 0) pos_ = end_; |
733 |
next(); |
734 |
} |
735 |
|
736 |
|
737 |
// -----------------------------------------------------------------------------
|
738 |
// Implementation of RelocInfo
|
739 |
|
740 |
|
741 |
#ifdef DEBUG
|
742 |
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) { |
743 |
// Ensure there are no code targets or embedded objects present in the
|
744 |
// deoptimization entries, they would require relocation after code
|
745 |
// generation.
|
746 |
int mode_mask = RelocInfo::kCodeTargetMask |
|
747 |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | |
748 |
RelocInfo::ModeMask(RelocInfo::CELL) | |
749 |
RelocInfo::kApplyMask; |
750 |
RelocIterator it(desc, mode_mask); |
751 |
return !it.done();
|
752 |
} |
753 |
#endif
|
754 |
|
755 |
|
756 |
#ifdef ENABLE_DISASSEMBLER
|
757 |
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) { |
758 |
switch (rmode) {
|
759 |
case RelocInfo::NONE32:
|
760 |
return "no reloc 32"; |
761 |
case RelocInfo::NONE64:
|
762 |
return "no reloc 64"; |
763 |
case RelocInfo::EMBEDDED_OBJECT:
|
764 |
return "embedded object"; |
765 |
case RelocInfo::CONSTRUCT_CALL:
|
766 |
return "code target (js construct call)"; |
767 |
case RelocInfo::CODE_TARGET_CONTEXT:
|
768 |
return "code target (context)"; |
769 |
case RelocInfo::DEBUG_BREAK:
|
770 |
#ifndef ENABLE_DEBUGGER_SUPPORT
|
771 |
UNREACHABLE(); |
772 |
#endif
|
773 |
return "debug break"; |
774 |
case RelocInfo::CODE_TARGET:
|
775 |
return "code target"; |
776 |
case RelocInfo::CODE_TARGET_WITH_ID:
|
777 |
return "code target with id"; |
778 |
case RelocInfo::CELL:
|
779 |
return "property cell"; |
780 |
case RelocInfo::RUNTIME_ENTRY:
|
781 |
return "runtime entry"; |
782 |
case RelocInfo::JS_RETURN:
|
783 |
return "js return"; |
784 |
case RelocInfo::COMMENT:
|
785 |
return "comment"; |
786 |
case RelocInfo::POSITION:
|
787 |
return "position"; |
788 |
case RelocInfo::STATEMENT_POSITION:
|
789 |
return "statement position"; |
790 |
case RelocInfo::EXTERNAL_REFERENCE:
|
791 |
return "external reference"; |
792 |
case RelocInfo::INTERNAL_REFERENCE:
|
793 |
return "internal reference"; |
794 |
case RelocInfo::CONST_POOL:
|
795 |
return "constant pool"; |
796 |
case RelocInfo::DEBUG_BREAK_SLOT:
|
797 |
#ifndef ENABLE_DEBUGGER_SUPPORT
|
798 |
UNREACHABLE(); |
799 |
#endif
|
800 |
return "debug break slot"; |
801 |
case RelocInfo::CODE_AGE_SEQUENCE:
|
802 |
return "code_age_sequence"; |
803 |
case RelocInfo::NUMBER_OF_MODES:
|
804 |
UNREACHABLE(); |
805 |
return "number_of_modes"; |
806 |
} |
807 |
return "unknown relocation type"; |
808 |
} |
809 |
|
810 |
|
811 |
// Print a human-readable description of this relocation entry to |out|
// (disassembler builds only).
void RelocInfo::Print(Isolate* isolate, FILE* out) {
  PrintF(out, "%p %s", pc_, RelocModeName(rmode_));
  if (IsComment(rmode_)) {
    PrintF(out, " (%s)", reinterpret_cast<char*>(data_));
  } else if (rmode_ == EMBEDDED_OBJECT) {
    PrintF(out, " (");
    target_object()->ShortPrint(out);
    PrintF(out, ")");
  } else if (rmode_ == EXTERNAL_REFERENCE) {
    ExternalReferenceEncoder ref_encoder(isolate);
    PrintF(out, " (%s) (%p)",
           ref_encoder.NameOfAddress(*target_reference_address()),
           *target_reference_address());
  } else if (IsCodeTarget(rmode_)) {
    Code* code = Code::GetCodeFromTargetAddress(target_address());
    PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
           target_address());
    if (rmode_ == CODE_TARGET_WITH_ID) {
      // Fix: pass |out| like every other branch; previously this call
      // omitted the FILE* argument and printed the id to stdout instead
      // of the requested stream.
      PrintF(out, " (id=%d)", static_cast<int>(data_));
    }
  } else if (IsPosition(rmode_)) {
    PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
  } else if (IsRuntimeEntry(rmode_) &&
             isolate->deoptimizer_data() != NULL) {
    // Deoptimization bailouts are stored as runtime entries.
    int id = Deoptimizer::GetDeoptimizationId(
        isolate, target_address(), Deoptimizer::EAGER);
    if (id != Deoptimizer::kNotDeoptimizationEntry) {
      PrintF(out, " (deoptimization bailout %d)", id);
    }
  }

  PrintF(out, "\n");
}
845 |
#endif // ENABLE_DISASSEMBLER |
846 |
|
847 |
|
848 |
#ifdef VERIFY_HEAP
|
849 |
// Heap-verification hook: checks that the pointer/address encoded in this
// reloc entry is internally consistent for its mode.  Compiled only under
// VERIFY_HEAP (see the surrounding #ifdef).
void RelocInfo::Verify() {
  switch (rmode_) {
    case EMBEDDED_OBJECT:
      Object::VerifyPointer(target_object());
      break;
    case CELL:
      Object::VerifyPointer(target_cell());
      break;
    case DEBUG_BREAK:
#ifndef ENABLE_DEBUGGER_SUPPORT
      UNREACHABLE();
      break;
#endif
      // NOTE: with debugger support enabled, DEBUG_BREAK deliberately falls
      // through and is verified like a code target.
    case CONSTRUCT_CALL:
    case CODE_TARGET_CONTEXT:
    case CODE_TARGET_WITH_ID:
    case CODE_TARGET: {
      // convert inline target address to code object
      Address addr = target_address();
      CHECK(addr != NULL);
      // Check that we can find the right code object.
      Code* code = Code::GetCodeFromTargetAddress(addr);
      Object* found = code->GetIsolate()->FindCodeObject(addr);
      CHECK(found->IsCode());
      CHECK(code->address() == HeapObject::cast(found)->address());
      break;
    }
    // The following modes carry no heap pointer, so there is nothing to
    // verify for them.
    case RUNTIME_ENTRY:
    case JS_RETURN:
    case COMMENT:
    case POSITION:
    case STATEMENT_POSITION:
    case EXTERNAL_REFERENCE:
    case INTERNAL_REFERENCE:
    case CONST_POOL:
    case DEBUG_BREAK_SLOT:
    case NONE32:
    case NONE64:
      break;
    case NUMBER_OF_MODES:
      // Sentinel value, never a real mode.
      UNREACHABLE();
      break;
    case CODE_AGE_SEQUENCE:
      ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
      break;
  }
}
896 |
#endif // VERIFY_HEAP |
897 |
|
898 |
|
899 |
// -----------------------------------------------------------------------------
|
900 |
// Implementation of ExternalReference
|
901 |
|
902 |
// One-time initialization of the file-static double constants referenced by
// generated code, plus the mutex guarding lazy math-exp initialization.
// Presumably called once during VM setup before any code generation —
// TODO confirm against the caller.
void ExternalReference::SetUp() {
  double_constants.min_int = kMinInt;
  double_constants.one_half = 0.5;
  double_constants.minus_one_half = -0.5;
  double_constants.minus_zero = -0.0;
  double_constants.uint8_max_value = 255;
  double_constants.zero = 0.0;
  double_constants.canonical_non_hole_nan = OS::nan_value();
  // The "hole" NaN has a distinguished bit pattern, so it must be built from
  // the raw integer representation rather than a NaN expression.
  double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
  double_constants.negative_infinity = -V8_INFINITY;
  // 2^32, used to convert uint32 values to doubles.
  double_constants.uint32_bias =
    static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;

  math_exp_data_mutex = new Mutex();
}
917 |
|
918 |
|
919 |
// Lazily builds the constant and log tables used by the generated fast
// Math.exp code.  Uses the check/lock/re-check pattern: the unsynchronized
// early return is an optimization, and the flag is tested again under the
// mutex before building the tables.
void ExternalReference::InitializeMathExpData() {
  // Early return?
  if (math_exp_data_initialized) return;

  LockGuard<Mutex> lock_guard(math_exp_data_mutex);
  if (!math_exp_data_initialized) {
    // If this is changed, generated code must be adapted too.
    const int kTableSizeBits = 11;
    const int kTableSize = 1 << kTableSizeBits;
    const double kTableSizeDouble = static_cast<double>(kTableSize);

    math_exp_constants_array = new double[9];
    // Input values smaller than this always return 0.
    math_exp_constants_array[0] = -708.39641853226408;
    // Input values larger than this always return +Infinity.
    math_exp_constants_array[1] = 709.78271289338397;
    math_exp_constants_array[2] = V8_INFINITY;
    // The rest is black magic. Do not attempt to understand it. It is
    // loosely based on the "expd" function published at:
    // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
    const double constant3 = (1 << kTableSizeBits) / log(2.0);
    math_exp_constants_array[3] = constant3;
    math_exp_constants_array[4] =
        static_cast<double>(static_cast<int64_t>(3) << 51);
    math_exp_constants_array[5] = 1 / constant3;
    math_exp_constants_array[6] = 3.0000000027955394;
    math_exp_constants_array[7] = 0.16666666685227835;
    math_exp_constants_array[8] = 1;

    // Table of the mantissa bits of 2^(i/kTableSize) for each i.
    math_exp_log_table_array = new double[kTableSize];
    for (int i = 0; i < kTableSize; i++) {
      double value = pow(2, i / kTableSizeDouble);
      uint64_t bits = BitCast<uint64_t, double>(value);
      // Mask off everything but the low 52 mantissa bits.
      bits &= (static_cast<uint64_t>(1) << 52) - 1;
      double mantissa = BitCast<double, uint64_t>(bits);
      math_exp_log_table_array[i] = mantissa;
    }

    // Publish only after the tables are fully built.
    math_exp_data_initialized = true;
  }
}
960 |
|
961 |
|
962 |
void ExternalReference::TearDownMathExpData() {
|
963 |
delete[] math_exp_constants_array;
|
964 |
delete[] math_exp_log_table_array;
|
965 |
delete math_exp_data_mutex;
|
966 |
} |
967 |
|
968 |
|
969 |
// -----------------------------------------------------------------------------
// ExternalReference constructors.  Each wraps one kind of VM-internal address
// so it can be embedded in generated code.  Redirect() wraps C++ entry points
// — presumably for simulator builds; confirm against Redirect's definition.

// C function implementing a builtin, redirected through the builtin table.
ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
  : address_(Redirect(isolate, Builtins::c_function_address(id))) {}


// API callback function; |type| selects the calling convention used by the
// redirector.
ExternalReference::ExternalReference(
    ApiFunction* fun,
    Type type = ExternalReference::BUILTIN_CALL,
    Isolate* isolate = NULL)
  : address_(Redirect(isolate, fun->address(), type)) {}


// Entry address of a generated builtin.
ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
  : address_(isolate->builtins()->builtin_address(name)) {}


// C entry of a runtime function, looked up by id.
ExternalReference::ExternalReference(Runtime::FunctionId id,
                                     Isolate* isolate)
  : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}


// C entry of an already-resolved runtime function.
ExternalReference::ExternalReference(const Runtime::Function* f,
                                     Isolate* isolate)
  : address_(Redirect(isolate, f->entry)) {}


// The isolate itself as an address (passed to C callbacks).
ExternalReference ExternalReference::isolate_address(Isolate* isolate) {
  return ExternalReference(isolate);
}


// IC utility function entry.
ExternalReference::ExternalReference(const IC_Utility& ic_utility,
                                     Isolate* isolate)
  : address_(Redirect(isolate, ic_utility.address())) {}

#ifdef ENABLE_DEBUGGER_SUPPORT
// Debugger-internal address (only in debugger-enabled builds).
ExternalReference::ExternalReference(const Debug_Address& debug_address,
                                     Isolate* isolate)
  : address_(debug_address.address(isolate)) {}
#endif

// Address of a stats counter cell, written directly by generated code.
ExternalReference::ExternalReference(StatsCounter* counter)
  : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}


// Isolate-internal address identified by id.
ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
  : address_(isolate->get_address_from_id(id)) {}


// Stub cache table entry address.
ExternalReference::ExternalReference(const SCTableReference& table_ref)
  : address_(table_ref.address()) {}
1019 |
|
1020 |
|
1021 |
// -----------------------------------------------------------------------------
// Factory functions for references to GC/write-barrier helpers, VM service
// functions and per-isolate counters.  All Redirect(...) calls wrap a C++
// entry point; the plain-address ones point directly at isolate state.

ExternalReference ExternalReference::
    incremental_marking_record_write_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(IncrementalMarking::RecordWriteFromCode)));
}


ExternalReference ExternalReference::
    incremental_evacuation_record_write_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(IncrementalMarking::RecordWriteForEvacuationFromCode)));
}


ExternalReference ExternalReference::
    store_buffer_overflow_function(Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(StoreBuffer::StoreBufferOverflow)));
}


ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
}


ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
  return
      ExternalReference(Redirect(isolate, FUNCTION_ADDR(Runtime::PerformGC)));
}


ExternalReference ExternalReference::fill_heap_number_with_random_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
}


ExternalReference ExternalReference::delete_handle_scope_extensions(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(HandleScope::DeleteExtensions)));
}


ExternalReference ExternalReference::random_uint32_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(V8::Random)));
}


ExternalReference ExternalReference::get_date_field_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(JSDate::GetField)));
}


ExternalReference ExternalReference::get_make_code_young_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
}


ExternalReference ExternalReference::get_mark_code_as_executed_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate, FUNCTION_ADDR(Code::MarkCodeAsExecuted)));
}


// Address of the date cache's stamp, read by generated date code.
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
  return ExternalReference(isolate->date_cache()->stamp_address());
}


// Address of the per-isolate stress-deopt counter.
ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
  return ExternalReference(isolate->stress_deopt_count_address());
}
1106 |
|
1107 |
|
1108 |
// -----------------------------------------------------------------------------
// Factory functions for references to deoptimizer/logger entry points and to
// heap/space bookkeeping addresses that generated code reads and writes
// directly (allocation tops/limits, stack limits, caches).

ExternalReference ExternalReference::transcendental_cache_array_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->transcendental_cache()->cache_array_address());
}


ExternalReference ExternalReference::new_deoptimizer_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
}


ExternalReference ExternalReference::compute_output_frames_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}


ExternalReference ExternalReference::log_enter_external_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
}


ExternalReference ExternalReference::log_leave_external_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
}


ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
  return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}


ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
    Isolate* isolate) {
  return ExternalReference(
      isolate->keyed_lookup_cache()->field_offsets_address());
}


// Start of the roots array; generated code indexes off this address.
ExternalReference ExternalReference::roots_array_start(Isolate* isolate) {
  return ExternalReference(isolate->heap()->roots_array_start());
}


ExternalReference ExternalReference::allocation_sites_list_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->allocation_sites_list_address());
}


// JS stack limit, checked by generated stack-guard code.
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
  return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}


ExternalReference ExternalReference::address_of_real_stack_limit(
    Isolate* isolate) {
  return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
}


ExternalReference ExternalReference::address_of_regexp_stack_limit(
    Isolate* isolate) {
  return ExternalReference(isolate->regexp_stack()->limit_address());
}


ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceStart());
}


ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
  return ExternalReference(isolate->heap()->store_buffer()->TopAddress());
}


// The mask is an integer, smuggled through the Address-typed payload.
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
  return ExternalReference(reinterpret_cast<Address>(
      isolate->heap()->NewSpaceMask()));
}


ExternalReference ExternalReference::new_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::heap_always_allocate_scope_depth(
    Isolate* isolate) {
  Heap* heap = isolate->heap();
  return ExternalReference(heap->always_allocate_scope_depth_address());
}


ExternalReference ExternalReference::new_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::old_pointer_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldPointerSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldPointerSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::old_data_space_allocation_top_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldDataSpaceAllocationTopAddress());
}


ExternalReference ExternalReference::old_data_space_allocation_limit_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->OldDataSpaceAllocationLimitAddress());
}


ExternalReference ExternalReference::
    new_space_high_promotion_mode_active_address(Isolate* isolate) {
  return ExternalReference(
      isolate->heap()->NewSpaceHighPromotionModeActiveAddress());
}
1251 |
|
1252 |
|
1253 |
// -----------------------------------------------------------------------------
// Factory functions for handle-scope bookkeeping addresses, pending-message
// state, and the shared double constants initialized in SetUp().  The
// double-constant accessors are isolate-independent because double_constants
// is a file-static.

ExternalReference ExternalReference::handle_scope_level_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_level_address(isolate));
}


ExternalReference ExternalReference::handle_scope_next_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_next_address(isolate));
}


ExternalReference ExternalReference::handle_scope_limit_address(
    Isolate* isolate) {
  return ExternalReference(HandleScope::current_limit_address(isolate));
}


ExternalReference ExternalReference::scheduled_exception_address(
    Isolate* isolate) {
  return ExternalReference(isolate->scheduled_exception_address());
}


ExternalReference ExternalReference::address_of_pending_message_obj(
    Isolate* isolate) {
  return ExternalReference(isolate->pending_message_obj_address());
}


ExternalReference ExternalReference::address_of_has_pending_message(
    Isolate* isolate) {
  return ExternalReference(isolate->has_pending_message_address());
}


ExternalReference ExternalReference::address_of_pending_message_script(
    Isolate* isolate) {
  return ExternalReference(isolate->pending_message_script_address());
}


ExternalReference ExternalReference::address_of_min_int() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}


ExternalReference ExternalReference::address_of_one_half() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.one_half));
}


ExternalReference ExternalReference::address_of_minus_one_half() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.minus_one_half));
}


ExternalReference ExternalReference::address_of_minus_zero() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.minus_zero));
}


ExternalReference ExternalReference::address_of_zero() {
  return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
}


ExternalReference ExternalReference::address_of_uint8_max_value() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.uint8_max_value));
}


ExternalReference ExternalReference::address_of_negative_infinity() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.negative_infinity));
}


ExternalReference ExternalReference::address_of_canonical_non_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.canonical_non_hole_nan));
}


ExternalReference ExternalReference::address_of_the_hole_nan() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.the_hole_nan));
}


ExternalReference ExternalReference::record_object_allocation_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate,
               FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm)));
}


ExternalReference ExternalReference::address_of_uint32_bias() {
  return ExternalReference(
      reinterpret_cast<void*>(&double_constants.uint32_bias));
}
1358 |
|
1359 |
|
1360 |
#ifndef V8_INTERPRETED_REGEXP
// -----------------------------------------------------------------------------
// References used by natively-compiled regexp code (excluded when regexps run
// in the interpreter).

// Stack-guard check entry; the implementation is per target architecture.
ExternalReference ExternalReference::re_check_stack_guard_state(
    Isolate* isolate) {
  Address function;
#if V8_TARGET_ARCH_X64
  function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
#elif V8_TARGET_ARCH_IA32
  function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
  function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
  function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else
  UNREACHABLE();
#endif
  return ExternalReference(Redirect(isolate, function));
}


ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
}

ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
    Isolate* isolate) {
  return ExternalReference(Redirect(
      isolate,
      FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}


// Table classifying characters as word characters for \w; process-global.
ExternalReference ExternalReference::re_word_character_map() {
  return ExternalReference(
      NativeRegExpMacroAssembler::word_character_map_address());
}

ExternalReference ExternalReference::address_of_static_offsets_vector(
    Isolate* isolate) {
  return ExternalReference(
      reinterpret_cast<Address>(isolate->jsregexp_static_offsets_vector()));
}

ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
    Isolate* isolate) {
  return ExternalReference(
      isolate->regexp_stack()->memory_address());
}

ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
    Isolate* isolate) {
  return ExternalReference(isolate->regexp_stack()->memory_size_address());
}

#endif  // V8_INTERPRETED_REGEXP
1416 |
|
1417 |
|
1418 |
// -----------------------------------------------------------------------------
// C fallbacks for double arithmetic, invoked from generated code via
// double_fp_operation() below.

static double add_two_doubles(double x, double y) {
  return x + y;
}


static double sub_two_doubles(double x, double y) {
  return x - y;
}


static double mul_two_doubles(double x, double y) {
  return x * y;
}


static double div_two_doubles(double x, double y) {
  return x / y;
}


// Uses the project's modulo() helper rather than fmod — presumably for
// JS-conformant edge-case behavior; confirm against modulo's definition.
static double mod_two_doubles(double x, double y) {
  return modulo(x, y);
}
1441 |
|
1442 |
|
1443 |
// -----------------------------------------------------------------------------
// C library fallbacks for transcendental functions, exposed to generated code
// through the math_*_double_function() factories below.

static double math_sin_double(double x) {
  return sin(x);
}


static double math_cos_double(double x) {
  return cos(x);
}


static double math_tan_double(double x) {
  return tan(x);
}


static double math_log_double(double x) {
  return log(x);
}
1461 |
|
1462 |
|
1463 |
// -----------------------------------------------------------------------------
// ExternalReferences to the transcendental helpers above.  BUILTIN_FP_CALL
// marks the double(double) calling convention for the redirector.

ExternalReference ExternalReference::math_sin_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(math_sin_double),
                                    BUILTIN_FP_CALL));
}


ExternalReference ExternalReference::math_cos_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(math_cos_double),
                                    BUILTIN_FP_CALL));
}


ExternalReference ExternalReference::math_tan_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(math_tan_double),
                                    BUILTIN_FP_CALL));
}


ExternalReference ExternalReference::math_log_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(math_log_double),
                                    BUILTIN_FP_CALL));
}
1493 |
|
1494 |
|
1495 |
// Address of one of the constants built by InitializeMathExpData(); callers
// must have triggered that initialization first (hence the ASSERT).
ExternalReference ExternalReference::math_exp_constants(int constant_index) {
  ASSERT(math_exp_data_initialized);
  return ExternalReference(
      reinterpret_cast<void*>(math_exp_constants_array + constant_index));
}


// Base address of the mantissa table built by InitializeMathExpData().
ExternalReference ExternalReference::math_exp_log_table() {
  ASSERT(math_exp_data_initialized);
  return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
}


// Address of a page's flags word, computed from the page base plus the
// fixed MemoryChunk flags offset.
ExternalReference ExternalReference::page_flags(Page* page) {
  return ExternalReference(reinterpret_cast<Address>(page) +
                           MemoryChunk::kFlagsOffset);
}


// Wraps a raw deoptimization table entry address.
ExternalReference ExternalReference::ForDeoptEntry(Address entry) {
  return ExternalReference(entry);
}
1517 |
|
1518 |
|
1519 |
// Computes x^y, dispatching to faster specialized paths when the exponent is
// an exact integer or +/-0.5 before falling back to power_double_double().
double power_helper(double x, double y) {
  int y_int = static_cast<int>(y);
  if (y == y_int) {
    return power_double_int(x, y_int);  // Returns 1 if exponent is 0.
  }
  if (y == 0.5) {
    // sqrt path; infinity is handled explicitly because fast_sqrt may not.
    return (std::isinf(x)) ? V8_INFINITY
                           : fast_sqrt(x + 0.0);  // Convert -0 to +0.
  }
  if (y == -0.5) {
    return (std::isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0);  // Convert -0 to +0.
  }
  return power_double_double(x, y);
}
1533 |
|
1534 |
|
1535 |
// Helper function to compute x^y, where y is known to be an
|
1536 |
// integer. Uses binary decomposition to limit the number of
|
1537 |
// multiplications; see the discussion in "Hacker's Delight" by Henry
|
1538 |
// S. Warren, Jr., figure 11-6, page 213.
|
1539 |
double power_double_int(double x, int y) {
  // For a negative exponent, raise the reciprocal to |y|.
  double m = (y < 0) ? 1 / x : x;
  // Compute |y| in unsigned arithmetic: the previous "-y" invoked undefined
  // behavior (signed overflow) for y == INT_MIN, whereas unsigned negation
  // wraps and yields the correct magnitude.
  unsigned n = (y < 0) ? 0u - static_cast<unsigned>(y)
                       : static_cast<unsigned>(y);
  double p = 1;  // Running product; remains 1 when the exponent is 0.
  // Binary decomposition, consuming two exponent bits per iteration.
  while (n != 0) {
    if ((n & 1) != 0) p *= m;
    m *= m;
    if ((n & 2) != 0) p *= m;
    m *= m;
    n >>= 2;
  }
  return p;
}
1552 |
|
1553 |
|
1554 |
// Computes x^y via the C library pow(), with explicit handling for platform
// quirks and for the IEEE special cases the generated fast path relies on.
double power_double_double(double x, double y) {
#if defined(__MINGW64_VERSION_MAJOR) && \
    (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1)
  // MinGW64 has a custom implementation for pow.  This handles certain
  // special cases that are different.
  if ((x == 0.0 || std::isinf(x)) && std::isfinite(y)) {
    double f;
    // Non-integer exponent of 0 or infinity: pick 0 or infinity by sign.
    if (modf(y, &f) != 0.0) return ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0;
  }

  if (x == 2.0) {
    // Exact powers of two are computed by exponent manipulation.
    int y_int = static_cast<int>(y);
    if (y == y_int) return ldexp(1.0, y_int);
  }
#endif

  // The checks for special cases can be dropped in ia32 because it has already
  // been done in generated code before bailing out here.
  if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
    return OS::nan_value();
  }
  return pow(x, y);
}
1577 |
|
1578 |
|
1579 |
// ExternalReferences to the power helpers above; the BUILTIN_FP_* tags tell
// the redirector which argument signature to use.

ExternalReference ExternalReference::power_double_double_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(power_double_double),
                                    BUILTIN_FP_FP_CALL));
}


ExternalReference ExternalReference::power_double_int_function(
    Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(power_double_int),
                                    BUILTIN_FP_INT_CALL));
}
1593 |
|
1594 |
|
1595 |
static int native_compare_doubles(double y, double x) { |
1596 |
if (x == y) return EQUAL; |
1597 |
return x < y ? LESS : GREATER;
|
1598 |
} |
1599 |
|
1600 |
|
1601 |
// Evaluates the comparison token |op| applied to two doubles.  EQ and
// EQ_STRICT deliberately share a branch: on numbers they are identical.
bool EvalComparison(Token::Value op, double op1, double op2) {
  ASSERT(Token::IsCompareOp(op));
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT: return (op1 == op2);
    case Token::NE: return (op1 != op2);
    case Token::LT: return (op1 < op2);
    case Token::GT: return (op1 > op2);
    case Token::LTE: return (op1 <= op2);
    case Token::GTE: return (op1 >= op2);
    default:
      UNREACHABLE();
      return false;
  }
}
1616 |
|
1617 |
|
1618 |
// Maps an arithmetic token to the matching C double-helper above and wraps it
// as a redirected reference with the double(double, double) convention.
// NOTE(review): after the UNREACHABLE() default, |function| is still NULL and
// is passed to Redirect — relies on UNREACHABLE aborting in debug builds.
ExternalReference ExternalReference::double_fp_operation(
    Token::Value operation, Isolate* isolate) {
  typedef double BinaryFPOperation(double x, double y);
  BinaryFPOperation* function = NULL;
  switch (operation) {
    case Token::ADD:
      function = &add_two_doubles;
      break;
    case Token::SUB:
      function = &sub_two_doubles;
      break;
    case Token::MUL:
      function = &mul_two_doubles;
      break;
    case Token::DIV:
      function = &div_two_doubles;
      break;
    case Token::MOD:
      function = &mod_two_doubles;
      break;
    default:
      UNREACHABLE();
  }
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(function),
                                    BUILTIN_FP_FP_CALL));
}
1645 |
|
1646 |
|
1647 |
// Reference to native_compare_doubles with the compare calling convention.
ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
  return ExternalReference(Redirect(isolate,
                                    FUNCTION_ADDR(native_compare_doubles),
                                    BUILTIN_COMPARE_CALL));
}
1652 |
|
1653 |
|
1654 |
#ifdef ENABLE_DEBUGGER_SUPPORT
|
1655 |
ExternalReference ExternalReference::debug_break(Isolate* isolate) { |
1656 |
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
|
1657 |
} |
1658 |
|
1659 |
|
1660 |
ExternalReference ExternalReference::debug_step_in_fp_address( |
1661 |
Isolate* isolate) { |
1662 |
return ExternalReference(isolate->debug()->step_in_fp_addr());
|
1663 |
} |
1664 |
#endif
|
1665 |
|
1666 |
|
1667 |
// Records |pos| as the current (expression) source position.  The position is
// only buffered here; it is emitted as reloc info later by
// WriteRecordedPositions().  Also forwards the position to GDB JIT line info
// and the code-event logger immediately.
void PositionsRecorder::RecordPosition(int pos) {
  ASSERT(pos != RelocInfo::kNoPosition);
  ASSERT(pos >= 0);
  state_.current_position = pos;
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (gdbjit_lineinfo_ != NULL) {
    // false: this is a plain position, not a statement position.
    gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
  }
#endif
  LOG_CODE_EVENT(assembler_->isolate(),
                 CodeLinePosInfoAddPositionEvent(jit_handler_data_,
                                                 assembler_->pc_offset(),
                                                 pos));
}
1681 |
|
1682 |
|
1683 |
// Records |pos| as the current statement position.  Like RecordPosition(),
// the value is buffered and only written out by WriteRecordedPositions(),
// while GDB JIT and the logger are notified immediately.
void PositionsRecorder::RecordStatementPosition(int pos) {
  ASSERT(pos != RelocInfo::kNoPosition);
  ASSERT(pos >= 0);
  state_.current_statement_position = pos;
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (gdbjit_lineinfo_ != NULL) {
    // true: mark this entry as a statement position.
    gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
  }
#endif
  LOG_CODE_EVENT(assembler_->isolate(),
                 CodeLinePosInfoAddStatementPositionEvent(
                     jit_handler_data_,
                     assembler_->pc_offset(),
                     pos));
}
1698 |
|
1699 |
|
1700 |
// Flushes any buffered source positions as reloc info, deduplicating against
// what was already written.  Returns true if at least one entry was emitted.
bool PositionsRecorder::WriteRecordedPositions() {
  bool written = false;

  // Write the statement position if it is different from what was written last
  // time.
  if (state_.current_statement_position != state_.written_statement_position) {
    EnsureSpace ensure_space(assembler_);
    assembler_->RecordRelocInfo(RelocInfo::STATEMENT_POSITION,
                                state_.current_statement_position);
    state_.written_statement_position = state_.current_statement_position;
    written = true;
  }

  // Write the position if it is different from what was written last time and
  // also different from the written statement position.
  if (state_.current_position != state_.written_position &&
      state_.current_position != state_.written_statement_position) {
    EnsureSpace ensure_space(assembler_);
    assembler_->RecordRelocInfo(RelocInfo::POSITION, state_.current_position);
    state_.written_position = state_.current_position;
    written = true;
  }

  // Return whether something was written.
  return written;
}
1726 |
|
1727 |
} } // namespace v8::internal
|