The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.

Please select the desired protocol below to get the URL.

This URL has Read-Only access.

Statistics
| Branch: | Revision:

main_repo / deps / v8 / src / platform-macos.cc @ f230a1cf

History | View | Annotate | Download (9.4 KB)

1
// Copyright 2012 the V8 project authors. All rights reserved.
2
// Redistribution and use in source and binary forms, with or without
3
// modification, are permitted provided that the following conditions are
4
// met:
5
//
6
//     * Redistributions of source code must retain the above copyright
7
//       notice, this list of conditions and the following disclaimer.
8
//     * Redistributions in binary form must reproduce the above
9
//       copyright notice, this list of conditions and the following
10
//       disclaimer in the documentation and/or other materials provided
11
//       with the distribution.
12
//     * Neither the name of Google Inc. nor the names of its
13
//       contributors may be used to endorse or promote products derived
14
//       from this software without specific prior written permission.
15
//
16
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27

    
28
// Platform specific code for MacOS goes here. For the POSIX compatible parts
29
// the implementation is in platform-posix.cc.
30

    
31
#include <dlfcn.h>
32
#include <unistd.h>
33
#include <sys/mman.h>
34
#include <mach/mach_init.h>
35
#include <mach-o/dyld.h>
36
#include <mach-o/getsect.h>
37

    
38
#include <AvailabilityMacros.h>
39

    
40
#include <pthread.h>
41
#include <semaphore.h>
42
#include <signal.h>
43
#include <libkern/OSAtomic.h>
44
#include <mach/mach.h>
45
#include <mach/semaphore.h>
46
#include <mach/task.h>
47
#include <mach/vm_statistics.h>
48
#include <sys/time.h>
49
#include <sys/resource.h>
50
#include <sys/types.h>
51
#include <sys/sysctl.h>
52
#include <stdarg.h>
53
#include <stdlib.h>
54
#include <string.h>
55
#include <errno.h>
56

    
57
#undef MAP_TYPE
58

    
59
#include "v8.h"
60

    
61
#include "platform.h"
62
#include "simulator.h"
63
#include "vm-state-inl.h"
64

    
65

    
66
namespace v8 {
67
namespace internal {
68

    
69

    
70
// Constants used for mmap.
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
// All V8 mappings use offset 0 into the (pseudo) backing fd.
static const off_t kMmapFdOffset = 0;
76

    
77

    
78
void* OS::Allocate(const size_t requested,
79
                   size_t* allocated,
80
                   bool is_executable) {
81
  const size_t msize = RoundUp(requested, getpagesize());
82
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
83
  void* mbase = mmap(OS::GetRandomMmapAddr(),
84
                     msize,
85
                     prot,
86
                     MAP_PRIVATE | MAP_ANON,
87
                     kMmapFd,
88
                     kMmapFdOffset);
89
  if (mbase == MAP_FAILED) {
90
    LOG(Isolate::Current(), StringEvent("OS::Allocate", "mmap failed"));
91
    return NULL;
92
  }
93
  *allocated = msize;
94
  return mbase;
95
}
96

    
97

    
98
// Memory-mapped file backed by a stdio stream plus an mmap'ed region.
// Instances are created by the OS::MemoryMappedFile factory functions
// below; the destructor releases both the mapping and the FILE handle.
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
 public:
  // Takes ownership of |file| and of the |size|-byte mapping at |memory|.
  PosixMemoryMappedFile(FILE* file, void* memory, int size)
    : file_(file), memory_(memory), size_(size) { }
  virtual ~PosixMemoryMappedFile();
  virtual void* memory() { return memory_; }
  virtual int size() { return size_; }
 private:
  FILE* file_;    // Owned stdio stream backing the mapping.
  void* memory_;  // Base address of the mapping.
  int size_;      // Size of the mapping in bytes.
};
110

    
111

    
112
// Maps an existing file read/write into memory. Returns NULL if the file
// cannot be opened, its size cannot be determined, or mmap fails. The
// original code stored mmap's return value unchecked, so on failure the
// object held MAP_FAILED ((void*)-1), which the destructor's non-NULL
// check would then hand to OS::Free; it also leaked the FILE handle.
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
  FILE* file = fopen(name, "r+");
  if (file == NULL) return NULL;

  // Determine the file size by seeking to the end.
  if (fseek(file, 0, SEEK_END) != 0) {
    fclose(file);
    return NULL;
  }
  long length = ftell(file);
  if (length < 0) {
    fclose(file);
    return NULL;
  }
  int size = static_cast<int>(length);

  void* memory =
      mmap(OS::GetRandomMmapAddr(),
           size,
           PROT_READ | PROT_WRITE,
           MAP_SHARED,
           fileno(file),
           0);
  if (memory == MAP_FAILED) {
    fclose(file);
    return NULL;
  }
  return new PosixMemoryMappedFile(file, memory, size);
}
128

    
129

    
130
// Creates (truncating) a file of |size| bytes filled from |initial| and
// maps it read/write. Returns NULL on any failure. The original code did
// not check mmap's result, so a failed mapping produced an object holding
// MAP_FAILED that the destructor would later pass to OS::Free.
OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
    void* initial) {
  FILE* file = fopen(name, "w+");
  if (file == NULL) return NULL;
  int result = fwrite(initial, size, 1, file);
  if (result < 1) {
    fclose(file);
    return NULL;
  }
  void* memory =
      mmap(OS::GetRandomMmapAddr(),
          size,
          PROT_READ | PROT_WRITE,
          MAP_SHARED,
          fileno(file),
          0);
  if (memory == MAP_FAILED) {
    fclose(file);
    return NULL;
  }
  return new PosixMemoryMappedFile(file, memory, size);
}
148

    
149

    
150
// Releases the mapping (if one was established) and closes the backing
// stdio stream.
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_ != NULL) {
    OS::Free(memory_, size_);
  }
  fclose(file_);
}
154

    
155

    
156
// Logs the [start, start + size) address range of the __TEXT,__text
// section of every Mach-O image currently loaded in the process (the
// executable and all dynamic libraries), via SharedLibraryEvent.
void OS::LogSharedLibraryAddresses(Isolate* isolate) {
  unsigned int images_count = _dyld_image_count();
  for (unsigned int i = 0; i < images_count; ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == NULL) continue;
#if V8_HOST_ARCH_X64
    // 64-bit images use the mach_header_64 layout and a 64-bit size.
    uint64_t size;
    char* code_ptr = getsectdatafromheader_64(
        reinterpret_cast<const mach_header_64*>(header),
        SEG_TEXT,
        SECT_TEXT,
        &size);
#else
    unsigned int size;
    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
#endif
    if (code_ptr == NULL) continue;
    // The section address from the header is unslid; add the image's
    // ASLR slide to obtain the actual load address.
    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
    LOG(isolate,
        SharedLibraryEvent(_dyld_get_image_name(i), start, start + size));
  }
}
179

    
180

    
181
// Intentionally a no-op on this platform; other ports use this hook to
// mark code-moving GC events for external profilers.
void OS::SignalCodeMovingGC() {
}
183

    
184

    
185
const char* OS::LocalTimezone(double time) {
186
  if (std::isnan(time)) return "";
187
  time_t tv = static_cast<time_t>(floor(time/msPerSecond));
188
  struct tm* t = localtime(&tv);
189
  if (NULL == t) return "";
190
  return t->tm_zone;
191
}
192

    
193

    
194
double OS::LocalTimeOffset() {
195
  time_t tv = time(NULL);
196
  struct tm* t = localtime(&tv);
197
  // tm_gmtoff includes any daylight savings offset, so subtract it.
198
  return static_cast<double>(t->tm_gmtoff * msPerSecond -
199
                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
200
}
201

    
202

    
203
// Creates an empty VirtualMemory: no region is owned until one is
// reserved or assigned.
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
204

    
205

    
206
// Reserves (but does not commit) |size| bytes of address space. On
// failure address_ is NULL and IsReserved() returns false.
VirtualMemory::VirtualMemory(size_t size)
    : address_(ReserveRegion(size)), size_(size) { }
208

    
209

    
210
// Reserves |size| bytes of address space whose base is aligned to
// |alignment| (which must be a multiple of the OS allocation
// granularity). The strategy is to over-reserve size + alignment bytes
// with PROT_NONE, then unmap the unaligned prefix and the excess suffix
// so that exactly the aligned block remains. On failure address_ stays
// NULL and size_ stays 0.
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
    : address_(NULL), size_(0) {
  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
  size_t request_size = RoundUp(size + alignment,
                                static_cast<intptr_t>(OS::AllocateAlignment()));
  // PROT_NONE + MAP_NORESERVE: reserve address space only; pages are
  // committed later via Commit/CommitRegion.
  void* reservation = mmap(OS::GetRandomMmapAddr(),
                           request_size,
                           PROT_NONE,
                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                           kMmapFd,
                           kMmapFdOffset);
  if (reservation == MAP_FAILED) return;

  Address base = static_cast<Address>(reservation);
  Address aligned_base = RoundUp(base, alignment);
  ASSERT_LE(base, aligned_base);

  // Unmap extra memory reserved before and after the desired block.
  if (aligned_base != base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - base);
    OS::Free(base, prefix_size);
    request_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
  ASSERT_LE(aligned_size, request_size);

  // Trim the tail beyond the aligned size, if any remains.
  if (aligned_size != request_size) {
    size_t suffix_size = request_size - aligned_size;
    OS::Free(aligned_base + aligned_size, suffix_size);
    request_size -= suffix_size;
  }

  // After trimming, exactly the aligned block must remain.
  ASSERT(aligned_size == request_size);

  address_ = static_cast<void*>(aligned_base);
  size_ = aligned_size;
}
248

    
249

    
250
// Releases the reserved region, if any. A failed release is asserted in
// debug builds and ignored in release builds.
VirtualMemory::~VirtualMemory() {
  if (!IsReserved()) return;
  bool released = ReleaseRegion(address(), size());
  ASSERT(released);
  USE(released);
}
257

    
258

    
259
bool VirtualMemory::IsReserved() {
260
  return address_ != NULL;
261
}
262

    
263

    
264
void VirtualMemory::Reset() {
265
  address_ = NULL;
266
  size_ = 0;
267
}
268

    
269

    
270
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
271
  return CommitRegion(address, size, is_executable);
272
}
273

    
274

    
275
bool VirtualMemory::Uncommit(void* address, size_t size) {
276
  return UncommitRegion(address, size);
277
}
278

    
279

    
280
bool VirtualMemory::Guard(void* address) {
281
  OS::Guard(address, OS::CommitPageSize());
282
  return true;
283
}
284

    
285

    
286
void* VirtualMemory::ReserveRegion(size_t size) {
287
  void* result = mmap(OS::GetRandomMmapAddr(),
288
                      size,
289
                      PROT_NONE,
290
                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
291
                      kMmapFd,
292
                      kMmapFdOffset);
293

    
294
  if (result == MAP_FAILED) return NULL;
295

    
296
  return result;
297
}
298

    
299

    
300
bool VirtualMemory::CommitRegion(void* address,
301
                                 size_t size,
302
                                 bool is_executable) {
303
  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
304
  if (MAP_FAILED == mmap(address,
305
                         size,
306
                         prot,
307
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
308
                         kMmapFd,
309
                         kMmapFdOffset)) {
310
    return false;
311
  }
312
  return true;
313
}
314

    
315

    
316
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
317
  return mmap(address,
318
              size,
319
              PROT_NONE,
320
              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
321
              kMmapFd,
322
              kMmapFdOffset) != MAP_FAILED;
323
}
324

    
325

    
326
// Unmaps |size| bytes at |address|, returning the address space to the
// operating system. Returns true on success.
bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  int status = munmap(address, size);
  return status == 0;
}
329

    
330

    
331
// This port reports no lazy-commit support, so callers must account for
// committed memory eagerly.
bool VirtualMemory::HasLazyCommits() {
  return false;
}
334

    
335
} }  // namespace v8::internal