//===-- sanitizer_posix.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_posix.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>

#if SANITIZER_FREEBSD
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented.  So just define it to zero.
#undef  MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

namespace __sanitizer {

// ------------- sanitizer_common.h
uptr GetMmapGranularity() {
  return GetPageSize();
}

bool ErrorIsOOM(error_t err) { return err == ENOMEM; }

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(res, &reserrno)))
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
  IncreaseTotalMmap(size);
  return (void *)res;
}

void UnmapOrDie(void *addr, uptr size, bool raw_report) {
  if (!addr || !size) return;
  uptr res = internal_munmap(addr, size);
  int reserrno;
  if (UNLIKELY(internal_iserror(res, &reserrno)))
    ReportMunmapFailureAndDie(addr, size, reserrno, raw_report);
  DecreaseTotalMmap(size);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANON, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(res, &reserrno))) {
    if (reserrno == ENOMEM)
      return nullptr;
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
  }
  IncreaseTotalMmap(size);
  return (void *)res;
}

// We want to map a chunk of address space aligned to 'alignment'.
// We do it by mapping a bit more and then unmapping redundant pieces.
// We probably can do it with fewer syscalls in some OS-dependent way.
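// A worked example (hypothetical values, assuming a 4K page size): for
// size = 0x4000 and alignment = 0x8000, map_size is 0xC000. If mmap returns
// map_res = 0x2000, the first aligned address inside the mapping is
// res = 0x8000, so we unmap the leading fragment [0x2000, 0x8000) and the
// trailing fragment [0xC000, 0xE000), keeping exactly [0x8000, 0xC000).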
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));
  uptr map_size = size + alignment;
  // mmap maps entire pages, so map_size needs to be rounded up to an
  // integral number of pages.
  // We need to be aware of this size for calculating end and for unmapping
  // fragments before and after the alignment region.
  map_size = RoundUpTo(map_size, GetPageSizeCached());
  uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
  if (UNLIKELY(!map_res))
    return nullptr;
  uptr res = map_res;
  if (!IsAligned(res, alignment)) {
    res = (map_res + alignment - 1) & ~(alignment - 1);
    UnmapOrDie((void*)map_res, res - map_res);
  }
  uptr map_end = map_res + map_size;
  uptr end = res + size;
  end = RoundUpTo(end, GetPageSizeCached());
  if (end != map_end) {
    CHECK_LT(end, map_end);
    UnmapOrDie((void*)end, map_end - end);
  }
  return (void*)res;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr p = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, mem_type);
  int reserrno;
  if (UNLIKELY(internal_iserror(p, &reserrno)))
    ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
  IncreaseTotalMmap(size);
  return (void *)p;
}

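// Note that MAP_FIXED silently replaces any existing mapping in the target
// range, so callers are responsible for knowing that
// [fixed_addr, fixed_addr + size) is safe to clobber.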
static void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem,
                           const char *name) {
  size = RoundUpTo(size, GetPageSizeCached());
  fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
  uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON | MAP_FIXED, name);
  int reserrno;
  if (UNLIKELY(internal_iserror(p, &reserrno))) {
    if (tolerate_enomem && reserrno == ENOMEM)
      return nullptr;
    char mem_type[40];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address %p",
                      (void *)fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
  }
  IncreaseTotalMmap(size);
  return (void *)p;
}

void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
  return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/, name);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
  return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/, name);
}

bool MprotectNoAccess(uptr addr, uptr size) {
  return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
}

bool MprotectReadOnly(uptr addr, uptr size) {
  return 0 == internal_mprotect((void *)addr, size, PROT_READ);
}

bool MprotectReadWrite(uptr addr, uptr size) {
  return 0 == internal_mprotect((void *)addr, size, PROT_READ | PROT_WRITE);
}

#if !SANITIZER_APPLE
void MprotectMallocZones(void *addr, int prot) {}
#endif

fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
  if (ShouldMockFailureToOpen(filename))
    return kInvalidFd;
  int flags;
  switch (mode) {
    case RdOnly: flags = O_RDONLY; break;
    case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
    case RdWr: flags = O_RDWR | O_CREAT; break;
  }
  fd_t res = internal_open(filename, flags, 0660);
  if (internal_iserror(res, errno_p))
    return kInvalidFd;
  return ReserveStandardFds(res);
}

void CloseFile(fd_t fd) {
  internal_close(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  uptr res = internal_read(fd, buff, buff_size);
  if (internal_iserror(res, error_p))
    return false;
  if (bytes_read)
    *bytes_read = res;
  return true;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  uptr res = internal_write(fd, buff, buff_size);
  if (internal_iserror(res, error_p))
    return false;
  if (bytes_written)
    *bytes_written = res;
  return true;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  fd_t fd = OpenFile(file_name, RdOnly);
  CHECK(fd != kInvalidFd);
  uptr fsize = internal_filesize(fd);
  CHECK_NE(fsize, (uptr)-1);
  CHECK_GT(fsize, 0);
  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
  uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
  return internal_iserror(map) ? nullptr : (void *)map;
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  uptr flags = MAP_SHARED;
  if (addr) flags |= MAP_FIXED;
  uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
  int mmap_errno = 0;
  if (internal_iserror(p, &mmap_errno)) {
    Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
           fd, (long long)offset, size, p, mmap_errno);
    return nullptr;
  }
  return (void *)p;
}

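// Both intervals are treated as closed, i.e. [start, end]; note that the
// caller below passes 'segment.end - 1' to convert a half-open segment.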
static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
                                        uptr start2, uptr end2) {
  CHECK(start1 <= end1);
  CHECK(start2 <= end2);
  return (end1 < start2) || (end2 < start1);
}

// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  if (proc_maps.Error())
    return true; // and hope for the best
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (segment.start == segment.end) continue;  // Empty range.
    CHECK_NE(0, segment.end);
    if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
                              range_end))
      return false;
  }
  return true;
}

#if !SANITIZER_APPLE
void DumpProcessMap() {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  const sptr kBufSize = 4095;
  char *filename = (char*)MmapOrDie(kBufSize, __func__);
  MemoryMappedSegment segment(filename, kBufSize);
  Report("Process memory map follows:\n");
  while (proc_maps.Next(&segment)) {
    Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
           segment.filename);
  }
  Report("End of process memory map.\n");
  UnmapOrDie(filename, kBufSize);
}
#endif

const char *GetPwd() {
  return GetEnv("PWD");
}

bool IsPathSeparator(const char c) {
  return c == '/';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsPathSeparator(path[0]);
}

void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  internal_write(fd, buffer, length);
}

bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/false);
  InternalMmapVector<char> buff(kMaxPathLength);
  MemoryMappedSegment segment(buff.data(), buff.size());
  while (proc_maps.Next(&segment)) {
    if (segment.IsExecutable() &&
        internal_strcmp(module, segment.filename) == 0) {
      *start = segment.start;
      *end = segment.end;
      return true;
    }
  }
  return false;
}

uptr SignalContext::GetAddress() const {
  auto si = static_cast<const siginfo_t *>(siginfo);
  return (uptr)si->si_addr;
}

bool SignalContext::IsMemoryAccess() const {
  auto si = static_cast<const siginfo_t *>(siginfo);
  return si->si_signo == SIGSEGV || si->si_signo == SIGBUS;
}

int SignalContext::GetType() const {
  return static_cast<const siginfo_t *>(siginfo)->si_signo;
}

const char *SignalContext::Describe() const {
  switch (GetType()) {
    case SIGFPE:
      return "FPE";
    case SIGILL:
      return "ILL";
    case SIGABRT:
      return "ABRT";
    case SIGSEGV:
      return "SEGV";
    case SIGBUS:
      return "BUS";
    case SIGTRAP:
      return "TRAP";
  }
  return "UNKNOWN SIGNAL";
}

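// Move 'fd' out of the standard-descriptor range [0, 2]: dup it repeatedly
// until the kernel hands back a descriptor above 2, then close the
// temporary low descriptors created along the way (including the original).
// For example, if internal_open returns fd 0 because stdin was closed,
// dup(0) may yield 1, dup(1) may yield 2, and dup(2) yields 3; fds 0-2 are
// then closed and 3 is returned.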
fd_t ReserveStandardFds(fd_t fd) {
  CHECK_GE(fd, 0);
  if (fd > 2)
    return fd;
  bool used[3];
  internal_memset(used, 0, sizeof(used));
  while (fd <= 2) {
    used[fd] = true;
    fd = internal_dup(fd);
  }
  for (int i = 0; i <= 2; ++i)
    if (used[i])
      internal_close(i);
  return fd;
}

bool ShouldMockFailureToOpen(const char *path) {
  return common_flags()->test_only_emulate_no_memorymap &&
         internal_strncmp(path, "/proc/", 6) == 0;
}

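// Returns true if open(2) called with 'oflag' expects the third (mode)
// argument from varargs, i.e. when O_CREAT (or O_TMPFILE, where defined)
// is set.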
bool OpenReadsVaArgs(int oflag) {
#  ifdef O_TMPFILE
  return (oflag & (O_CREAT | O_TMPFILE)) != 0;
#  else
  return (oflag & O_CREAT) != 0;
#  endif
}

#  if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
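// Creates a shared-memory backing file named "/dev/shm/<pid> [<name>]" so
// that the resulting mapping shows up with that label in /proc/self/maps.
// The file is unlinked right away; the returned fd keeps it alive, and
// MAP_ANON is cleared from *flags so mmap uses the file instead.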
int GetNamedMappingFd(const char *name, uptr size, int *flags) {
  if (!common_flags()->decorate_proc_maps || !name)
    return -1;
  char shmname[200];
  CHECK(internal_strlen(name) < sizeof(shmname) - 10);
  internal_snprintf(shmname, sizeof(shmname), "/dev/shm/%zu [%s]",
                    internal_getpid(), name);
  int o_cloexec = 0;
#if defined(O_CLOEXEC)
  o_cloexec = O_CLOEXEC;
#endif
  int fd = ReserveStandardFds(
      internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | o_cloexec, S_IRWXU));
  CHECK_GE(fd, 0);
  int res = internal_ftruncate(fd, size);
#if !defined(O_CLOEXEC)
  res = fcntl(fd, F_SETFD, FD_CLOEXEC);
  CHECK_EQ(0, res);
#endif
  CHECK_EQ(0, res);
  res = internal_unlink(shmname);
  CHECK_EQ(0, res);
  *flags &= ~(MAP_ANON | MAP_ANONYMOUS);
  return fd;
}
#else
int GetNamedMappingFd(const char *name, uptr size, int *flags) {
  return -1;
}
#endif

#if SANITIZER_ANDROID
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
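// Labels the anonymous mapping with 'name' via prctl(PR_SET_VMA_ANON_NAME)
// so it appears in /proc/self/maps; on kernels without support the prctl
// fails and the result is ignored.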
void DecorateMapping(uptr addr, uptr size, const char *name) {
  if (!common_flags()->decorate_proc_maps || !name)
    return;
  internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, (uptr)name);
}
#else
void DecorateMapping(uptr addr, uptr size, const char *name) {
}
#endif

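// mmap wrapper that attaches a human-readable name to the mapping: via a
// decorated /dev/shm file on Linux (see GetNamedMappingFd above) or
// prctl(PR_SET_VMA_ANON_NAME) on Android; elsewhere it degrades to a plain
// anonymous mapping.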
uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name) {
  int fd = GetNamedMappingFd(name, length, &flags);
  uptr res = internal_mmap(addr, length, prot, flags, fd, 0);
  if (!internal_iserror(res))
    DecorateMapping(res, length, name);
  return res;
}

}  // namespace __sanitizer

#endif  // SANITIZER_POSIX