Module Name:    src
Committed By:   kamil
Date:           Tue Jan  8 05:44:58 UTC 2019

Added Files:
        src/sys/external/bsd/compiler_rt/dist/lib/lsan: lsan.cc lsan.h
            lsan_allocator.cc lsan_allocator.h lsan_common.cc lsan_common.h
            lsan_common_linux.cc lsan_interceptors.cc lsan_thread.cc
            lsan_thread.h

Log Message:
Merge a new version of LSan from upstream compiler-rt.


To generate a diff of this commit:
cvs rdiff -u -r0 -r1.3 src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan.cc \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan.h \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_allocator.cc \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_allocator.h \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common.cc \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common.h \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common_linux.cc \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_interceptors.cc \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_thread.cc \
    src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_thread.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Added files:

Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan.cc
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan.cc:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan.cc	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,118 @@
+//=-- lsan.cc -------------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+
+bool lsan_inited;
+bool lsan_init_is_running;
+
+namespace __lsan {
+
+///// Interface to the common LSan module. /////
+bool WordIsPoisoned(uptr addr) {
+  return false;
+}
+
+}  // namespace __lsan
+
+using namespace __lsan;  // NOLINT
+
+static void InitializeFlags() {
+  // Set all the default values.
+  SetCommonFlagsDefaults();
+  {
+    CommonFlags cf;
+    cf.CopyFrom(*common_flags());
+    cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+    cf.malloc_context_size = 30;
+    cf.intercept_tls_get_addr = true;
+    cf.detect_leaks = true;
+    cf.exitcode = 23;
+    OverrideCommonFlags(cf);
+  }
+
+  Flags *f = flags();
+  f->SetDefaults();
+
+  FlagParser parser;
+  RegisterLsanFlags(&parser, f);
+  RegisterCommonFlags(&parser);
+
+  // Override from user-specified string.
+  const char *lsan_default_options = MaybeCallLsanDefaultOptions();
+  parser.ParseString(lsan_default_options);
+  parser.ParseString(GetEnv("LSAN_OPTIONS"));
+
+  SetVerbosity(common_flags()->verbosity);
+
+  if (Verbosity()) ReportUnrecognizedFlags();
+
+  if (common_flags()->help) parser.PrintFlagDescriptions();
+
+  __sanitizer_set_report_path(common_flags()->log_path);
+}
+
+static void OnStackUnwind(const SignalContext &sig, const void *,
+                          BufferedStackTrace *stack) {
+  GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context,
+                common_flags()->fast_unwind_on_fatal);
+}
+
+static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+  HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
+                     nullptr);
+}
+
+extern "C" void __lsan_init() {
+  CHECK(!lsan_init_is_running);
+  if (lsan_inited)
+    return;
+  lsan_init_is_running = true;
+  SanitizerToolName = "LeakSanitizer";
+  CacheBinaryName();
+  AvoidCVE_2016_2143();
+  InitializeFlags();
+  InitCommonLsan();
+  InitializeAllocator();
+  ReplaceSystemMalloc();
+  InitTlsSize();
+  InitializeInterceptors();
+  InitializeThreadRegistry();
+  InstallDeadlySignalHandlers(LsanOnDeadlySignal);
+  u32 tid = ThreadCreate(0, 0, true);
+  CHECK_EQ(tid, 0);
+  ThreadStart(tid, GetTid());
+  SetCurrentThread(tid);
+
+  if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
+    Atexit(DoLeakCheck);
+
+  InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+  lsan_inited = true;
+  lsan_init_is_running = false;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+  GET_STACK_TRACE_FATAL;
+  stack.Print();
+}
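
For context (illustration only, not part of the imported sources): when this
standalone runtime is linked in via clang's -fsanitize=leak, __lsan_init()
above registers DoLeakCheck with Atexit, so a program that drops its last
reference to a heap block gets a report at exit. A minimal sketch, with a
hypothetical file name:

    // leak_demo.cc -- illustrative only; build with something like:
    //   clang++ -fsanitize=leak -g leak_demo.cc -o leak_demo
    #include <stdlib.h>

    int main() {
      void *p = malloc(7);
      p = 0;  // The last pointer to the block is dropped here, so the
              // atexit leak check reports a direct leak of 7 bytes.
      return 0;
    }
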
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan.h
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan.h:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan.h	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,67 @@
+//=-- lsan.h --------------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Private header for standalone LSan RTL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+#define GET_STACK_TRACE(max_size, fast)                       \
+  __sanitizer::BufferedStackTrace stack;                      \
+  GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \
+                GET_CURRENT_FRAME(), nullptr, fast);
+
+#define GET_STACK_TRACE_FATAL \
+  GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_MALLOC                                      \
+  GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
+                  common_flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
+
+namespace __lsan {
+
+void InitializeInterceptors();
+void ReplaceSystemMalloc();
+
+#define ENSURE_LSAN_INITED do {   \
+  CHECK(!lsan_init_is_running);   \
+  if (!lsan_inited)               \
+    __lsan_init();                \
+} while (0)
+
+// Get the stack trace with the given pc and bp.
+// The pc will be in the position 0 of the resulting stack trace.
+// The bp may refer to the current frame or to the caller's frame.
+ALWAYS_INLINE
+void GetStackTrace(__sanitizer::BufferedStackTrace *stack,
+                   __sanitizer::uptr max_depth, __sanitizer::uptr pc,
+                   __sanitizer::uptr bp, void *context, bool fast) {
+  uptr stack_top = 0, stack_bottom = 0;
+  ThreadContext *t;
+  if (fast && (t = CurrentThreadContext())) {
+    stack_top = t->stack_end();
+    stack_bottom = t->stack_begin();
+  }
+  if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
+    stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+  }
+}
+
+}  // namespace __lsan
+
+extern bool lsan_inited;
+extern bool lsan_init_is_running;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __lsan_init();
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_allocator.cc
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_allocator.cc:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_allocator.cc	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,343 @@
+//=-- lsan_allocator.cc ---------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_allocator.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_allocator.h"
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "lsan_common.h"
+
+extern "C" void *memset(void *ptr, int value, uptr num);
+
+namespace __lsan {
+#if defined(__i386__) || defined(__arm__)
+static const uptr kMaxAllowedMallocSize = 1UL << 30;
+#elif defined(__mips64) || defined(__aarch64__)
+static const uptr kMaxAllowedMallocSize = 4UL << 30;
+#else
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+#endif
+
+static Allocator allocator;
+
+void InitializeAllocator() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator.InitLinkerInitialized(
+      common_flags()->allocator_release_to_os_interval_ms);
+}
+
+void AllocatorThreadFinish() {
+  allocator.SwallowCache(GetAllocatorCache());
+}
+
+static ChunkMetadata *Metadata(const void *p) {
+  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
+}
+
+static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
+  if (!p) return;
+  ChunkMetadata *m = Metadata(p);
+  CHECK(m);
+  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
+  m->stack_trace_id = StackDepotPut(stack);
+  m->requested_size = size;
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
+}
+
+static void RegisterDeallocation(void *p) {
+  if (!p) return;
+  ChunkMetadata *m = Metadata(p);
+  CHECK(m);
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
+}
+
+static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
+  if (AllocatorMayReturnNull()) {
+    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
+    return nullptr;
+  }
+  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
+}
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+               bool cleared) {
+  if (size == 0)
+    size = 1;
+  if (size > kMaxAllowedMallocSize)
+    return ReportAllocationSizeTooBig(size, stack);
+  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+  if (UNLIKELY(!p)) {
+    SetAllocatorOutOfMemory();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportOutOfMemory(size, &stack);
+  }
+  // Do not rely on the allocator to clear the memory (it's slow).
+  if (cleared && allocator.FromPrimary(p))
+    memset(p, 0, size);
+  RegisterAllocation(stack, p, size);
+  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
+  RunMallocHooks(p, size);
+  return p;
+}
+
+static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportCallocOverflow(nmemb, size, &stack);
+  }
+  size *= nmemb;
+  return Allocate(stack, size, 1, true);
+}
+
+void Deallocate(void *p) {
+  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
+  RunFreeHooks(p);
+  RegisterDeallocation(p);
+  allocator.Deallocate(GetAllocatorCache(), p);
+}
+
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+                 uptr alignment) {
+  RegisterDeallocation(p);
+  if (new_size > kMaxAllowedMallocSize) {
+    allocator.Deallocate(GetAllocatorCache(), p);
+    return ReportAllocationSizeTooBig(new_size, stack);
+  }
+  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+  RegisterAllocation(stack, p, new_size);
+  return p;
+}
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end) {
+  *begin = (uptr)GetAllocatorCache();
+  *end = *begin + sizeof(AllocatorCache);
+}
+
+uptr GetMallocUsableSize(const void *p) {
+  ChunkMetadata *m = Metadata(p);
+  if (!m) return 0;
+  return m->requested_size;
+}
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        const StackTrace &stack) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    if (AllocatorMayReturnNull())
+      return errno_EINVAL;
+    ReportInvalidPosixMemalignAlignment(alignment, &stack);
+  }
+  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
+  if (UNLIKELY(!ptr))
+    // OOM error is already taken care of by Allocate.
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+  }
+  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportInvalidAllocationAlignment(alignment, &stack);
+  }
+  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
+}
+
+void lsan_free(void *p) {
+  Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+  return SetErrnoOnNull(Calloc(nmemb, size, stack));
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+  return SetErrnoOnNull(
+      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
+}
+
+void *lsan_pvalloc(uptr size, const StackTrace &stack) {
+  uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportPvallocOverflow(size, &stack);
+  }
+  // pvalloc(0) should allocate one page.
+  size = size ? RoundUpTo(size, PageSize) : PageSize;
+  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
+}
+
+uptr lsan_mz_size(const void *p) {
+  return GetMallocUsableSize(p);
+}
+
+///// Interface to the common LSan module. /////
+
+void LockAllocator() {
+  allocator.ForceLock();
+}
+
+void UnlockAllocator() {
+  allocator.ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+  *begin = (uptr)&allocator;
+  *end = *begin + sizeof(allocator);
+}
+
+uptr PointsIntoChunk(void* p) {
+  uptr addr = reinterpret_cast<uptr>(p);
+  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+  if (!chunk) return 0;
+  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
+  // valid, but we don't want that.
+  if (addr < chunk) return 0;
+  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
+  CHECK(m);
+  if (!m->allocated)
+    return 0;
+  if (addr < chunk + m->requested_size)
+    return chunk;
+  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
+    return chunk;
+  return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+  return chunk;
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
+  CHECK(metadata_);
+}
+
+bool LsanMetadata::allocated() const {
+  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
+}
+
+ChunkTag LsanMetadata::tag() const {
+  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  allocator.ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+  void *chunk = allocator.GetBlockBegin(p);
+  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
+  ChunkMetadata *m = Metadata(chunk);
+  CHECK(m);
+  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
+    if (m->tag == kIgnored)
+      return kIgnoreObjectAlreadyIgnored;
+    m->tag = kIgnored;
+    return kIgnoreObjectSuccess;
+  } else {
+    return kIgnoreObjectInvalid;
+  }
+}
+} // namespace __lsan
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatMapped];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_free_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p) {
+  return GetMallocUsableSize(p);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+  (void)ptr;
+  (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+  (void)ptr;
+}
+#endif
+} // extern "C"
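
The __sanitizer_get_* exports defined at the end of lsan_allocator.cc back the
generic allocator-introspection interface; applications can reach them through
the public <sanitizer/allocator_interface.h> header. A small usage sketch
(illustrative only):

    #include <sanitizer/allocator_interface.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
      void *p = malloc(1000);
      // For LSan this is the requested size recorded in ChunkMetadata.
      printf("allocated size: %zu\n", __sanitizer_get_allocated_size(p));
      printf("owned by the allocator: %d\n", __sanitizer_get_ownership(p));
      printf("heap bytes in use: %zu\n",
             __sanitizer_get_current_allocated_bytes());
      free(p);
      return 0;
    }
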
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_allocator.h
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_allocator.h:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_allocator.h	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,133 @@
+//=-- lsan_allocator.h ----------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Allocator for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_ALLOCATOR_H
+#define LSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "lsan_common.h"
+
+namespace __lsan {
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+               bool cleared);
+void Deallocate(void *p);
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+                 uptr alignment);
+uptr GetMallocUsableSize(const void *p);
+
+template<typename Callable>
+void ForEachChunk(const Callable &callback);
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end);
+void AllocatorThreadFinish();
+void InitializeAllocator();
+
+const bool kAlwaysClearMemory = true;
+
+struct ChunkMetadata {
+  u8 allocated : 8;  // Must be first.
+  ChunkTag tag : 2;
+#if SANITIZER_WORDSIZE == 64
+  uptr requested_size : 54;
+#else
+  uptr requested_size : 32;
+  uptr padding : 22;
+#endif
+  u32 stack_trace_id;
+};
+
+#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
+    defined(__arm__)
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+template <typename AddressSpaceView>
+using ByteMapASVT =
+    TwoLevelByteMap<(kNumRegions >> 12), 1 << 12, AddressSpaceView>;
+
+template <typename AddressSpaceViewTy>
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+  static const uptr kRegionSizeLog = __lsan::kRegionSizeLog;
+  using AddressSpaceView = AddressSpaceViewTy;
+  using ByteMap = __lsan::ByteMapASVT<AddressSpaceView>;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+};
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#elif defined(__x86_64__) || defined(__powerpc64__)
+# if defined(__powerpc64__)
+const uptr kAllocatorSpace = 0xa0000000000ULL;
+const uptr kAllocatorSize  = 0x20000000000ULL;  // 2T.
+# else
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize  = 0x40000000000ULL;  // 4T.
+# endif
+template <typename AddressSpaceViewTy>
+struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = sizeof(ChunkMetadata);
+  typedef DefaultSizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = AddressSpaceViewTy;
+};
+
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#endif
+
+template <typename AddressSpaceView>
+using AllocatorCacheASVT =
+    SizeClassAllocatorLocalCache<PrimaryAllocatorASVT<AddressSpaceView>>;
+using AllocatorCache = AllocatorCacheASVT<LocalAddressSpaceView>;
+
+template <typename AddressSpaceView>
+using SecondaryAllocatorASVT =
+    LargeMmapAllocator<NoOpMapUnmapCallback, DefaultLargeMmapAllocatorPtrArray,
+                       AddressSpaceView>;
+
+template <typename AddressSpaceView>
+using AllocatorASVT =
+    CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>,
+                      AllocatorCacheASVT<AddressSpaceView>,
+                      SecondaryAllocatorASVT<AddressSpaceView>>;
+using Allocator = AllocatorASVT<LocalAddressSpaceView>;
+
+AllocatorCache *GetAllocatorCache();
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        const StackTrace &stack);
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_malloc(uptr size, const StackTrace &stack);
+void lsan_free(void *p);
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
+void *lsan_valloc(uptr size, const StackTrace &stack);
+void *lsan_pvalloc(uptr size, const StackTrace &stack);
+uptr lsan_mz_size(const void *p);
+
+}  // namespace __lsan
+
+#endif  // LSAN_ALLOCATOR_H
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common.cc
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common.cc:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common.cc	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,905 @@
+//=-- lsan_common.cc ------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_common.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+#if CAN_SANITIZE_LEAKS
+namespace __lsan {
+
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
+BlockingMutex global_mutex(LINKER_INITIALIZED);
+
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+  if (common_flags()->detect_leaks) {
+    Report("Unmatched call to __lsan_enable().\n");
+    Die();
+  }
+}
+
+void Flags::SetDefaults() {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
+
+void RegisterLsanFlags(FlagParser *parser, Flags *f) {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &f->Name);
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
+
+#define LOG_POINTERS(...)                           \
+  do {                                              \
+    if (flags()->log_pointers) Report(__VA_ARGS__); \
+  } while (0)
+
+#define LOG_THREADS(...)                           \
+  do {                                             \
+    if (flags()->log_threads) Report(__VA_ARGS__); \
+  } while (0)
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kSuppressionLeak[] = "leak";
+static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char kStdSuppressions[] =
+#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+  // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+  // definition.
+  "leak:*pthread_exit*\n"
+#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+#if SANITIZER_MAC
+  // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+  "leak:*_os_trace*\n"
+#endif
+  // TLS leak in some glibc versions, described in
+  // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+  "leak:*tls_get_addr*\n";
+
+void InitializeSuppressions() {
+  CHECK_EQ(nullptr, suppression_ctx);
+  suppression_ctx = new (suppression_placeholder) // NOLINT
+      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+  suppression_ctx->ParseFromFile(flags()->suppressions);
+  if (&__lsan_default_suppressions)
+    suppression_ctx->Parse(__lsan_default_suppressions());
+  suppression_ctx->Parse(kStdSuppressions);
+}
+
+static SuppressionContext *GetSuppressionContext() {
+  CHECK(suppression_ctx);
+  return suppression_ctx;
+}
+
+static InternalMmapVector<RootRegion> *root_regions;
+
+InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
+
+void InitializeRootRegions() {
+  CHECK(!root_regions);
+  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
+  root_regions = new (placeholder) InternalMmapVector<RootRegion>();  // NOLINT
+}
+
+const char *MaybeCallLsanDefaultOptions() {
+  return (&__lsan_default_options) ? __lsan_default_options() : "";
+}
+
+void InitCommonLsan() {
+  InitializeRootRegions();
+  if (common_flags()->detect_leaks) {
+    // Initialization which can fail or print warnings should only be done if
+    // LSan is actually enabled.
+    InitializeSuppressions();
+    InitializePlatformSpecificModules();
+  }
+}
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+  Decorator() : SanitizerCommonDecorator() { }
+  const char *Error() { return Red(); }
+  const char *Leak() { return Blue(); }
+};
+
+static inline bool CanBeAHeapPointer(uptr p) {
+  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
+  // bound on heap addresses.
+  const uptr kMinAddress = 4 * 4096;
+  if (p < kMinAddress) return false;
+#if defined(__x86_64__)
+  // Accept only canonical form user-space addresses.
+  return ((p >> 47) == 0);
+#elif defined(__mips64)
+  return ((p >> 40) == 0);
+#elif defined(__aarch64__)
+  unsigned runtimeVMA =
+    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+  return ((p >> runtimeVMA) == 0);
+#else
+  return true;
+#endif
+}
+
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
+// There are two usage modes for this function: finding reachable chunks
+// (|tag| = kReachable) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
+void ScanRangeForPointers(uptr begin, uptr end,
+                          Frontier *frontier,
+                          const char *region_type, ChunkTag tag) {
+  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
+  const uptr alignment = flags()->pointer_alignment();
+  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
+  uptr pp = begin;
+  if (pp % alignment)
+    pp = pp + alignment - pp % alignment;
+  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
+    void *p = *reinterpret_cast<void **>(pp);
+    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
+    uptr chunk = PointsIntoChunk(p);
+    if (!chunk) continue;
+    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
+    if (chunk == begin) continue;
+    LsanMetadata m(chunk);
+    if (m.tag() == kReachable || m.tag() == kIgnored) continue;
+
+    // Do this check relatively late so we can log only the interesting cases.
+    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+      LOG_POINTERS(
+          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+          "%zu.\n",
+          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
+      continue;
+    }
+
+    m.set_tag(tag);
+    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
+                 chunk, chunk + m.requested_size(), m.requested_size());
+    if (frontier)
+      frontier->push_back(chunk);
+  }
+}
+
+// Scans a global range for pointers.
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+  uptr allocator_begin = 0, allocator_end = 0;
+  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+  if (begin <= allocator_begin && allocator_begin < end) {
+    CHECK_LE(allocator_begin, allocator_end);
+    CHECK_LE(allocator_end, end);
+    if (begin < allocator_begin)
+      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+                           kReachable);
+    if (allocator_end < end)
+      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+  } else {
+    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+  }
+}
+
+void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
+  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
+  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
+}
+
+// Scans thread data (stacks and TLS) for heap pointers.
+static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
+                           Frontier *frontier) {
+  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
+  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
+  uptr registers_end =
+      reinterpret_cast<uptr>(registers.data() + registers.size());
+  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
+    LOG_THREADS("Processing thread %d.\n", os_id);
+    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
+    DTLS *dtls;
+    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
+                                              &tls_begin, &tls_end,
+                                              &cache_begin, &cache_end, &dtls);
+    if (!thread_found) {
+      // If a thread can't be found in the thread registry, it's probably in the
+      // process of destruction. Log this event and move on.
+      LOG_THREADS("Thread %d not found in registry.\n", os_id);
+      continue;
+    }
+    uptr sp;
+    PtraceRegistersStatus have_registers =
+        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+    if (have_registers != REGISTERS_AVAILABLE) {
+      Report("Unable to get registers from thread %d.\n", os_id);
+      // If unable to get SP, consider the entire stack to be reachable unless
+      // GetRegistersAndSP failed with ESRCH.
+      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
+      sp = stack_begin;
+    }
+
+    if (flags()->use_registers && have_registers)
+      ScanRangeForPointers(registers_begin, registers_end, frontier,
+                           "REGISTERS", kReachable);
+
+    if (flags()->use_stacks) {
+      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
+      if (sp < stack_begin || sp >= stack_end) {
+        // SP is outside the recorded stack range (e.g. the thread is running a
+        // signal handler on alternate stack, or swapcontext was used).
+        // Again, consider the entire stack range to be reachable.
+        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
+        uptr page_size = GetPageSizeCached();
+        int skipped = 0;
+        while (stack_begin < stack_end &&
+               !IsAccessibleMemoryRange(stack_begin, 1)) {
+          skipped++;
+          stack_begin += page_size;
+        }
+        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
+                    skipped, stack_begin, stack_end);
+      } else {
+        // Shrink the stack range to ignore out-of-scope values.
+        stack_begin = sp;
+      }
+      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
+                           kReachable);
+      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
+    }
+
+    if (flags()->use_tls) {
+      if (tls_begin) {
+        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
+        // If the TLS and cache ranges don't overlap, scan the full TLS range;
+        // otherwise, only scan the non-overlapping portions.
+        if (cache_begin == cache_end || tls_end < cache_begin ||
+            tls_begin > cache_end) {
+          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+        } else {
+          if (tls_begin < cache_begin)
+            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
+                                 kReachable);
+          if (tls_end > cache_end)
+            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
+                                 kReachable);
+        }
+      }
+      if (dtls && !DTLSInDestruction(dtls)) {
+        for (uptr j = 0; j < dtls->dtv_size; ++j) {
+          uptr dtls_beg = dtls->dtv[j].beg;
+          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
+          if (dtls_beg < dtls_end) {
+            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
+            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
+                                 kReachable);
+          }
+        }
+      } else {
+        // We are handling a thread with DTLS under destruction. Log about
+        // this and continue.
+        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
+      }
+    }
+  }
+}
+
+void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
+                    uptr region_begin, uptr region_end, bool is_readable) {
+  uptr intersection_begin = Max(root_region.begin, region_begin);
+  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
+  if (intersection_begin >= intersection_end) return;
+  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+               root_region.begin, root_region.begin + root_region.size,
+               region_begin, region_end,
+               is_readable ? "readable" : "unreadable");
+  if (is_readable)
+    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
+                         kReachable);
+}
+
+static void ProcessRootRegion(Frontier *frontier,
+                              const RootRegion &root_region) {
+  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+  MemoryMappedSegment segment;
+  while (proc_maps.Next(&segment)) {
+    ScanRootRegion(frontier, root_region, segment.start, segment.end,
+                   segment.IsReadable());
+  }
+}
+
+// Scans root regions for heap pointers.
+static void ProcessRootRegions(Frontier *frontier) {
+  if (!flags()->use_root_regions) return;
+  CHECK(root_regions);
+  for (uptr i = 0; i < root_regions->size(); i++) {
+    ProcessRootRegion(frontier, (*root_regions)[i]);
+  }
+}
+
+static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
+  while (frontier->size()) {
+    uptr next_chunk = frontier->back();
+    frontier->pop_back();
+    LsanMetadata m(next_chunk);
+    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
+                         "HEAP", tag);
+  }
+}
+
+// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
+// which are reachable from it as indirectly leaked.
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kReachable) {
+    ScanRangeForPointers(chunk, chunk + m.requested_size(),
+                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
+  }
+}
+
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() == kIgnored) {
+    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
+                 chunk, chunk + m.requested_size(), m.requested_size());
+    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+  }
+}
+
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
+  CHECK(stack_id);
+  StackTrace stack = map->Get(stack_id);
+  // The top frame is our malloc/calloc/etc. The next frame is the caller.
+  if (stack.size >= 2)
+    return stack.trace[1];
+  return 0;
+}
+
+struct InvalidPCParam {
+  Frontier *frontier;
+  StackDepotReverseMap *stack_depot_reverse_map;
+  bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+    u32 stack_id = m.stack_trace_id();
+    uptr caller_pc = 0;
+    if (stack_id > 0)
+      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
+    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+    // it as reachable, as we can't properly report its allocation stack anyway.
+    if (caller_pc == 0 || (param->skip_linker_allocations &&
+                           GetLinker()->containsAddress(caller_pc))) {
+      m.set_tag(kReachable);
+      param->frontier->push_back(chunk);
+    }
+  }
+}
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
+// modules accounting etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+void ProcessPC(Frontier *frontier) {
+  StackDepotReverseMap stack_depot_reverse_map;
+  InvalidPCParam arg;
+  arg.frontier = frontier;
+  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+  arg.skip_linker_allocations =
+      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+  ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
+// Sets the appropriate tag on each chunk.
+static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
+  // Holds the flood fill frontier.
+  Frontier frontier;
+
+  ForEachChunk(CollectIgnoredCb, &frontier);
+  ProcessGlobalRegions(&frontier);
+  ProcessThreads(suspended_threads, &frontier);
+  ProcessRootRegions(&frontier);
+  FloodFillTag(&frontier, kReachable);
+
+  CHECK_EQ(0, frontier.size());
+  ProcessPC(&frontier);
+
+  // The check here is relatively expensive, so we do this in a separate flood
+  // fill. That way we can skip the check for chunks that are reachable
+  // otherwise.
+  LOG_POINTERS("Processing platform-specific allocations.\n");
+  ProcessPlatformSpecificAllocations(&frontier);
+  FloodFillTag(&frontier, kReachable);
+
+  // Iterate over leaked chunks and mark those that are reachable from other
+  // leaked chunks.
+  LOG_POINTERS("Scanning leaked chunks.\n");
+  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
+}
+
+// ForEachChunk callback. Resets the tags to pre-leak-check state.
+static void ResetTagsCb(uptr chunk, void *arg) {
+  (void)arg;
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kIgnored)
+    m.set_tag(kDirectlyLeaked);
+}
+
+static void PrintStackTraceById(u32 stack_trace_id) {
+  CHECK(stack_trace_id);
+  StackDepotGet(stack_trace_id).Print();
+}
+
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (!m.allocated()) return;
+  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
+    u32 resolution = flags()->resolution;
+    u32 stack_trace_id = 0;
+    if (resolution > 0) {
+      StackTrace stack = StackDepotGet(m.stack_trace_id());
+      stack.size = Min(stack.size, resolution);
+      stack_trace_id = StackDepotPut(stack);
+    } else {
+      stack_trace_id = m.stack_trace_id();
+    }
+    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
+                                m.tag());
+  }
+}
+
+static void PrintMatchedSuppressions() {
+  InternalMmapVector<Suppression *> matched;
+  GetSuppressionContext()->GetMatched(&matched);
+  if (!matched.size())
+    return;
+  const char *line = "-----------------------------------------------------";
+  Printf("%s\n", line);
+  Printf("Suppressions used:\n");
+  Printf("  count      bytes template\n");
+  for (uptr i = 0; i < matched.size(); i++)
+    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
+        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
+  Printf("%s\n\n", line);
+}
+
+struct CheckForLeaksParam {
+  bool success;
+  LeakReport leak_report;
+};
+
+static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
+  const InternalMmapVector<tid_t> &suspended_threads =
+      *(const InternalMmapVector<tid_t> *)arg;
+  if (tctx->status == ThreadStatusRunning) {
+    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
+                                tctx->os_id, CompareLess<int>());
+    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
+      Report("Running thread %d was not suspended. False leaks are possible.\n",
+             tctx->os_id);
+  };
+}
+
+static void ReportUnsuspendedThreads(
+    const SuspendedThreadsList &suspended_threads) {
+  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
+    threads[i] = suspended_threads.GetThreadID(i);
+
+  Sort(threads.data(), threads.size());
+
+  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+      &ReportIfNotSuspended, &threads);
+}
+
+static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
+                                  void *arg) {
+  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
+  CHECK(param);
+  CHECK(!param->success);
+  ReportUnsuspendedThreads(suspended_threads);
+  ClassifyAllChunks(suspended_threads);
+  ForEachChunk(CollectLeaksCb, &param->leak_report);
+  // Clean up for subsequent leak checks. This assumes we did not overwrite any
+  // kIgnored tags.
+  ForEachChunk(ResetTagsCb, nullptr);
+  param->success = true;
+}
+
+static bool CheckForLeaks() {
+  if (&__lsan_is_turned_off && __lsan_is_turned_off())
+      return false;
+  EnsureMainThreadIDIsCorrect();
+  CheckForLeaksParam param;
+  param.success = false;
+  LockThreadRegistry();
+  LockAllocator();
+  DoStopTheWorld(CheckForLeaksCallback, &param);
+  UnlockAllocator();
+  UnlockThreadRegistry();
+
+  if (!param.success) {
+    Report("LeakSanitizer has encountered a fatal error.\n");
+    Report(
+        "HINT: For debugging, try setting environment variable "
+        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+    Report(
+        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
+    Die();
+  }
+  param.leak_report.ApplySuppressions();
+  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
+  if (unsuppressed_count > 0) {
+    Decorator d;
+    Printf("\n"
+           "================================================================="
+           "\n");
+    Printf("%s", d.Error());
+    Report("ERROR: LeakSanitizer: detected memory leaks\n");
+    Printf("%s", d.Default());
+    param.leak_report.ReportTopLeaks(flags()->max_leaks);
+  }
+  if (common_flags()->print_suppressions)
+    PrintMatchedSuppressions();
+  if (unsuppressed_count > 0) {
+    param.leak_report.PrintSummary();
+    return true;
+  }
+  return false;
+}
+
+static bool has_reported_leaks = false;
+bool HasReportedLeaks() { return has_reported_leaks; }
+
+void DoLeakCheck() {
+  BlockingMutexLock l(&global_mutex);
+  static bool already_done;
+  if (already_done) return;
+  already_done = true;
+  has_reported_leaks = CheckForLeaks();
+  if (has_reported_leaks) HandleLeaks();
+}
+
+static int DoRecoverableLeakCheck() {
+  BlockingMutexLock l(&global_mutex);
+  bool have_leaks = CheckForLeaks();
+  return have_leaks ? 1 : 0;
+}
+
+void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
+
+static Suppression *GetSuppressionForAddr(uptr addr) {
+  Suppression *s = nullptr;
+
+  // Suppress by module name.
+  SuppressionContext *suppressions = GetSuppressionContext();
+  if (const char *module_name =
+          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
+    if (suppressions->Match(module_name, kSuppressionLeak, &s))
+      return s;
+
+  // Suppress by file or function name.
+  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
+  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
+        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
+      break;
+    }
+  }
+  frames->ClearAll();
+  return s;
+}
+
+static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
+  StackTrace stack = StackDepotGet(stack_trace_id);
+  for (uptr i = 0; i < stack.size; i++) {
+    Suppression *s = GetSuppressionForAddr(
+        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
+    if (s) return s;
+  }
+  return nullptr;
+}
+
+///// LeakReport implementation. /////
+
+// A hard limit on the number of distinct leaks, to avoid quadratic complexity
+// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
+// in real-world applications.
+// FIXME: Get rid of this limit by changing the implementation of LeakReport to
+// use a hash table.
+const uptr kMaxLeaksConsidered = 5000;
+
+void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
+                                uptr leaked_size, ChunkTag tag) {
+  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+  bool is_directly_leaked = (tag == kDirectlyLeaked);
+  uptr i;
+  for (i = 0; i < leaks_.size(); i++) {
+    if (leaks_[i].stack_trace_id == stack_trace_id &&
+        leaks_[i].is_directly_leaked == is_directly_leaked) {
+      leaks_[i].hit_count++;
+      leaks_[i].total_size += leaked_size;
+      break;
+    }
+  }
+  if (i == leaks_.size()) {
+    if (leaks_.size() == kMaxLeaksConsidered) return;
+    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
+                  is_directly_leaked, /* is_suppressed */ false };
+    leaks_.push_back(leak);
+  }
+  if (flags()->report_objects) {
+    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+    leaked_objects_.push_back(obj);
+  }
+}
+
+static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
+  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
+    return leak1.total_size > leak2.total_size;
+  else
+    return leak1.is_directly_leaked;
+}
+
+void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
+  CHECK(leaks_.size() <= kMaxLeaksConsidered);
+  Printf("\n");
+  if (leaks_.size() == kMaxLeaksConsidered)
+    Printf("Too many leaks! Only the first %zu leaks encountered will be "
+           "reported.\n",
+           kMaxLeaksConsidered);
+
+  uptr unsuppressed_count = UnsuppressedLeakCount();
+  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
+    Printf("The %zu top leak(s):\n", num_leaks_to_report);
+  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
+  uptr leaks_reported = 0;
+  for (uptr i = 0; i < leaks_.size(); i++) {
+    if (leaks_[i].is_suppressed) continue;
+    PrintReportForLeak(i);
+    leaks_reported++;
+    if (leaks_reported == num_leaks_to_report) break;
+  }
+  if (leaks_reported < unsuppressed_count) {
+    uptr remaining = unsuppressed_count - leaks_reported;
+    Printf("Omitting %zu more leak(s).\n", remaining);
+  }
+}
+
+void LeakReport::PrintReportForLeak(uptr index) {
+  Decorator d;
+  Printf("%s", d.Leak());
+  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
+         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
+         leaks_[index].total_size, leaks_[index].hit_count);
+  Printf("%s", d.Default());
+
+  PrintStackTraceById(leaks_[index].stack_trace_id);
+
+  if (flags()->report_objects) {
+    Printf("Objects leaked above:\n");
+    PrintLeakedObjectsForLeak(index);
+    Printf("\n");
+  }
+}
+
+void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
+  u32 leak_id = leaks_[index].id;
+  for (uptr j = 0; j < leaked_objects_.size(); j++) {
+    if (leaked_objects_[j].leak_id == leak_id)
+      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
+             leaked_objects_[j].size);
+  }
+}
+
+void LeakReport::PrintSummary() {
+  CHECK(leaks_.size() <= kMaxLeaksConsidered);
+  uptr bytes = 0, allocations = 0;
+  for (uptr i = 0; i < leaks_.size(); i++) {
+      if (leaks_[i].is_suppressed) continue;
+      bytes += leaks_[i].total_size;
+      allocations += leaks_[i].hit_count;
+  }
+  InternalScopedString summary(kMaxSummaryLength);
+  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
+                 allocations);
+  ReportErrorSummary(summary.data());
+}
+
+void LeakReport::ApplySuppressions() {
+  for (uptr i = 0; i < leaks_.size(); i++) {
+    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
+    if (s) {
+      s->weight += leaks_[i].total_size;
+      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
+          leaks_[i].hit_count);
+      leaks_[i].is_suppressed = true;
+    }
+  }
+}
+
+uptr LeakReport::UnsuppressedLeakCount() {
+  uptr result = 0;
+  for (uptr i = 0; i < leaks_.size(); i++)
+    if (!leaks_[i].is_suppressed) result++;
+  return result;
+}
+
+} // namespace __lsan
+#else // CAN_SANITIZE_LEAKS
+namespace __lsan {
+void InitCommonLsan() { }
+void DoLeakCheck() { }
+void DoRecoverableLeakCheckVoid() { }
+void DisableInThisThread() { }
+void EnableInThisThread() { }
+}
+#endif // CAN_SANITIZE_LEAKS
+
+using namespace __lsan;  // NOLINT
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_ignore_object(const void *p) {
+#if CAN_SANITIZE_LEAKS
+  if (!common_flags()->detect_leaks)
+    return;
+  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
+  // locked.
+  BlockingMutexLock l(&global_mutex);
+  IgnoreObjectResult res = IgnoreObjectLocked(p);
+  if (res == kIgnoreObjectInvalid)
+    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
+  if (res == kIgnoreObjectAlreadyIgnored)
+    VReport(1, "__lsan_ignore_object(): "
+           "heap object at %p is already being ignored\n", p);
+  if (res == kIgnoreObjectSuccess)
+    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+  BlockingMutexLock l(&global_mutex);
+  CHECK(root_regions);
+  RootRegion region = {reinterpret_cast<uptr>(begin), size};
+  root_regions->push_back(region);
+  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+  BlockingMutexLock l(&global_mutex);
+  CHECK(root_regions);
+  bool removed = false;
+  for (uptr i = 0; i < root_regions->size(); i++) {
+    RootRegion region = (*root_regions)[i];
+    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
+      removed = true;
+      uptr last_index = root_regions->size() - 1;
+      (*root_regions)[i] = (*root_regions)[last_index];
+      root_regions->pop_back();
+      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
+      break;
+    }
+  }
+  if (!removed) {
+    Report(
+        "__lsan_unregister_root_region(): region at %p of size %llu has not "
+        "been registered.\n",
+        begin, size);
+    Die();
+  }
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_disable() {
+#if CAN_SANITIZE_LEAKS
+  __lsan::DisableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_enable() {
+#if CAN_SANITIZE_LEAKS
+  __lsan::EnableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_do_leak_check() {
+#if CAN_SANITIZE_LEAKS
+  if (common_flags()->detect_leaks)
+    __lsan::DoLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __lsan_do_recoverable_leak_check() {
+#if CAN_SANITIZE_LEAKS
+  if (common_flags()->detect_leaks)
+    return __lsan::DoRecoverableLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+  return 0;
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char * __lsan_default_options() {
+  return "";
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+int __lsan_is_turned_off() {
+  return 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions() {
+  return "";
+}
+#endif
+} // extern "C"
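
The extern "C" block above is the whole public surface of standalone LSan. For orientation, here is a minimal usage sketch; it assumes the declarations shipped in <sanitizer/lsan_interface.h> and a build with -fsanitize=leak (allocation sizes and messages are illustrative only):

  // example.cc -- exercise the __lsan_* entry points defined above.
  // Build (assumption): clang++ -fsanitize=leak example.cc
  #include <sanitizer/lsan_interface.h>
  #include <cstdio>
  #include <cstdlib>

  int main() {
    // Never freed, but explicitly ignored, so it is not reported.
    void *ignored = malloc(64);
    __lsan_ignore_object(ignored);

    // Allocations made while detection is disabled are not reported either.
    __lsan_disable();
    malloc(32);
    __lsan_enable();

    // A genuine leak: the pointer is dropped and the block becomes
    // unreachable.
    malloc(128);

    // Unlike the atexit check, the recoverable check does not kill the
    // process; it returns nonzero when unsuppressed leaks were found.
    int found = __lsan_do_recoverable_leak_check();
    printf("recoverable leak check returned %d\n", found);
    return 0;
  }
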
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common.h
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common.h:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common.h	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,265 @@
+//=-- lsan_common.h -------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Private LSan header.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_COMMON_H
+#define LSAN_COMMON_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+// LeakSanitizer relies on some glibc internals (e.g. the TLS machinery) and is
+// therefore supported on Linux only. Also, LSan does not work well on 32-bit
+// architectures, because the "small" (4-byte) pointer size leads to a high
+// false-negative ratio on large leaks. We still want to have it for some
+// 32-bit arches (e.g. x86), see https://github.com/google/sanitizers/issues/403.
+// To enable LeakSanitizer on a new architecture, one needs to implement the
+// internal_clone function as well as (probably) adjust the TLS machinery for
+// the new architecture inside the sanitizer library.
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
+    (SANITIZER_WORDSIZE == 64) &&                               \
+    (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
+     defined(__powerpc64__))
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && \
+    (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__arm__) && \
+    SANITIZER_LINUX && !SANITIZER_ANDROID
+#define CAN_SANITIZE_LEAKS 1
+#else
+#define CAN_SANITIZE_LEAKS 0
+#endif
+
+namespace __sanitizer {
+class FlagParser;
+class ThreadRegistry;
+struct DTLS;
+}
+
+namespace __lsan {
+
+// Chunk tags.
+enum ChunkTag {
+  kDirectlyLeaked = 0,  // default
+  kIndirectlyLeaked = 1,
+  kReachable = 2,
+  kIgnored = 3
+};
+
+const u32 kInvalidTid = (u32) -1;
+
+struct Flags {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+
+  void SetDefaults();
+  uptr pointer_alignment() const {
+    return use_unaligned ? 1 : sizeof(uptr);
+  }
+};
+
+extern Flags lsan_flags;
+inline Flags *flags() { return &lsan_flags; }
+void RegisterLsanFlags(FlagParser *parser, Flags *f);
+
+struct Leak {
+  u32 id;
+  uptr hit_count;
+  uptr total_size;
+  u32 stack_trace_id;
+  bool is_directly_leaked;
+  bool is_suppressed;
+};
+
+struct LeakedObject {
+  u32 leak_id;
+  uptr addr;
+  uptr size;
+};
+
+// Aggregates leaks by stack trace prefix.
+class LeakReport {
+ public:
+  LeakReport() {}
+  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
+                      ChunkTag tag);
+  void ReportTopLeaks(uptr max_leaks);
+  void PrintSummary();
+  void ApplySuppressions();
+  uptr UnsuppressedLeakCount();
+
+ private:
+  void PrintReportForLeak(uptr index);
+  void PrintLeakedObjectsForLeak(uptr index);
+
+  u32 next_id_ = 0;
+  InternalMmapVector<Leak> leaks_;
+  InternalMmapVector<LeakedObject> leaked_objects_;
+};
+
+typedef InternalMmapVector<uptr> Frontier;
+
+// Platform-specific functions.
+void InitializePlatformSpecificModules();
+void ProcessGlobalRegions(Frontier *frontier);
+void ProcessPlatformSpecificAllocations(Frontier *frontier);
+
+struct RootRegion {
+  uptr begin;
+  uptr size;
+};
+
+InternalMmapVector<RootRegion> const *GetRootRegions();
+void ScanRootRegion(Frontier *frontier, RootRegion const &region,
+                    uptr region_begin, uptr region_end, bool is_readable);
+// Run stoptheworld while holding any platform-specific locks.
+void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
+
+void ScanRangeForPointers(uptr begin, uptr end,
+                          Frontier *frontier,
+                          const char *region_type, ChunkTag tag);
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
+
+enum IgnoreObjectResult {
+  kIgnoreObjectSuccess,
+  kIgnoreObjectAlreadyIgnored,
+  kIgnoreObjectInvalid
+};
+
+// Functions called from the parent tool.
+const char *MaybeCallLsanDefaultOptions();
+void InitCommonLsan();
+void DoLeakCheck();
+void DoRecoverableLeakCheckVoid();
+void DisableCounterUnderflow();
+bool DisabledInThisThread();
+
+// Used to implement __lsan::ScopedDisabler.
+void DisableInThisThread();
+void EnableInThisThread();
+// Can be used to ignore memory allocated by an intercepted
+// function.
+struct ScopedInterceptorDisabler {
+  ScopedInterceptorDisabler() { DisableInThisThread(); }
+  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
+};
+
+// According to the Itanium C++ ABI, the array cookie is a single word
+// containing the size of the allocated array.
+static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+                                           uptr addr) {
+  return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+         *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
+// According to the ARM C++ ABI, the array cookie consists of two words:
+// struct array_cookie {
+//   std::size_t element_size; // element_size != 0
+//   std::size_t element_count;
+// };
+static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+                                       uptr addr) {
+  return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
+         *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
+}
+
+// Special case for "new T[0]" where T is a type with DTOR.
+// new T[0] will allocate a cookie (one or two words) for the array size (0)
+// and store a pointer to the end of allocated chunk. The actual cookie layout
+// varies between platforms according to their C++ ABI implementation.
+inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
+                                        uptr addr) {
+#if defined(__arm__)
+  return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
+#else
+  return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
+#endif
+}
+
+// The following must be implemented in the parent tool.
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+// Returns the address range occupied by the global allocator object.
+void GetAllocatorGlobalRange(uptr *begin, uptr *end);
+// Wrappers for allocator's ForceLock()/ForceUnlock().
+void LockAllocator();
+void UnlockAllocator();
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
+// Wrappers for ThreadRegistry access.
+void LockThreadRegistry();
+void UnlockThreadRegistry();
+ThreadRegistry *GetThreadRegistryLocked();
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+                           uptr *cache_end, DTLS **dtls);
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
+                            void *arg);
+// If called from the main thread, updates the main thread's TID in the thread
+// registry. We need this to handle processes that fork() without a subsequent
+// exec(), which invalidates the recorded TID. To update it, we must call
+// gettid() from the main thread. Our solution is to call this function before
+// leak checking and also before every call to pthread_create() (to handle cases
+// where leak checking is initiated from a non-main thread).
+void EnsureMainThreadIDIsCorrect();
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
+// Helper for __lsan_ignore_object().
+IgnoreObjectResult IgnoreObjectLocked(const void *p);
+
+// Return the linker module, if valid for the platform.
+LoadedModule *GetLinker();
+
+// Return true if LSan has finished leak checking and reported leaks.
+bool HasReportedLeaks();
+
+// Run platform-specific leak handlers.
+void HandleLeaks();
+
+// Wrapper for chunk metadata operations.
+class LsanMetadata {
+ public:
+  // Constructor accepts address of user-visible chunk.
+  explicit LsanMetadata(uptr chunk);
+  bool allocated() const;
+  ChunkTag tag() const;
+  void set_tag(ChunkTag value);
+  uptr requested_size() const;
+  u32 stack_trace_id() const;
+ private:
+  void *metadata_;
+};
+
+}  // namespace __lsan
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+int __lsan_is_turned_off();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions();
+}  // extern "C"
+
+#endif  // LSAN_COMMON_H
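
A note on the Flags struct above: it is populated with the usual sanitizer X-macro trick, where the flag list is expanded once to declare the fields and again (in SetDefaults() and RegisterLsanFlags()) to act on each flag. The real list lives in lsan_flags.inc, which is not part of this commit; the sketch below inlines a made-up flag list into a macro purely to stay self-contained:

  #include <cstdio>

  // Made-up flag list; the real one is #include'd from lsan_flags.inc.
  #define DEMO_FLAGS(X)                                \
    X(bool, use_globals, true, "Scan global regions")  \
    X(int, max_leaks, 0, "Limit on reported leaks")

  struct DemoFlags {
    // First expansion: declare one field per flag.
  #define DEMO_FLAG(Type, Name, DefaultValue, Description) Type Name;
    DEMO_FLAGS(DEMO_FLAG)
  #undef DEMO_FLAG

    void SetDefaults() {
      // Second expansion: assign the default value to each field.
  #define DEMO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
      DEMO_FLAGS(DEMO_FLAG)
  #undef DEMO_FLAG
    }
  };

  int main() {
    DemoFlags f;
    f.SetDefaults();
    printf("use_globals=%d max_leaks=%d\n", f.use_globals, f.max_leaks);
    return 0;
  }
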
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common_linux.cc
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common_linux.cc:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_common_linux.cc	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,140 @@
+//=-- lsan_common_linux.cc ------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Linux-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
+#include <link.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_getauxval.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __lsan {
+
+static const char kLinkerName[] = "ld";
+
+static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
+static LoadedModule *linker = nullptr;
+
+static bool IsLinker(const LoadedModule& module) {
+#if SANITIZER_USE_GETAUXVAL
+  return module.base_address() == getauxval(AT_BASE);
+#else
+  return LibraryNameIs(module.full_name(), kLinkerName);
+#endif  // SANITIZER_USE_GETAUXVAL
+}
+
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+  if (disable_counter == 0) {
+    DisableCounterUnderflow();
+  }
+  disable_counter--;
+}
+
+void InitializePlatformSpecificModules() {
+  ListOfModules modules;
+  modules.init();
+  for (LoadedModule &module : modules) {
+    if (!IsLinker(module))
+      continue;
+    if (linker == nullptr) {
+      linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
+      *linker = module;
+      module = LoadedModule();
+    } else {
+      VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+              "TLS and other allocations originating from linker might be "
+              "falsely reported as leaks.\n", kLinkerName);
+      linker->clear();
+      linker = nullptr;
+      return;
+    }
+  }
+  if (linker == nullptr) {
+    VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
+               "allocations originating from linker might be falsely reported "
+               "as leaks.\n");
+  }
+}
+
+static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
+                                        void *data) {
+  Frontier *frontier = reinterpret_cast<Frontier *>(data);
+  for (uptr j = 0; j < info->dlpi_phnum; j++) {
+    const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
+    // We're looking for .data and .bss sections, which reside in writeable,
+    // loadable segments.
+    if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
+        (phdr->p_memsz == 0))
+      continue;
+    uptr begin = info->dlpi_addr + phdr->p_vaddr;
+    uptr end = begin + phdr->p_memsz;
+    ScanGlobalRange(begin, end, frontier);
+  }
+  return 0;
+}
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+  if (!flags()->use_globals) return;
+  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
+}
+
+LoadedModule *GetLinker() { return linker; }
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
+
+struct DoStopTheWorldParam {
+  StopTheWorldCallback callback;
+  void *argument;
+};
+
+// While calling Die() here is undefined behavior and can potentially
+// cause race conditions, it isn't possible to intercept exit on Linux,
+// so we have no choice but to call Die() from the atexit handler.
+void HandleLeaks() {
+  if (common_flags()->exitcode) Die();
+}
+
+static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
+                                  void *data) {
+  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+  StopTheWorld(param->callback, param->argument);
+  return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+  DoStopTheWorldParam param = {callback, argument};
+  dl_iterate_phdr(DoStopTheWorldCallback, &param);
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX
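
The dl_iterate_phdr() walk in ProcessGlobalRegionsCallback() above is plain glibc API and can be reproduced outside the sanitizer. The standalone Linux-only sketch below prints every writable, loadable segment (where .data/.bss live) of every loaded module; module names and ranges will of course differ per system:

  #ifndef _GNU_SOURCE
  #define _GNU_SOURCE
  #endif
  #include <link.h>
  #include <cstdio>

  static int PrintWritableSegments(struct dl_phdr_info *info, size_t, void *) {
    for (int i = 0; i < info->dlpi_phnum; i++) {
      const ElfW(Phdr) *phdr = &info->dlpi_phdr[i];
      // Same filter as the LSan callback: writable PT_LOAD segments only.
      if (phdr->p_type != PT_LOAD || !(phdr->p_flags & PF_W) ||
          phdr->p_memsz == 0)
        continue;
      ElfW(Addr) begin = info->dlpi_addr + phdr->p_vaddr;
      printf("%s: [%p, %p)\n",
             info->dlpi_name[0] ? info->dlpi_name : "<main>",
             (void *)begin, (void *)(begin + phdr->p_memsz));
    }
    return 0;  // keep iterating over all loaded modules
  }

  int main() {
    dl_iterate_phdr(PrintWritableSegments, nullptr);
    return 0;
  }
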
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_interceptors.cc
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_interceptors.cc:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_interceptors.cc	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,460 @@
+//=-- lsan_interceptors.cc ------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Interceptors for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+
+#include <stddef.h>
+
+using namespace __lsan;
+
+extern "C" {
+int pthread_attr_init(void *attr);
+int pthread_attr_destroy(void *attr);
+int pthread_attr_getdetachstate(void *attr, int *v);
+int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+int pthread_setspecific(unsigned key, const void *v);
+}
+
+///// Malloc/free interceptors. /////
+
+namespace std {
+  struct nothrow_t;
+  enum class align_val_t: size_t;
+}
+
+#if !SANITIZER_MAC
+INTERCEPTOR(void*, malloc, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_malloc(size, stack);
+}
+
+INTERCEPTOR(void, free, void *p) {
+  ENSURE_LSAN_INITED;
+  lsan_free(p);
+}
+
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
+  if (lsan_init_is_running) {
+    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+    const uptr kCallocPoolSize = 1024;
+    static uptr calloc_memory_for_dlsym[kCallocPoolSize];
+    static uptr allocated;
+    uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
+    void *mem = (void*)&calloc_memory_for_dlsym[allocated];
+    allocated += size_in_words;
+    CHECK(allocated < kCallocPoolSize);
+    return mem;
+  }
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_calloc(nmemb, size, stack);
+}
+
+INTERCEPTOR(void*, realloc, void *q, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_realloc(q, size, stack);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_posix_memalign(memptr, alignment, size, stack);
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_valloc(size, stack);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_memalign(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+
+INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  void *res = lsan_memalign(alignment, size, stack);
+  DTLS_on_libc_memalign(res, size);
+  return res;
+}
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_aligned_alloc(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+  ENSURE_LSAN_INITED;
+  return GetMallocUsableSize(ptr);
+}
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+        INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+struct fake_mallinfo {
+  int x[10];
+};
+
+INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
+  struct fake_mallinfo res;
+  internal_memset(&res, 0, sizeof(res));
+  return res;
+}
+#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+
+INTERCEPTOR(int, mallopt, int cmd, int value) {
+  return 0;
+}
+#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLINFO
+#define LSAN_MAYBE_INTERCEPT_MALLOPT
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR(void*, pvalloc, uptr size) {
+  ENSURE_LSAN_INITED;
+  GET_STACK_TRACE_MALLOC;
+  return lsan_pvalloc(size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_PVALLOC
+#endif // SANITIZER_INTERCEPT_PVALLOC
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
+#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define LSAN_MAYBE_INTERCEPT_CFREE
+#endif // SANITIZER_INTERCEPT_CFREE
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+  return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+  return 0;
+}
+
+INTERCEPTOR(int, mprobe, void *ptr) {
+  return 0;
+}
+#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE
+
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow)\
+  ENSURE_LSAN_INITED;\
+  GET_STACK_TRACE_MALLOC;\
+  void *res = lsan_malloc(size, stack);\
+  if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+  return res;
+#define OPERATOR_NEW_BODY_ALIGN(nothrow)\
+  ENSURE_LSAN_INITED;\
+  GET_STACK_TRACE_MALLOC;\
+  void *res = lsan_memalign((uptr)align, size, stack);\
+  if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+  return res;
+
+#define OPERATOR_DELETE_BODY\
+  ENSURE_LSAN_INITED;\
+  lsan_free(ptr);
+
+// On OS X it's not enough to just provide our own 'operator new' and
+// 'operator delete' implementations, because they're going to be in the runtime
+// dylib, and the main executable will depend on both the runtime dylib and
+// libstdc++, each of which has its own implementation of new and delete.
+// To make sure that C++ allocation/deallocation operators are overridden on
+// OS X we need to intercept them using their mangled names.
+#if !SANITIZER_MAC
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+
+#else  // SANITIZER_MAC
+
+INTERCEPTOR(void *, _Znwm, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _Znam, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+
+INTERCEPTOR(void, _ZdlPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+
+#endif  // !SANITIZER_MAC
+
+
+///// Thread initialization and finalization. /////
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+static unsigned g_thread_finalize_key;
+
+static void thread_finalize(void *v) {
+  uptr iter = (uptr)v;
+  if (iter > 1) {
+    if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
+      Report("LeakSanitizer: failed to set thread key.\n");
+      Die();
+    }
+    return;
+  }
+  ThreadFinish();
+}
+#endif
+
+#if SANITIZER_NETBSD
+INTERCEPTOR(void, _lwp_exit) {
+  ENSURE_LSAN_INITED;
+  ThreadFinish();
+  REAL(_lwp_exit)();
+}
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+#if SANITIZER_INTERCEPT_THR_EXIT
+INTERCEPTOR(void, thr_exit, tid_t *state) {
+  ENSURE_LSAN_INITED;
+  ThreadFinish();
+  REAL(thr_exit)(state);
+}
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
+struct ThreadParam {
+  void *(*callback)(void *arg);
+  void *param;
+  atomic_uintptr_t tid;
+};
+
+extern "C" void *__lsan_thread_start_func(void *arg) {
+  ThreadParam *p = (ThreadParam*)arg;
+  void* (*callback)(void *arg) = p->callback;
+  void *param = p->param;
+  // Wait until the last iteration to maximize the chance that we are the last
+  // destructor to run.
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+  if (pthread_setspecific(g_thread_finalize_key,
+                          (void*)GetPthreadDestructorIterations())) {
+    Report("LeakSanitizer: failed to set thread key.\n");
+    Die();
+  }
+#endif
+  int tid = 0;
+  while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
+    internal_sched_yield();
+  SetCurrentThread(tid);
+  ThreadStart(tid, GetTid());
+  atomic_store(&p->tid, 0, memory_order_release);
+  return callback(param);
+}
+
+INTERCEPTOR(int, pthread_create, void *th, void *attr,
+            void *(*callback)(void *), void *param) {
+  ENSURE_LSAN_INITED;
+  EnsureMainThreadIDIsCorrect();
+  __sanitizer_pthread_attr_t myattr;
+  if (!attr) {
+    pthread_attr_init(&myattr);
+    attr = &myattr;
+  }
+  AdjustStackSize(attr);
+  int detached = 0;
+  pthread_attr_getdetachstate(attr, &detached);
+  ThreadParam p;
+  p.callback = callback;
+  p.param = param;
+  atomic_store(&p.tid, 0, memory_order_relaxed);
+  int res;
+  {
+    // Ignore all allocations made by pthread_create: thread stack/TLS may be
+    // stored by pthread for future reuse even after thread destruction, and
+    // the linked list it's stored in doesn't even hold valid pointers to the
+    // objects; the latter are calculated by obscure pointer arithmetic.
+    ScopedInterceptorDisabler disabler;
+    res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
+  }
+  if (res == 0) {
+    int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
+                           IsStateDetached(detached));
+    CHECK_NE(tid, 0);
+    atomic_store(&p.tid, tid, memory_order_release);
+    while (atomic_load(&p.tid, memory_order_acquire) != 0)
+      internal_sched_yield();
+  }
+  if (attr == &myattr)
+    pthread_attr_destroy(&myattr);
+  return res;
+}
+
+INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+  ENSURE_LSAN_INITED;
+  int tid = ThreadTid((uptr)th);
+  int res = REAL(pthread_join)(th, ret);
+  if (res == 0)
+    ThreadJoin(tid);
+  return res;
+}
+
+INTERCEPTOR(void, _exit, int status) {
+  if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
+  REAL(_exit)(status);
+}
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+namespace __lsan {
+
+void InitializeInterceptors() {
+  InitializeSignalInterceptors();
+
+  INTERCEPT_FUNCTION(malloc);
+  INTERCEPT_FUNCTION(free);
+  LSAN_MAYBE_INTERCEPT_CFREE;
+  INTERCEPT_FUNCTION(calloc);
+  INTERCEPT_FUNCTION(realloc);
+  LSAN_MAYBE_INTERCEPT_MEMALIGN;
+  LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
+  LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
+  INTERCEPT_FUNCTION(posix_memalign);
+  INTERCEPT_FUNCTION(valloc);
+  LSAN_MAYBE_INTERCEPT_PVALLOC;
+  LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
+  LSAN_MAYBE_INTERCEPT_MALLINFO;
+  LSAN_MAYBE_INTERCEPT_MALLOPT;
+  INTERCEPT_FUNCTION(pthread_create);
+  INTERCEPT_FUNCTION(pthread_join);
+  INTERCEPT_FUNCTION(_exit);
+
+  LSAN_MAYBE_INTERCEPT__LWP_EXIT;
+  LSAN_MAYBE_INTERCEPT_THR_EXIT;
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+  if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
+    Report("LeakSanitizer: failed to create thread key.\n");
+    Die();
+  }
+#endif
+}
+
+} // namespace __lsan
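
The tid handshake inside the pthread_create interceptor above is the subtle part: the child must not enter the user callback before the parent has registered it, and the parent must not let the stack-allocated ThreadParam die before the child has read it. The sketch below replays the same protocol with standard C++ primitives (std::thread/std::atomic stand in for the sanitizer-internal ones, and the registry id is made up):

  #include <atomic>
  #include <cstdio>
  #include <thread>

  struct ThreadParam {
    void (*callback)();
    std::atomic<int> tid{0};
  };

  static void ThreadTrampoline(ThreadParam *p) {
    void (*callback)() = p->callback;
    int tid;
    // Spin until the parent has published our registry id.
    while ((tid = p->tid.load(std::memory_order_acquire)) == 0)
      std::this_thread::yield();
    printf("child registered as tid %d\n", tid);
    // Tell the parent we no longer need *p, then run the user code.
    p->tid.store(0, std::memory_order_release);
    callback();
  }

  int main() {
    ThreadParam p;
    p.callback = [] { printf("user callback runs only after registration\n"); };
    std::thread t(ThreadTrampoline, &p);
    p.tid.store(42, std::memory_order_release);  // 42 is an illustrative id
    // Wait until the child has consumed the id before p may go out of scope.
    while (p.tid.load(std::memory_order_acquire) != 0)
      std::this_thread::yield();
    t.join();
    return 0;
  }
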
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_thread.cc
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_thread.cc:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_thread.cc	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,163 @@
+//=-- lsan_thread.cc ------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_thread.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+
+namespace __lsan {
+
+static ThreadRegistry *thread_registry;
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+  void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
+  return new(mem) ThreadContext(tid);
+}
+
+static const uptr kMaxThreads = 1 << 13;
+static const uptr kThreadQuarantineSize = 64;
+
+void InitializeThreadRegistry() {
+  static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+  thread_registry = new(thread_registry_placeholder)
+    ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
+}
+
+ThreadContext::ThreadContext(int tid)
+    : ThreadContextBase(tid),
+      stack_begin_(0),
+      stack_end_(0),
+      cache_begin_(0),
+      cache_end_(0),
+      tls_begin_(0),
+      tls_end_(0),
+      dtls_(nullptr) {}
+
+struct OnStartedArgs {
+  uptr stack_begin, stack_end,
+       cache_begin, cache_end,
+       tls_begin, tls_end;
+  DTLS *dtls;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+  OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg);
+  stack_begin_ = args->stack_begin;
+  stack_end_ = args->stack_end;
+  tls_begin_ = args->tls_begin;
+  tls_end_ = args->tls_end;
+  cache_begin_ = args->cache_begin;
+  cache_end_ = args->cache_end;
+  dtls_ = args->dtls;
+}
+
+void ThreadContext::OnFinished() {
+  AllocatorThreadFinish();
+  DTLS_Destroy();
+}
+
+u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
+  return thread_registry->CreateThread(user_id, detached, parent_tid,
+                                       /* arg */ nullptr);
+}
+
+void ThreadStart(u32 tid, tid_t os_id, bool workerthread) {
+  OnStartedArgs args;
+  uptr stack_size = 0;
+  uptr tls_size = 0;
+  GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
+                       &args.tls_begin, &tls_size);
+  args.stack_end = args.stack_begin + stack_size;
+  args.tls_end = args.tls_begin + tls_size;
+  GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+  args.dtls = DTLS_Get();
+  thread_registry->StartThread(tid, os_id, workerthread, &args);
+}
+
+void ThreadFinish() {
+  thread_registry->FinishThread(GetCurrentThread());
+  SetCurrentThread(kInvalidTid);
+}
+
+ThreadContext *CurrentThreadContext() {
+  if (!thread_registry) return nullptr;
+  if (GetCurrentThread() == kInvalidTid)
+    return nullptr;
+  // No lock needed when getting current thread.
+  return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
+}
+
+static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+  uptr uid = (uptr)arg;
+  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
+    return true;
+  }
+  return false;
+}
+
+u32 ThreadTid(uptr uid) {
+  return thread_registry->FindThread(FindThreadByUid, (void*)uid);
+}
+
+void ThreadJoin(u32 tid) {
+  CHECK_NE(tid, kInvalidTid);
+  thread_registry->JoinThread(tid, /* arg */nullptr);
+}
+
+void EnsureMainThreadIDIsCorrect() {
+  if (GetCurrentThread() == 0)
+    CurrentThreadContext()->os_id = GetTid();
+}
+
+///// Interface to the common LSan module. /////
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+                           uptr *cache_end, DTLS **dtls) {
+  ThreadContext *context = static_cast<ThreadContext *>(
+      thread_registry->FindThreadContextByOsIDLocked(os_id));
+  if (!context) return false;
+  *stack_begin = context->stack_begin();
+  *stack_end = context->stack_end();
+  *tls_begin = context->tls_begin();
+  *tls_end = context->tls_end();
+  *cache_begin = context->cache_begin();
+  *cache_end = context->cache_end();
+  *dtls = context->dtls();
+  return true;
+}
+
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
+                            void *arg) {
+}
+
+void LockThreadRegistry() {
+  thread_registry->Lock();
+}
+
+void UnlockThreadRegistry() {
+  thread_registry->Unlock();
+}
+
+ThreadRegistry *GetThreadRegistryLocked() {
+  thread_registry->CheckLocked();
+  return thread_registry;
+}
+
+} // namespace __lsan
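
InitializeThreadRegistry() above shows a pattern the sanitizers use a lot: construct a long-lived singleton with placement new into static, suitably aligned storage, so that early initialization never touches the heap. A stripped-down sketch of the same idea, with a stand-in Registry type instead of the real ThreadRegistry:

  #include <new>
  #include <cstdio>

  struct Registry {
    explicit Registry(int capacity) : capacity_(capacity) {}
    int capacity() const { return capacity_; }
   private:
    int capacity_;
  };

  // Static, aligned raw storage; never dynamically allocated, never destroyed.
  alignas(64) static char registry_placeholder[sizeof(Registry)];
  static Registry *registry = nullptr;

  void InitializeRegistry() {
    registry = new (registry_placeholder) Registry(/*capacity=*/1 << 13);
  }

  int main() {
    InitializeRegistry();
    printf("capacity = %d\n", registry->capacity());
    return 0;
  }
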
Index: src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_thread.h
diff -u /dev/null src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_thread.h:1.3
--- /dev/null	Tue Jan  8 05:44:58 2019
+++ src/sys/external/bsd/compiler_rt/dist/lib/lsan/lsan_thread.h	Tue Jan  8 05:44:58 2019
@@ -0,0 +1,60 @@
+//=-- lsan_thread.h -------------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Thread registry for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_THREAD_H
+#define LSAN_THREAD_H
+
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+namespace __sanitizer {
+struct DTLS;
+}
+
+namespace __lsan {
+
+class ThreadContext : public ThreadContextBase {
+ public:
+  explicit ThreadContext(int tid);
+  void OnStarted(void *arg) override;
+  void OnFinished() override;
+  uptr stack_begin() { return stack_begin_; }
+  uptr stack_end() { return stack_end_; }
+  uptr tls_begin() { return tls_begin_; }
+  uptr tls_end() { return tls_end_; }
+  uptr cache_begin() { return cache_begin_; }
+  uptr cache_end() { return cache_end_; }
+  DTLS *dtls() { return dtls_; }
+
+ private:
+  uptr stack_begin_, stack_end_,
+       cache_begin_, cache_end_,
+       tls_begin_, tls_end_;
+  DTLS *dtls_;
+};
+
+void InitializeThreadRegistry();
+
+void ThreadStart(u32 tid, tid_t os_id, bool workerthread = false);
+void ThreadFinish();
+u32 ThreadCreate(u32 tid, uptr uid, bool detached);
+void ThreadJoin(u32 tid);
+u32 ThreadTid(uptr uid);
+
+u32 GetCurrentThread();
+void SetCurrentThread(u32 tid);
+ThreadContext *CurrentThreadContext();
+void EnsureMainThreadIDIsCorrect();
+}  // namespace __lsan
+
+#endif  // LSAN_THREAD_H
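
To make the per-thread ranges above concrete: during a leak check each thread's stack, as reported through GetThreadRangesLocked(), is scanned conservatively for words that point into the heap. The sketch below is a simplified illustration of that idea, not LSan's ScanRangeForPointers(); the heap bounds and the scanned "stack" are both stand-ins:

  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>

  struct Range { uintptr_t begin, end; };  // stand-in for real heap bounds

  static int CountHeapReferences(const void *begin, const void *end,
                                 Range heap) {
    int hits = 0;
    uintptr_t p = reinterpret_cast<uintptr_t>(begin);
    uintptr_t stop = reinterpret_cast<uintptr_t>(end);
    // Round up to pointer alignment (cf. Flags::pointer_alignment()).
    p = (p + sizeof(uintptr_t) - 1) & ~(uintptr_t)(sizeof(uintptr_t) - 1);
    for (; p + sizeof(uintptr_t) <= stop; p += sizeof(uintptr_t)) {
      uintptr_t value = *reinterpret_cast<const uintptr_t *>(p);
      // Any word that lands inside the heap range counts as a reference.
      if (value >= heap.begin && value < heap.end) hits++;
    }
    return hits;
  }

  int main() {
    void *chunk = malloc(256);
    Range heap = {reinterpret_cast<uintptr_t>(chunk),
                  reinterpret_cast<uintptr_t>(chunk) + 256};
    // "Stack" region to scan: a local array holding one pointer into the heap.
    void *locals[4] = {nullptr, chunk, nullptr, nullptr};
    printf("references found: %d\n",
           CountHeapReferences(locals, locals + 4, heap));
    free(chunk);
    return 0;
  }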
