unbreak infinite loop under valgrind more robustly. See discussion in https://github.com/gperftools/gperftools/issues/1615. This change avoids the issue even if we're unable to detect valgrind presence.
diff --git a/src/malloc_extension.cc b/src/malloc_extension.cc index 9934c3f..e973343 100644 --- a/src/malloc_extension.cc +++ b/src/malloc_extension.cc
@@ -174,16 +174,8 @@ // if MallocExtension isn't set up yet, it could be we're called // super-early. Trigger tcmalloc initialization and assume it will // set up instance(). - if (!RunningOnValgrind()) { - tc_free(tc_malloc(32)); - } else { - // Valgrind intercepts tc_{malloc,free}, so we do more direct - // "initialize the guts" procedure. We only do that for "valgrind - // path" because we want normal case to exercise - // "tc_malloc/tc_free can be called from here" behavior. - CHECK_CONDITION(!tcmalloc::Static::IsInited()); - tcmalloc::ThreadCache::InitModule(); - } + tcmalloc::ThreadCache::EnsureMallocInitialized(); + return instance(); }
diff --git a/src/tcmalloc.cc b/src/tcmalloc.cc index 32d68e6..9f372eb 100644 --- a/src/tcmalloc.cc +++ b/src/tcmalloc.cc
@@ -1075,7 +1075,7 @@ // ThreadCache::InitModule is not inlined which would cause nallocx to // become non-leaf function with stack frame and stack spills. static ATTRIBUTE_NOINLINE size_t nallocx_slow(size_t size, int flags) { - if (PREDICT_FALSE(!Static::IsInited())) ThreadCache::InitModule(); + ThreadCache::EnsureMallocInitialized(); size_t align = static_cast<size_t>(1ull << (flags & 0x3f)); uint32_t cl;
diff --git a/src/thread_cache.h b/src/thread_cache.h index 513acc4..a3aca1b 100644 --- a/src/thread_cache.h +++ b/src/thread_cache.h
@@ -90,7 +90,13 @@ bool TryRecordAllocationFast(size_t k); - static void InitModule(); + static void InitModule(); + static void EnsureMallocInitialized() { + if (Static::IsInited()) { + return; + } + InitModule(); + } // Return the number of thread heaps in use. static inline int HeapsInUse();