/* -*- Mode: C; tab-width: 8; c-basic-offset: 8 -*- */
/* vim:set softtabstop=8 shiftwidth=8: */
/*-
 * Copyright (C) 2006-2008 Jason Evans <jasone@FreeBSD.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *******************************************************************************
 *
 * This allocator implementation is designed to provide scalable performance
 * for multi-threaded programs on multi-processor systems.  The following
 * features are included for this purpose:
 *
 *   + Multiple arenas are used if there are multiple CPUs, which reduces lock
 *     contention and cache sloshing.
 *
 *   + Cache line sharing between arenas is avoided for internal data
 *     structures.
 *
 *   + Memory is managed in chunks and runs (chunks can be split into runs),
 *     rather than as individual pages.  This provides a constant-time
 *     mechanism for associating allocations with particular arenas.
 *
 * Allocation requests are rounded up to the nearest size class, and no record
 * of the original request size is maintained.  Allocations are broken into
 * categories according to size class.  Assuming runtime defaults, 4 kB pages
 * and a 16 byte quantum on a 32-bit system, the size classes in each category
 * are as follows:
 *
 *   |=======================================|
 *   | Category | Subcategory    |    Size  |
 *   |=======================================|
 *   | Small    | Tiny           |       2  |
 *   |          |                |       4  |
 *   |          |                |       8  |
 *   |          |----------------+----------|
 *   |          | Quantum-spaced |      16  |
 *   |          |                |      32  |
 *   |          |                |      48  |
 *   |          |                |     ...  |
 *   |          |                |     480  |
 *   |          |                |     496  |
 *   |          |                |     512  |
 *   |          |----------------+----------|
 *   |          | Sub-page       |    1 kB  |
 *   |          |                |    2 kB  |
 *   |=======================================|
 *   | Large                     |    4 kB  |
 *   |                           |    8 kB  |
 *   |                           |   12 kB  |
 *   |                           |     ...  |
 *   |                           | 1012 kB  |
 *   |                           | 1016 kB  |
 *   |                           | 1020 kB  |
 *   |=======================================|
 *   | Huge                      |    1 MB  |
 *   |                           |    2 MB  |
 *   |                           |    3 MB  |
 *   |                           |     ...  |
 *   |=======================================|
 *
 * A different mechanism is used for each category:
 *
 *   Small : Each size class is segregated into its own set of runs.  Each run
 *           maintains a bitmap of which regions are free/allocated.
 *
 *   Large : Each allocation is backed by a dedicated run.  Metadata are stored
 *           in the associated arena chunk header maps.
 *
 *   Huge : Each allocation is backed by a dedicated contiguous set of chunks.
 *          Metadata are stored in a separate red-black tree.
 *
 *******************************************************************************
 */
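/*
 * Worked example of the rounding described above (illustrative only, using
 * the same runtime defaults: 4 kB pages, 16 byte quantum, 1 MB chunks):
 *
 *   request of    3 bytes -> size class    4 (tiny: next power of 2)
 *   request of  100 bytes -> size class  112 (quantum-spaced: next multiple of 16)
 *   request of  513 bytes -> size class 1024 (sub-page: next power of 2)
 *   request of 5000 bytes -> size class 8 kB (large: next page multiple)
 *   request of  1.5 MB    -> size class 2 MB (huge: next chunk multiple)
 */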
/*
 * NOTE(mbelshe): Added these defines to fit within the chromium build system.
 */
#define MOZ_MEMORY_WINDOWS
#define MOZ_MEMORY
#define DONT_OVERRIDE_LIBC
/*
 * MALLOC_PRODUCTION disables assertions and statistics gathering.  It also
 * defaults the A and J runtime options to off.  These settings are appropriate
 * for production systems.
 */
#ifndef MOZ_MEMORY_DEBUG
#  define MALLOC_PRODUCTION
#endif
/*
 * Use only one arena by default.  Mozilla does not currently make extensive
 * use of concurrent allocation, so the increased fragmentation associated with
 * multiple arenas is not warranted.
 */
#define MOZ_MEMORY_NARENAS_DEFAULT_ONE
/*
 * MALLOC_STATS enables statistics calculation, and is required for
 * jemalloc_stats().
 */
#define MALLOC_STATS
#ifndef MALLOC_PRODUCTION
/*
 * MALLOC_DEBUG enables assertions and other sanity checks, and disables
 * inline functions.
 */
#  define MALLOC_DEBUG
/* Memory filling (junk/zero). */
#  define MALLOC_FILL
/* Allocation tracing. */
#  ifndef MOZ_MEMORY_WINDOWS
#    define MALLOC_UTRACE
#  endif
/* Support optional abort() on OOM. */
#  define MALLOC_XMALLOC
/* Support SYSV semantics. */
#  define MALLOC_SYSV
#endif
/*
 * MALLOC_VALIDATE causes malloc_usable_size() to perform some pointer
 * validation.  There are many possible errors that validation does not even
 * attempt to detect.
 */
#define MALLOC_VALIDATE
/* Embed no-op macros that support memory allocation tracking via valgrind. */
#ifdef MOZ_VALGRIND
#  define MALLOC_VALGRIND
#endif
#ifdef MALLOC_VALGRIND
#  include <valgrind/valgrind.h>
#else
#  define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed)
#  define VALGRIND_FREELIKE_BLOCK(addr, rzB)
#endif
/*
 * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
 * re-balances arena load if exponentially averaged contention exceeds a
 * certain threshold.
 */
/* #define MALLOC_BALANCE */
#if (!defined(MOZ_MEMORY_WINDOWS) && !defined(MOZ_MEMORY_DARWIN))
/*
 * MALLOC_PAGEFILE causes all mmap()ed memory to be backed by temporary
 * files, so that if a chunk is mapped, it is guaranteed to be swappable.
 * This avoids asynchronous OOM failures that are due to VM over-commit.
 *
 * XXX OS X over-commits, so we should probably use mmap() instead of
 * vm_allocate(), so that MALLOC_PAGEFILE works.
 */
#define MALLOC_PAGEFILE
#endif
#ifdef MALLOC_PAGEFILE
/* Write size when initializing a page file. */
#  define MALLOC_PAGEFILE_WRITE_SIZE 512
#endif
#ifdef MOZ_MEMORY_LINUX
#define _GNU_SOURCE /* For mremap(2). */
#define issetugid() 0
#if 0 /* Enable in order to test decommit code on Linux. */
#  define MALLOC_DECOMMIT
#endif
#endif
#ifndef MOZ_MEMORY_WINCE
#include <sys/types.h>
#include <errno.h>
#include <stdlib.h>
#endif
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#ifdef MOZ_MEMORY_WINDOWS
#ifndef MOZ_MEMORY_WINCE
//#include <cruntime.h>
//#include <internal.h>
#include <io.h>
#else
#include <cmnintrin.h>
#include <crtdefs.h>
#define SIZE_MAX UINT_MAX
#endif
#include <windows.h>
#pragma warning( disable: 4267 4996 4146 )
#define false FALSE
#define true TRUE
#define inline __inline
#define SIZE_T_MAX SIZE_MAX
#define STDERR_FILENO 2
#define PATH_MAX MAX_PATH
#define vsnprintf _vsnprintf
#ifndef NO_TLS
static unsigned long tlsIndex = 0xffffffff;
#endif
#define __thread
#ifdef MOZ_MEMORY_WINCE
#define _pthread_self() GetCurrentThreadId()
#else
#define _pthread_self() __threadid()
#endif
#define issetugid() 0
#ifndef MOZ_MEMORY_WINCE
/* Use MSVC intrinsics. */
#pragma intrinsic(_BitScanForward)
static __forceinline int
ffs(int x)
{
	unsigned long i;

	if (_BitScanForward(&i, x) != 0)
		return (i + 1);
	return (0);
}
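/*
 * For illustration: ffs() returns the 1-based index of the least significant
 * set bit, or 0 if no bit is set.  E.g. ffs(0x0c) == 3, because bit 2
 * (value 4) is the lowest bit set; ffs(0) == 0 via the fallback return.
 */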
/* Implement getenv without using malloc. */
static char mozillaMallocOptionsBuf[64];
#define getenv xgetenv
static char *
getenv(const char *name)
{
	if (GetEnvironmentVariableA(name, (LPSTR)&mozillaMallocOptionsBuf,
	    sizeof(mozillaMallocOptionsBuf)) > 0)
		return (mozillaMallocOptionsBuf);
	return (NULL);
}
#else /* WIN CE */
#define ENOMEM 12
#define EINVAL 22
static __forceinline int
ffs(int x)
{
	return (32 - _CountLeadingZeros((-x) & x));
}
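/*
 * Note (illustrative): (-x) & x isolates the lowest set bit of x in two's
 * complement, so for x == 0x0c it yields 0x04; _CountLeadingZeros(0x04) is 29
 * on a 32-bit word and 32 - 29 == 3, the expected ffs() result.  For x == 0
 * the expression is 0, _CountLeadingZeros(0) is 32, and the result is 0.
 */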
#endif
typedef unsigned char uint8_t;
typedef unsigned uint32_t;
typedef unsigned long long uint64_t;
typedef unsigned long long uintmax_t;
typedef long ssize_t;
#define MALLOC_DECOMMIT
#endif
#ifndef MOZ_MEMORY_WINDOWS
#ifndef MOZ_MEMORY_SOLARIS
#include <sys/cdefs.h>
#endif
#ifndef __DECONST
#  define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
#ifndef MOZ_MEMORY
__FBSDID("$FreeBSD: head/lib/libc/stdlib/malloc.c 180599 2008-07-18 19:35:44Z jasone $");
#include "libc_private.h"
#ifdef MALLOC_DEBUG
#  define _LOCK_DEBUG
#endif
#include "spinlock.h"
#include "namespace.h"
#endif
#include <sys/mman.h>
#ifndef MADV_FREE
#  define MADV_FREE MADV_DONTNEED
#endif
#ifndef MAP_NOSYNC
#  define MAP_NOSYNC 0
#endif
#include <sys/param.h>
#ifndef MOZ_MEMORY
#include <sys/stddef.h>
#endif
#include <sys/time.h>
#include <sys/types.h>
#ifndef MOZ_MEMORY_SOLARIS
#include <sys/sysctl.h>
#endif
#include <sys/uio.h>
#ifndef MOZ_MEMORY
#include <sys/ktrace.h> /* Must come after several other sys/ includes. */
#include <machine/atomic.h>
#include <machine/cpufunc.h>
#include <machine/vmparam.h>
#endif
#include <errno.h>
#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX SIZE_MAX
#endif
#include <pthread.h>
#ifdef MOZ_MEMORY_DARWIN
#define _pthread_self pthread_self
#define _pthread_mutex_init pthread_mutex_init
#define _pthread_mutex_trylock pthread_mutex_trylock
#define _pthread_mutex_lock pthread_mutex_lock
#define _pthread_mutex_unlock pthread_mutex_unlock
#endif
#include <sched.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#ifndef MOZ_MEMORY_DARWIN
#include <strings.h>
#endif
#include <unistd.h>
#ifdef MOZ_MEMORY_DARWIN
#include <libkern/OSAtomic.h>
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif
#ifndef MOZ_MEMORY
#include "un-namespace.h"
#endif
#endif
#include "jemalloc.h"
#undef bool
#define bool jemalloc_bool
#ifdef MOZ_MEMORY_DARWIN
static const bool __isthreaded = true;
#endif
#if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
#define JEMALLOC_USES_MAP_ALIGN /* Required on Solaris 10.  Might improve performance elsewhere. */
#endif
#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
#define JEMALLOC_USES_MAP_ALIGN /* Required for Windows CE < 6. */
#endif
#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#include "qr.h"
#include "ql.h"
#ifdef MOZ_MEMORY_WINDOWS
/* MSVC++ does not support C99 variable-length arrays. */
#  define RB_NO_C99_VARARRAYS
#endif
#include "rb.h"
#ifdef MALLOC_DEBUG
/* Disable inlining to make debugging easier. */
#ifdef inline
#undef inline
#endif
#define inline
#endif
/* Size of stack-allocated buffer passed to strerror_r(). */
#define STRERROR_BUF 64
/* Minimum alignment of allocations is 2^QUANTUM_2POW_MIN bytes. */
#define QUANTUM_2POW_MIN 4
#ifdef MOZ_MEMORY_SIZEOF_PTR_2POW
#  define SIZEOF_PTR_2POW MOZ_MEMORY_SIZEOF_PTR_2POW
#else
#  define SIZEOF_PTR_2POW 2
#endif
#define PIC
#ifndef MOZ_MEMORY_DARWIN
static const bool __isthreaded = true;
#else
#  define NO_TLS
#endif
#if 0
#ifdef __i386__
#  define QUANTUM_2POW_MIN 4
#  define SIZEOF_PTR_2POW 2
#  define CPU_SPINWAIT __asm__ volatile("pause")
#endif
#ifdef __ia64__
#  define QUANTUM_2POW_MIN 4
#  define SIZEOF_PTR_2POW 3
#endif
#ifdef __alpha__
#  define QUANTUM_2POW_MIN 4
#  define SIZEOF_PTR_2POW 3
#  define NO_TLS
#endif
#ifdef __sparc64__
#  define QUANTUM_2POW_MIN 4
#  define SIZEOF_PTR_2POW 3
#  define NO_TLS
#endif
#ifdef __amd64__
#  define QUANTUM_2POW_MIN 4
#  define SIZEOF_PTR_2POW 3
#  define CPU_SPINWAIT __asm__ volatile("pause")
#endif
#ifdef __arm__
#  define QUANTUM_2POW_MIN 3
#  define SIZEOF_PTR_2POW 2
#  define NO_TLS
#endif
#ifdef __mips__
#  define QUANTUM_2POW_MIN 3
#  define SIZEOF_PTR_2POW 2
#  define NO_TLS
#endif
#ifdef __powerpc__
#  define QUANTUM_2POW_MIN 4
#  define SIZEOF_PTR_2POW 2
#endif
#endif
#define SIZEOF_PTR (1U << SIZEOF_PTR_2POW)
/* sizeof(int) == (1U << SIZEOF_INT_2POW). */
#ifndef SIZEOF_INT_2POW
#  define SIZEOF_INT_2POW 2
#endif
/* We can't use TLS in non-PIC programs, since TLS relies on loader magic. */
#if (!defined(PIC) && !defined(NO_TLS))
#  define NO_TLS
#endif
#ifdef NO_TLS
/* MALLOC_BALANCE requires TLS. */
#  ifdef MALLOC_BALANCE
#    undef MALLOC_BALANCE
#  endif
#endif
/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6)
#define CHUNK_2POW_DEFAULT 21
#else
#define CHUNK_2POW_DEFAULT 20
#endif
/* Maximum number of dirty pages per arena. */
#define DIRTY_MAX_DEFAULT (1U << 10)
/* Default reserve chunks. */
#define RESERVE_MIN_2POW_DEFAULT 1
/*
 * Default range (in chunks) between reserve_min and reserve_max, in addition
 * to the mandatory one chunk per arena.
 */
#ifdef MALLOC_PAGEFILE
#  define RESERVE_RANGE_2POW_DEFAULT 5
#else
#  define RESERVE_RANGE_2POW_DEFAULT 0
#endif
/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing,
 * so over-estimates are okay (up to a point), but under-estimates will
 * negatively affect performance.
 */
#define CACHELINE_2POW 6
#define CACHELINE ((size_t)(1U << CACHELINE_2POW))
/* Smallest size class to support. */
#define TINY_MIN_2POW 1
/*
 * Maximum size class that is a multiple of the quantum, but not (necessarily)
 * a power of 2.  Above this size, allocations are rounded up to the nearest
 * power of 2.
 */
#define SMALL_MAX_2POW_DEFAULT 9
#define SMALL_MAX_DEFAULT (1U << SMALL_MAX_2POW_DEFAULT)
/*
 * RUN_MAX_OVRHD indicates maximum desired run header overhead.  Runs are sized
 * as small as possible such that this setting is still honored, without
 * violating other constraints.  The goal is to make runs as small as possible
 * without exceeding a per run external fragmentation threshold.
 *
 * We use binary fixed point math for overhead computations, where the binary
 * point is implicitly RUN_BFP bits to the left.
 *
 * Note that it is possible to set RUN_MAX_OVRHD low enough that it cannot be
 * honored for some/all object sizes, since there is one bit of header overhead
 * per object (plus a constant).  This constraint is relaxed (ignored) for runs
 * that are so small that the per-region overhead is greater than:
 *
 *   (RUN_MAX_OVRHD / (reg_size << (3 + RUN_BFP)))
 */
#define RUN_BFP 12
/*                                    \/   Implicit binary fixed point. */
#define RUN_MAX_OVRHD 0x0000003dU
#define RUN_MAX_OVRHD_RELAX 0x00001800U
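/*
 * Worked example (illustrative): with RUN_BFP == 12, fixed-point 1.0 is
 * (1 << 12) == 4096, so RUN_MAX_OVRHD == 0x3d == 61 corresponds to 61/4096,
 * i.e. roughly 1.5% maximum header overhead per run.
 */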
/* Put a cap on small object run size.  This overrides RUN_MAX_OVRHD. */
#define RUN_MAX_SMALL_2POW 15
#define RUN_MAX_SMALL (1U << RUN_MAX_SMALL_2POW)
/*
 * Hyper-threaded CPUs may need a special instruction inside spin loops in
 * order to yield to another virtual CPU.  If no such instruction is defined
 * above, make CPU_SPINWAIT a no-op.
 */
#ifndef CPU_SPINWAIT
#  define CPU_SPINWAIT
#endif
/*
 * Adaptive spinning must eventually switch to blocking, in order to avoid the
 * potential for priority inversion deadlock.  Backing off past a certain point
 * can actually waste time.
 */
#define SPIN_LIMIT_2POW 11
/*
 * Conversion from spinning to blocking is expensive; we use (1U <<
 * BLOCK_COST_2POW) to estimate how many more times costly blocking is than
 * worst-case spinning.
 */
#define BLOCK_COST_2POW 4
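/*
 * Example (illustrative): a caller that spins for the full 2^SPIN_LIMIT_2POW
 * iterations and then blocks has its contention estimate scaled by
 * (1U << BLOCK_COST_2POW); i.e. the block is accounted as 16 times more
 * costly than an equivalent amount of spinning.
 */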
#ifdef MALLOC_BALANCE
/*
 * We use an exponential moving average to track recent lock contention,
 * where the size of the history window is N, and alpha=2/(N+1).
 *
 * Due to integer math rounding, very small values here can cause
 * substantial degradation in accuracy, thus making the moving average decay
 * faster than it would with precise calculation.
 */
#  define BALANCE_ALPHA_INV_2POW 9
/*
 * Threshold value for the exponential moving contention average at which to
 * re-assign a thread.
 */
#  define BALANCE_THRESHOLD_DEFAULT (1U << (SPIN_LIMIT_2POW - 4))
#endif
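/*
 * Concretely (illustrative): BALANCE_ALPHA_INV_2POW == 9 gives
 * alpha == 1/512 == 2/(N+1), i.e. an effective history window of N == 1023
 * lock acquisitions.  A shift-based sketch of such an update (not necessarily
 * the exact code used elsewhere in this file) is:
 *
 *   contention += (new_sample >> 9) - (contention >> 9);
 */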
/******************************************************************************/
/*
 * Mutexes based on spinlocks.  We can't use normal pthread spinlocks in all
 * places, because they require malloc()ed memory, which causes bootstrapping
 * issues in some cases.
 */
#if defined(MOZ_MEMORY_WINDOWS)
#define malloc_mutex_t CRITICAL_SECTION
#define malloc_spinlock_t CRITICAL_SECTION
#elif defined(MOZ_MEMORY_DARWIN)
typedef struct {
	OSSpinLock	lock;
} malloc_mutex_t;
typedef struct {
	OSSpinLock	lock;
} malloc_spinlock_t;
#elif defined(MOZ_MEMORY)
typedef pthread_mutex_t malloc_mutex_t;
typedef pthread_mutex_t malloc_spinlock_t;
#else
/* XXX these should be #ifdef'd for FreeBSD (and Linux?) only. */
typedef struct {
	spinlock_t	lock;
} malloc_mutex_t;
typedef malloc_mutex_t malloc_spinlock_t;
#endif
/* Set to true once the allocator has been initialized. */
static bool malloc_initialized = false;
#if defined(MOZ_MEMORY_WINDOWS)
/* No init lock for Windows. */
#elif defined(MOZ_MEMORY_DARWIN)
static malloc_mutex_t init_lock = {OS_SPINLOCK_INIT};
#elif defined(MOZ_MEMORY_LINUX)
static malloc_mutex_t init_lock = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;
#elif defined(MOZ_MEMORY)
static malloc_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
#else
static malloc_mutex_t init_lock = {_SPINLOCK_INITIALIZER};
#endif
/******************************************************************************/
/*
 * Statistics data structures.
 */
#ifdef MALLOC_STATS
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
struct malloc_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t	nrequests;
	/* Total number of runs created for this bin's size class. */
	uint64_t	nruns;
	/*
	 * Total number of runs reused by extracting them from the runs tree
	 * for this bin's size class.
	 */
	uint64_t	reruns;
	/* High-water mark for this bin. */
	unsigned long	highruns;
	/* Current number of runs in this bin. */
	unsigned long	curruns;
};
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;
	/*
	 * Total number of purge sweeps, total number of madvise calls made,
	 * and total pages purged in order to keep dirty unused memory under
	 * control.
	 */
	uint64_t	npurge;
	uint64_t	nmadvise;
	uint64_t	purged;
#ifdef MALLOC_DECOMMIT
	/*
	 * Total number of decommit/commit operations, and total number of
	 * pages decommitted.
	 */
	uint64_t	ndecommit;
	uint64_t	ncommit;
	uint64_t	decommitted;
#endif
	/* Per-size-category statistics. */
	size_t		allocated_small;
	uint64_t	nmalloc_small;
	uint64_t	ndalloc_small;
	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
#ifdef MALLOC_BALANCE
	/* Number of times this arena reassigned a thread due to contention. */
	uint64_t	nbalance;
#endif
};
typedef struct chunk_stats_s chunk_stats_t;
struct chunk_stats_s {
	/* Number of chunks that were allocated. */
	uint64_t	nchunks;
	/* High-water mark for number of chunks allocated. */
	unsigned long	highchunks;
	/*
	 * Current number of chunks allocated.  This value is maintained
	 * solely so that highchunks can be kept up to date.
	 */
	unsigned long	curchunks;
};
#endif /* #ifdef MALLOC_STATS */
/******************************************************************************/
/*
 * Extent data structures.
 */
/* Tree of extents. */
typedef struct extent_node_s extent_node_t;
struct extent_node_s {
	/* Linkage for the size/address-ordered tree. */
	rb_node(extent_node_t) link_szad;
	/* Linkage for the address-ordered tree. */
	rb_node(extent_node_t) link_ad;
	/* Pointer to the extent that this tree node is responsible for. */
	void	*addr;
	/* Total region size. */
	size_t	size;
};
typedef rb_tree(extent_node_t) extent_tree_t;
/******************************************************************************/
/*
 * Radix tree data structures.
 */
#ifdef MALLOC_VALIDATE
/*
 * Size of each radix tree node (must be a power of 2).  This impacts tree
 * depth.
 */
#  if (SIZEOF_PTR == 4)
#    define MALLOC_RTREE_NODESIZE (1U << 14)
#  else
#    define MALLOC_RTREE_NODESIZE CACHELINE
#  endif
typedef struct malloc_rtree_s malloc_rtree_t;
struct malloc_rtree_s {
	malloc_spinlock_t	lock;
	void			**root;
	unsigned		height;
	unsigned		level2bits[1]; /* Dynamically sized. */
};
#endif
/******************************************************************************/
/*
 * Reserve data structures.
 */
/* Callback registration. */
typedef struct reserve_reg_s reserve_reg_t;
struct reserve_reg_s {
	/* Linkage for list of all registered callbacks. */
	ql_elm(reserve_reg_t)	link;
	/* Callback function pointer. */
	reserve_cb_t		*cb;
	/* Opaque application data pointer. */
	void			*ctx;
	/*
	 * Sequence number of condition notification most recently sent to this
	 * callback.
	 */
	uint64_t		seq;
};
/******************************************************************************/
/*
 * Arena data structures.
 */
typedef struct arena_s arena_t;
typedef struct arena_bin_s arena_bin_t;
/* Each element of the chunk map corresponds to one page within the chunk. */
typedef struct arena_chunk_map_s arena_chunk_map_t;
struct arena_chunk_map_s {
	/*
	 * Linkage for run trees.  There are two disjoint uses:
	 *
	 * 1) arena_t's runs_avail tree.
	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
	 *    runs, rather than directly embedding linkage.
	 */
	rb_node(arena_chunk_map_t)	link;
	/*
	 * Run address (or size) and various flags are stored together.  The
	 * bit layout looks like (assuming 32-bit system):
	 *
	 *   ???????? ???????? ????---- --ckdzla
	 *
	 *   ? : Unallocated: Run address for first/last pages, unset for
	 *                    internal pages.
	 *       Small: Run address.
	 *       Large: Run size for first page, unset for trailing pages.
	 *   - : Unused.
	 *   c : decommitted?
	 *   k : key?
	 *   d : dirty?
	 *   z : zeroed?
	 *   l : large?
	 *   a : allocated?
	 *
	 * Following are example bit patterns for the three types of runs.
	 *
	 *   r : run address
	 *   s : run size
	 *   x : don't care
	 *   - : 0
	 *   [cdzla] : bit set
	 *
	 *   Unallocated:
	 *     ssssssss ssssssss ssss---- --c-----
	 *     xxxxxxxx xxxxxxxx xxxx---- ----d---
	 *     ssssssss ssssssss ssss---- -----z--
	 *
	 *   Small:
	 *     rrrrrrrr rrrrrrrr rrrr---- -------a
	 *     rrrrrrrr rrrrrrrr rrrr---- -------a
	 *     rrrrrrrr rrrrrrrr rrrr---- -------a
	 *
	 *   Large:
	 *     ssssssss ssssssss ssss---- ------la
	 *     -------- -------- -------- ------la
	 *     -------- -------- -------- ------la
	 */
	size_t				bits;
#ifdef MALLOC_DECOMMIT
#define CHUNK_MAP_DECOMMITTED	((size_t)0x20U)
#endif
#define CHUNK_MAP_KEY		((size_t)0x10U)
#define CHUNK_MAP_DIRTY		((size_t)0x08U)
#define CHUNK_MAP_ZEROED	((size_t)0x04U)
#define CHUNK_MAP_LARGE		((size_t)0x02U)
#define CHUNK_MAP_ALLOCATED	((size_t)0x01U)
};
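/*
 * Decoding example (illustrative, 4 kB pages): a map entry whose bits value
 * is 0x00003003 has size field 0x3000 (a 12 kB, i.e. 3 page, run) and the
 * flags CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED set, so it marks the first
 * page of a large allocation; the trailing pages of that run carry only the
 * "la" bits, per the bit patterns above.
 */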
typedef rb_tree(arena_chunk_map_t) arena_avail_tree_t;
typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
/* Arena chunk header. */
typedef struct arena_chunk_s arena_chunk_t;
struct arena_chunk_s {
	/* Arena that owns the chunk. */
	arena_t		*arena;
	/* Linkage for the arena's chunks_dirty tree. */
	rb_node(arena_chunk_t) link_dirty;
	/* Number of dirty pages. */
	size_t		ndirty;
	/* Map of pages within chunk that keeps track of free/large/small. */
	arena_chunk_map_t map[1]; /* Dynamically sized. */
};
typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
typedef struct arena_run_s arena_run_t;
struct arena_run_s {
#ifdef MALLOC_DEBUG
	uint32_t	magic;
#  define ARENA_RUN_MAGIC 0x384adf93
#endif
	/* Bin this run is associated with. */
	arena_bin_t	*bin;
	/* Index of first element that might have a free region. */
	unsigned	regs_minelm;
	/* Number of free regions in run. */
	unsigned	nfree;
	/* Bitmask of in-use regions (0: in use, 1: free). */
	unsigned	regs_mask[1]; /* Dynamically sized. */
};
struct arena_bin_s {
	/*
	 * Current run being used to service allocations of this bin's size
	 * class.
	 */
	arena_run_t	*runcur;
	/*
	 * Tree of non-full runs.  This tree is used when looking for an
	 * existing run when runcur is no longer usable.  We choose the
	 * non-full run that is lowest in memory; this policy tends to keep
	 * objects packed well, and it can also help reduce the number of
	 * almost-empty chunks.
	 */
	arena_run_tree_t runs;
	/* Size of regions in a run for this bin's size class. */
	size_t		reg_size;
	/* Total size of a run for this bin's size class. */
	size_t		run_size;
	/* Total number of regions in a run for this bin's size class. */
	uint32_t	nregs;
	/* Number of elements in a run's regs_mask for this bin's size class. */
	uint32_t	regs_mask_nelms;
	/* Offset of first region in a run for this bin's size class. */
	uint32_t	reg0_offset;
#ifdef MALLOC_STATS
	/* Bin statistics. */
	malloc_bin_stats_t stats;
#endif
};
struct arena_s {
#ifdef MALLOC_DEBUG
	uint32_t	magic;
#  define ARENA_MAGIC 0x947d3d24
#endif
	/* All operations on this arena require that lock be locked. */
#ifdef MOZ_MEMORY
	malloc_spinlock_t lock;
#else
	pthread_mutex_t	lock;
#endif
#ifdef MALLOC_STATS
	arena_stats_t	stats;
#endif
	/*
	 * Chunk allocation sequence number, used to detect races with other
	 * threads during chunk allocation, and then discard unnecessary
	 * chunks.
	 */
	uint64_t	chunk_seq;
	/* Tree of dirty-page-containing chunks this arena manages. */
	arena_chunk_tree_t chunks_dirty;
	/*
	 * In order to avoid rapid chunk allocation/deallocation when an arena
	 * oscillates right on the cusp of needing a new chunk, cache the most
	 * recently freed chunk.  The spare is left in the arena's chunk trees
	 * until it is deleted.
	 *
	 * There is one spare chunk per arena, rather than one spare total, in
	 * order to avoid interactions between multiple threads that could make
	 * a single spare inadequate.
	 */
	arena_chunk_t	*spare;
	/*
	 * Current count of pages within unused runs that are potentially
	 * dirty, and for which madvise(... MADV_FREE) has not been called.  By
	 * tracking this, we can institute a limit on how much dirty unused
	 * memory is mapped for each arena.
	 */
	size_t		ndirty;
	/*
	 * Size/address-ordered tree of this arena's available runs.  This tree
	 * is used for first-best-fit run allocation.
	 */
	arena_avail_tree_t runs_avail;
#ifdef MALLOC_BALANCE
	/*
	 * The arena load balancing machinery needs to keep track of how much
	 * lock contention there is.  This value is exponentially averaged.
	 */
	uint32_t	contention;
#endif
	/*
	 * bins is used to store trees of free regions of the following sizes,
	 * assuming a 16-byte quantum, 4 kB pagesize, and default
	 * MALLOC_OPTIONS.
	 *
	 *   bins[i] | size |
	 *   --------+------+
	 *        0  |    2 |
	 *        1  |    4 |
	 *        2  |    8 |
	 *   --------+------+
	 *        3  |   16 |
	 *        4  |   32 |
	 *        5  |   48 |
	 *        6  |   64 |
	 *           :      :
	 *           :      :
	 *       33  |  496 |
	 *       34  |  512 |
	 *   --------+------+
	 *       35  | 1024 |
	 *       36  | 2048 |
	 *   --------+------+
	 */
	arena_bin_t	bins[1]; /* Dynamically sized. */
};
/******************************************************************************/
/*
 * Data.
 */
/* Number of CPUs. */
static unsigned		ncpus;
/* VM page size. */
static size_t		pagesize;
static size_t		pagesize_mask;
static size_t		pagesize_2pow;
/* Various bin-related settings. */
static size_t		bin_maxclass; /* Max size class for bins. */
static unsigned		ntbins; /* Number of (2^n)-spaced tiny bins. */
static unsigned		nqbins; /* Number of quantum-spaced bins. */
static unsigned		nsbins; /* Number of (2^n)-spaced sub-page bins. */
static size_t		small_min;
static size_t		small_max;
/* Various quantum-related settings. */
static size_t		quantum;
static size_t		quantum_mask; /* (quantum - 1). */
/* Various chunk-related settings. */
static size_t		chunksize;
static size_t		chunksize_mask; /* (chunksize - 1). */
static size_t		chunk_npages;
static size_t		arena_chunk_header_npages;
static size_t		arena_maxclass; /* Max size class for arenas. */
/********/
/*
 * Chunks.
 */
#ifdef MALLOC_VALIDATE
static malloc_rtree_t	*chunk_rtree;
#endif
/* Protects chunk-related data structures. */
static malloc_mutex_t	huge_mtx;
/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;
#ifdef MALLOC_STATS
/* Huge allocation statistics. */
static uint64_t		huge_nmalloc;
static uint64_t		huge_ndalloc;
static size_t		huge_allocated;
#endif
/****************/
/*
 * Memory reserve.
 */
#ifdef MALLOC_PAGEFILE
static char		pagefile_templ[PATH_MAX];
#endif
/* Protects reserve-related data structures. */
static malloc_mutex_t	reserve_mtx;
/*
 * Bounds on acceptable reserve size, and current reserve size.  Reserve
 * depletion may cause (reserve_cur < reserve_min).
 */
static size_t		reserve_min;
static size_t		reserve_cur;
static size_t		reserve_max;
/* List of registered callbacks. */
static ql_head(reserve_reg_t) reserve_regs;
/*
 * Condition notification sequence number, used to determine whether all
 * registered callbacks have been notified of the most current condition.
 */
static uint64_t		reserve_seq;
/*
 * Trees of chunks currently in the memory reserve.  Depending on function,
 * different tree orderings are needed, which is why there are two trees with
 * the same contents.
 */
static extent_tree_t	reserve_chunks_szad;
static extent_tree_t	reserve_chunks_ad;
/****************************/
/*
 * base (internal allocation).
 */
/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void		*base_pages;
static void		*base_next_addr;
#ifdef MALLOC_DECOMMIT
static void		*base_next_decommitted;
#endif
static void		*base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t	*base_nodes;
static reserve_reg_t	*base_reserve_regs;
static malloc_mutex_t	base_mtx;
#ifdef MALLOC_STATS
static size_t		base_mapped;
#endif
/********/
/*
 * Arenas.
 */
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
static arena_t		**arenas;
static unsigned		narenas;
static unsigned		narenas_2pow;
#ifndef NO_TLS
/* (narenas_2pow, declared above, also serves the MALLOC_BALANCE code.) */
#  ifndef MALLOC_BALANCE
static unsigned		next_arena;
#  endif
#endif
#ifdef MOZ_MEMORY
static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
#else
static pthread_mutex_t arenas_lock; /* Protects arenas initialization. */
#endif
#ifndef NO_TLS
/*
 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
 * for allocations.
 */
#ifndef MOZ_MEMORY_WINDOWS
static __thread arena_t	*arenas_map;
#endif
#endif
#ifdef MALLOC_STATS
/* Chunk statistics. */
static chunk_stats_t	stats_chunks;
#endif
/*******************************/
/*
 * Runtime configuration options.
 */
const char	*_malloc_options;
#ifndef MALLOC_PRODUCTION
static bool	opt_abort = true;
#ifdef MALLOC_FILL
static bool	opt_junk = true;
#endif
#else
static bool	opt_abort = false;
#ifdef MALLOC_FILL
static bool	opt_junk = false;
#endif
#endif
static size_t	opt_dirty_max = DIRTY_MAX_DEFAULT;
#ifdef MALLOC_BALANCE
static uint64_t	opt_balance_threshold = BALANCE_THRESHOLD_DEFAULT;
#endif
static bool	opt_print_stats = false;
static size_t	opt_quantum_2pow = QUANTUM_2POW_MIN;
static size_t	opt_small_max_2pow = SMALL_MAX_2POW_DEFAULT;
static size_t	opt_chunk_2pow = CHUNK_2POW_DEFAULT;
static int	opt_reserve_min_lshift = 0;
static int	opt_reserve_range_lshift = 0;
#ifdef MALLOC_PAGEFILE
static bool	opt_pagefile = false;
#endif
#ifdef MALLOC_UTRACE
static bool	opt_utrace = false;
#endif
#ifdef MALLOC_SYSV
static bool	opt_sysv = false;
#endif
#ifdef MALLOC_XMALLOC
static bool	opt_xmalloc = false;
#endif
#ifdef MALLOC_FILL
static bool	opt_zero = false;
#endif
static int	opt_narenas_lshift = 0;
#ifdef MALLOC_UTRACE
typedef struct {
	void	*p;
	size_t	s;
	void	*r;
} malloc_utrace_t;
/* Wrapped in do/while(0) so the macro is safe in unbraced if/else bodies. */
#define UTRACE(a, b, c)							\
	do {								\
		if (opt_utrace) {					\
			malloc_utrace_t ut;				\
			ut.p = (a);					\
			ut.s = (b);					\
			ut.r = (c);					\
			utrace(&ut, sizeof(ut));			\
		}							\
	} while (0)
#else
#define UTRACE(a, b, c)
#endif
/******************************************************************************/
/*
 * Begin function prototypes for non-inline static functions.
 */
static char	*umax2s(uintmax_t x, char *s);
static bool	malloc_mutex_init(malloc_mutex_t *mutex);
static bool	malloc_spin_init(malloc_spinlock_t *lock);
static void	wrtmessage(const char *p1, const char *p2, const char *p3,
    const char *p4);
#ifdef MALLOC_STATS
#ifdef MOZ_MEMORY_DARWIN
/* Avoid namespace collision with OS X's malloc APIs. */
#define malloc_printf moz_malloc_printf
#endif
static void	malloc_printf(const char *format, ...);
#endif
static bool	base_pages_alloc_mmap(size_t minsize);
static bool	base_pages_alloc(size_t minsize);
static void	*base_alloc(size_t size);
static void	*base_calloc(size_t number, size_t size);
static extent_node_t *base_node_alloc(void);
static void	base_node_dealloc(extent_node_t *node);
static reserve_reg_t *base_reserve_reg_alloc(void);
static void	base_reserve_reg_dealloc(reserve_reg_t *reg);
#ifdef MALLOC_STATS
static void	stats_print(arena_t *arena);
#endif
static void	*pages_map(void *addr, size_t size, int pfd);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap(size_t size, bool pagefile);
#ifdef MALLOC_PAGEFILE
static int	pagefile_init(size_t size);
static void	pagefile_close(int pfd);
#endif
static void	*chunk_recycle_reserve(size_t size, bool zero);
static void	*chunk_alloc(size_t size, bool zero, bool pagefile);
static extent_node_t *chunk_dealloc_reserve(void *chunk, size_t size);
static void	chunk_dealloc_mmap(void *chunk, size_t size);
static void	chunk_dealloc(void *chunk, size_t size);
#ifndef NO_TLS
static arena_t	*choose_arena_hard(void);
#endif
static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, bool zero);
static void	arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
    size_t size, bool large, bool zero);
static void	arena_purge(arena_t *arena);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static size_t	arena_bin_run_size_calc(arena_bin_t *bin, size_t min_run_size);
#ifdef MALLOC_BALANCE
static void	arena_lock_balance_hard(arena_t *arena);
#endif
static void	*arena_malloc_large(arena_t *arena, size_t size, bool zero);
static void	*arena_palloc(arena_t *arena, size_t alignment, size_t size,
    size_t alloc_size);
static size_t	arena_salloc(const void *ptr);
static void	arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
    void *ptr);
static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t size, size_t oldsize);
static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t size, size_t oldsize);
static bool	arena_ralloc_large(void *ptr, size_t size, size_t oldsize);
static void	*arena_ralloc(void *ptr, size_t size, size_t oldsize);
static bool	arena_new(arena_t *arena);
static arena_t	*arenas_extend(unsigned ind);
static void	*huge_malloc(size_t size, bool zero);
static void	*huge_palloc(size_t alignment, size_t size);
static void	*huge_ralloc(void *ptr, size_t size, size_t oldsize);
static void	huge_dalloc(void *ptr);
static void	malloc_print_stats(void);
#ifndef MOZ_MEMORY_WINDOWS
static
#endif
bool		malloc_init_hard(void);
static void	reserve_shrink(void);
static uint64_t	reserve_notify(reserve_cnd_t cnd, size_t size, uint64_t seq);
static uint64_t	reserve_crit(size_t size, const char *fname, uint64_t seq);
static void	reserve_fail(size_t size, const char *fname);
void		_malloc_prefork(void);
void		_malloc_postfork(void);
/*
 * End function prototypes.
 */
/******************************************************************************/
/*
 * umax2s() provides minimal integer printing functionality, which is
 * especially useful for situations where allocation in vsnprintf() calls would
 * potentially cause deadlock.
 */
#define UMAX2S_BUFSIZE 21
static char *
umax2s(uintmax_t x, char *s)
{
	unsigned i;

	i = UMAX2S_BUFSIZE - 1;
	s[i] = '\0';
	do {
		i--;
		s[i] = "0123456789"[x % 10];
		x /= 10;
	} while (x > 0);
	return (&s[i]);
}
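/*
 * Usage sketch (illustrative): umax2s() writes the digits backward from the
 * end of the caller's buffer and returns a pointer into that buffer, so the
 * buffer must outlive any use of the result:
 *
 *	char buf[UMAX2S_BUFSIZE];
 *	_malloc_message("n = ", umax2s(42, buf), "\n", "");
 */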
static void
wrtmessage(const char *p1, const char *p2, const char *p3, const char *p4)
{
#ifdef MOZ_MEMORY_WINCE
	wchar_t buf[1024];
#define WRT_PRINT(s)						\
	MultiByteToWideChar(CP_ACP, 0, s, -1, buf, 1024);	\
	OutputDebugStringW(buf)
	WRT_PRINT(p1);
	WRT_PRINT(p2);
	WRT_PRINT(p3);
	WRT_PRINT(p4);
#else
#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_WINDOWS)
#define	_write	write
#endif
	_write(STDERR_FILENO, p1, (unsigned int) strlen(p1));
	_write(STDERR_FILENO, p2, (unsigned int) strlen(p2));
	_write(STDERR_FILENO, p3, (unsigned int) strlen(p3));
	_write(STDERR_FILENO, p4, (unsigned int) strlen(p4));
#endif
}
#define _malloc_message malloc_message
void	(*_malloc_message)(const char *p1, const char *p2, const char *p3,
	    const char *p4) = wrtmessage;
#ifdef MALLOC_DEBUG
#  define assert(e) do {						\
	if (!(e)) {							\
		char line_buf[UMAX2S_BUFSIZE];				\
		_malloc_message(__FILE__, ":", umax2s(__LINE__,		\
		    line_buf), ": Failed assertion: ");			\
		_malloc_message("\"", #e, "\"\n", "");			\
		abort();						\
	}								\
} while (0)
#else
#define assert(e)
#endif
/******************************************************************************/
/*
 * Begin mutex.  We can't use normal pthread mutexes in all places, because
 * they require malloc()ed memory, which causes bootstrapping issues in some
 * cases.
 */
static bool
malloc_mutex_init(malloc_mutex_t *mutex)
{
#if defined(MOZ_MEMORY_WINCE)
	InitializeCriticalSection(mutex);
#elif defined(MOZ_MEMORY_WINDOWS)
	// XXXMB
	//if (__isthreaded)
	//	if (! __crtInitCritSecAndSpinCount(mutex, _CRT_SPINCOUNT))
	//		return (true);
	if (!InitializeCriticalSectionAndSpinCount(mutex, 4000))
		return (true);
#elif defined(MOZ_MEMORY_DARWIN)
	mutex->lock = OS_SPINLOCK_INIT;
#elif defined(MOZ_MEMORY_LINUX)
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0)
		return (true);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
	if (pthread_mutex_init(mutex, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return (true);
	}
	pthread_mutexattr_destroy(&attr);
#elif defined(MOZ_MEMORY)
	if (pthread_mutex_init(mutex, NULL) != 0)
		return (true);
#else
	static const spinlock_t lock = _SPINLOCK_INITIALIZER;

	mutex->lock = lock;
#endif
	return (false);
}
static inline void
malloc_mutex_lock(malloc_mutex_t *mutex)
{
#if defined(MOZ_MEMORY_WINDOWS)
	EnterCriticalSection(mutex);
#elif defined(MOZ_MEMORY_DARWIN)
	OSSpinLockLock(&mutex->lock);
#elif defined(MOZ_MEMORY)
	pthread_mutex_lock(mutex);
#else
	if (__isthreaded)
		_SPINLOCK(&mutex->lock);
#endif
}
static inline void
malloc_mutex_unlock(malloc_mutex_t *mutex)
{
#if defined(MOZ_MEMORY_WINDOWS)
	LeaveCriticalSection(mutex);
#elif defined(MOZ_MEMORY_DARWIN)
	OSSpinLockUnlock(&mutex->lock);
#elif defined(MOZ_MEMORY)
	pthread_mutex_unlock(mutex);
#else
	if (__isthreaded)
		_SPINUNLOCK(&mutex->lock);
#endif
}
static bool
malloc_spin_init(malloc_spinlock_t *lock)
{
#if defined(MOZ_MEMORY_WINCE)
	InitializeCriticalSection(lock);
#elif defined(MOZ_MEMORY_WINDOWS)
	// XXXMB
	//if (__isthreaded)
	//	if (! __crtInitCritSecAndSpinCount(lock, _CRT_SPINCOUNT))
	//		return (true);
	/* Match malloc_mutex_init() above so the lock is actually initialized. */
	if (!InitializeCriticalSectionAndSpinCount(lock, 4000))
		return (true);
#elif defined(MOZ_MEMORY_DARWIN)
	lock->lock = OS_SPINLOCK_INIT;
#elif defined(MOZ_MEMORY_LINUX)
	pthread_mutexattr_t attr;

	if (pthread_mutexattr_init(&attr) != 0)
		return (true);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
	if (pthread_mutex_init(lock, &attr) != 0) {
		pthread_mutexattr_destroy(&attr);
		return (true);
	}
	pthread_mutexattr_destroy(&attr);
#elif defined(MOZ_MEMORY)
	if (pthread_mutex_init(lock, NULL) != 0)
		return (true);
#else
	lock->lock = _SPINLOCK_INITIALIZER;
#endif
	return (false);
}
static inline void
malloc_spin_lock(malloc_spinlock_t *lock)
{
#if defined(MOZ_MEMORY_WINDOWS)
	EnterCriticalSection(lock);
#elif defined(MOZ_MEMORY_DARWIN)
	OSSpinLockLock(&lock->lock);
#elif defined(MOZ_MEMORY)
	pthread_mutex_lock(lock);
#else
	if (__isthreaded)
		_SPINLOCK(&lock->lock);
#endif
}
static inline void
malloc_spin_unlock(malloc_spinlock_t *lock)
{
#if defined(MOZ_MEMORY_WINDOWS)
	LeaveCriticalSection(lock);
#elif defined(MOZ_MEMORY_DARWIN)
	OSSpinLockUnlock(&lock->lock);
#elif defined(MOZ_MEMORY)
	pthread_mutex_unlock(lock);
#else
	if (__isthreaded)
		_SPINUNLOCK(&lock->lock);
#endif
}
/*
 * End mutex.
 */
/******************************************************************************/
/*
 * Begin spin lock.  Spin locks here are actually adaptive mutexes that block
 * after a period of spinning, because unbounded spinning would allow for
 * priority inversion.
 */
#if defined(MOZ_MEMORY) && !defined(MOZ_MEMORY_DARWIN)
#  define malloc_spin_init malloc_mutex_init
#  define malloc_spin_lock malloc_mutex_lock
#  define malloc_spin_unlock malloc_mutex_unlock
#endif
#ifndef MOZ_MEMORY
/*
 * We use an unpublished interface to initialize pthread mutexes with an
 * allocation callback, in order to avoid infinite recursion.
 */
int	_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t));
__weak_reference(_pthread_mutex_init_calloc_cb_stub,
    _pthread_mutex_init_calloc_cb);
int
_pthread_mutex_init_calloc_cb_stub(pthread_mutex_t *mutex,
    void *(calloc_cb)(size_t, size_t))
{
	return (0);
}
static bool
malloc_spin_init(pthread_mutex_t *lock)
{
	if (_pthread_mutex_init_calloc_cb(lock, base_calloc) != 0)
		return (true);
	return (false);
}
static inline unsigned
malloc_spin_lock(pthread_mutex_t *lock)
{
	unsigned ret = 0;

	if (__isthreaded) {
		if (_pthread_mutex_trylock(lock) != 0) {
			unsigned i;
			volatile unsigned j;

			/* Exponentially back off. */
			for (i = 1; i <= SPIN_LIMIT_2POW; i++) {
				for (j = 0; j < (1U << i); j++)
					ret++;
				CPU_SPINWAIT;
				if (_pthread_mutex_trylock(lock) == 0)
					return (ret);
			}
			/*
			 * Spinning failed.  Block until the lock becomes
			 * available, in order to avoid indefinite priority
			 * inversion.
			 */
			_pthread_mutex_lock(lock);
			assert((ret << BLOCK_COST_2POW) != 0);
			return (ret << BLOCK_COST_2POW);
		}
	}
	return (ret);
}
static inline void
malloc_spin_unlock(pthread_mutex_t *lock)
{
	if (__isthreaded)
		_pthread_mutex_unlock(lock);
}
#endif
/*
 * End spin lock.
 */
/******************************************************************************/
/*
 * Begin Utility functions/macros.
 */
/* Return the chunk address for allocation address a. */
#define CHUNK_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~chunksize_mask))
/* Return the chunk offset of address a. */
#define CHUNK_ADDR2OFFSET(a)						\
	((size_t)((uintptr_t)(a) & chunksize_mask))
/* Return the smallest chunk multiple that is >= s. */
#define CHUNK_CEILING(s)						\
	(((s) + chunksize_mask) & ~chunksize_mask)
/* Return the smallest cacheline multiple that is >= s. */
#define CACHELINE_CEILING(s)						\
	(((s) + (CACHELINE - 1)) & ~(CACHELINE - 1))
/* Return the smallest quantum multiple that is >= a. */
#define QUANTUM_CEILING(a)						\
	(((a) + quantum_mask) & ~quantum_mask)
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s)							\
	(((s) + pagesize_mask) & ~pagesize_mask)
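/*
 * For illustration, with 4 kB pages and a 16 byte quantum:
 * QUANTUM_CEILING(100) == 112, PAGE_CEILING(5000) == 8192, and
 * CHUNK_ADDR2BASE(p) masks p down to the start of its enclosing chunk.  All
 * of these depend on the *_mask values being one less than a power of 2.
 */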
/* Compute the smallest power of 2 that is >= x. */
static inline size_t
pow2_ceil(size_t x)
{
	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
#if (SIZEOF_PTR == 8)
	x |= x >> 32;
#endif
	x++;
	return (x);
}
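/*
 * The bit smearing above propagates the highest set bit of (x - 1) into all
 * lower positions, so the final increment lands on the next power of 2; e.g.
 * pow2_ceil(1000): 999 smears to 1023, and 1023 + 1 == 1024.  Exact powers
 * of 2 map to themselves thanks to the initial decrement.
 */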
#ifdef MALLOC_BALANCE
/*
 * Use a simple linear congruential pseudo-random number generator:
 *
 *   x' = (a*x + c) % m
 *
 * where the following constants ensure maximal period:
 *
 *   a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4.
 *   c == Odd number (relatively prime to 2^n).
 *   m == 2^32
 *
 * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints.
 *
 * This choice of m has the disadvantage that the quality of the bits is
 * proportional to bit position.  For example, the lowest bit has a cycle of 2,
 * the next has a cycle of 4, etc.  For this reason, we prefer to use the upper
 * bits.
 */
#  define PRN_DEFINE(suffix, var, a, c)					\
static inline void							\
sprn_##suffix(uint32_t seed)						\
{									\
	var = seed;							\
}									\
									\
static inline uint32_t							\
prn_##suffix(uint32_t lg_range)						\
{									\
	uint32_t ret, x;						\
									\
	assert(lg_range > 0);						\
	assert(lg_range <= 32);						\
									\
	x = (var * (a)) + (c);						\
	var = x;							\
	ret = x >> (32 - lg_range);					\
									\
	return (ret);							\
}
#  define SPRN(suffix, seed)	sprn_##suffix(seed)
#  define PRN(suffix, lg_range)	prn_##suffix(lg_range)
#endif
#ifdef MALLOC_BALANCE
/* Define the PRNG used for arena assignment. */
static __thread uint32_t balance_x;
PRN_DEFINE(balance, balance_x, 1297, 1301)
#endif
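/*
 * Usage sketch (illustrative): a thread seeds its generator once with
 * SPRN(balance, seed), after which PRN(balance, narenas_2pow) returns a
 * value in [0, 2^narenas_2pow), drawn from the higher-quality upper bits as
 * recommended above.
 */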
#endif | |
#ifdef MALLOC_UTRACE | |
static int | |
utrace(const void *addr, size_t len) | |
{ | |
malloc_utrace_t *ut = (malloc_utrace_t *)addr; | |
assert(len == sizeof(malloc_utrace_t)); | |
if (ut->p == NULL && ut->s == 0 && ut->r == NULL) | |
malloc_printf("%d x USER malloc_init()\n", getpid()); | |
else if (ut->p == NULL && ut->r != NULL) { | |
malloc_printf("%d x USER %p = malloc(%zu)\n", getpid(), ut->r, | |
ut->s); | |
} else if (ut->p != NULL && ut->r != NULL) { | |
malloc_printf("%d x USER %p = realloc(%p, %zu)\n", getpid(), | |
ut->r, ut->p, ut->s); | |
} else | |
malloc_printf("%d x USER free(%p)\n", getpid(), ut->p); | |
return (0); | |
} | |
#endif | |
static inline const char * | |
_getprogname(void) | |
{ | |
return ("<jemalloc>"); | |
} | |
#ifdef MALLOC_STATS | |
/* | |
* Print to stderr in such a way as to (hopefully) avoid memory allocation. | |
*/ | |
static void | |
malloc_printf(const char *format, ...) | |
{ | |
#ifndef WINCE | |
char buf[4096]; | |
va_list ap; | |
va_start(ap, format); | |
vsnprintf(buf, sizeof(buf), format, ap); | |
va_end(ap); | |
_malloc_message(buf, "", "", ""); | |
#endif | |
} | |
#endif | |
/******************************************************************************/ | |
#ifdef MALLOC_DECOMMIT | |
static inline void | |
pages_decommit(void *addr, size_t size) | |
{ | |
#ifdef MOZ_MEMORY_WINDOWS | |
VirtualFree(addr, size, MEM_DECOMMIT); | |
#else | |
if (mmap(addr, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, | |
0) == MAP_FAILED) | |
abort(); | |
#endif | |
} | |
static inline void | |
pages_commit(void *addr, size_t size) | |
{ | |
# ifdef MOZ_MEMORY_WINDOWS | |
VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE); | |
# else | |
if (mmap(addr, size, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | | |
MAP_ANON, -1, 0) == MAP_FAILED) | |
abort(); | |
# endif | |
} | |
#endif | |
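/*
 * Note (illustrative): both implementations release the physical backing
 * while keeping the address range reserved.  On Windows this is an explicit
 * MEM_DECOMMIT/MEM_COMMIT pair; on POSIX systems remapping with PROT_NONE
 * tells the kernel the old contents are dead, and the later PROT_READ |
 * PROT_WRITE remap supplies fresh pages.  Either way, recommitted memory
 * reads back as zeroes.
 */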
static bool
base_pages_alloc_mmap(size_t minsize)
{
	bool ret;
	size_t csize;
#ifdef MALLOC_DECOMMIT
	size_t pminsize;
#endif
	int pfd;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
#ifdef MALLOC_PAGEFILE
	if (opt_pagefile) {
		pfd = pagefile_init(csize);
		if (pfd == -1)
			return (true);
	} else
#endif
		pfd = -1;
	base_pages = pages_map(NULL, csize, pfd);
	if (base_pages == NULL) {
		ret = true;
		goto RETURN;
	}
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);
#ifdef MALLOC_DECOMMIT
	/*
	 * Leave enough pages for minsize committed, since otherwise they
	 * would have to be immediately recommitted.
	 */
	pminsize = PAGE_CEILING(minsize);
	base_next_decommitted = (void *)((uintptr_t)base_pages + pminsize);
	if (pminsize < csize)
		pages_decommit(base_next_decommitted, csize - pminsize);
#endif
#ifdef MALLOC_STATS
	base_mapped += csize;
#endif
	ret = false;
RETURN:
#ifdef MALLOC_PAGEFILE
	if (pfd != -1)
		pagefile_close(pfd);
#endif
	return (ret);
}
static bool
base_pages_alloc(size_t minsize)
{
	return (base_pages_alloc_mmap(minsize));
}
static void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);
	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
#ifdef MALLOC_DECOMMIT
	/* Make sure enough pages are committed for the new allocation. */
	if ((uintptr_t)base_next_addr > (uintptr_t)base_next_decommitted) {
		void *pbase_next_addr =
		    (void *)(PAGE_CEILING((uintptr_t)base_next_addr));

		pages_commit(base_next_decommitted, (uintptr_t)pbase_next_addr
		    - (uintptr_t)base_next_decommitted);
		base_next_decommitted = pbase_next_addr;
	}
#endif
	malloc_mutex_unlock(&base_mtx);
	VALGRIND_MALLOCLIKE_BLOCK(ret, size, 0, false);
	return (ret);
}
static void *
base_calloc(size_t number, size_t size)
{
	void *ret;

	ret = base_alloc(number * size);
	if (ret == NULL)
		return (NULL);
#ifdef MALLOC_VALGRIND
	VALGRIND_FREELIKE_BLOCK(ret, 0);
	VALGRIND_MALLOCLIKE_BLOCK(ret, number * size, 0, true);
#endif
	memset(ret, 0, number * size);
	return (ret);
}
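/*
 * The allocators below keep freelists of fixed-size records by threading the
 * list through the records themselves: a freed node's first word is
 * overwritten with the old list head (hence the *(extent_node_t **) casts),
 * so no auxiliary link structure is ever allocated.  A sketch of the idea,
 * with a hypothetical next field for illustration:
 *
 *	((free_node_t *)node)->next = head;
 *	head = node;
 */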
static extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		VALGRIND_FREELIKE_BLOCK(ret, 0);
		VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(extent_node_t), 0, false);
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}
	return (ret);
}
static void
base_node_dealloc(extent_node_t *node)
{
	malloc_mutex_lock(&base_mtx);
	VALGRIND_FREELIKE_BLOCK(node, 0);
	VALGRIND_MALLOCLIKE_BLOCK(node, sizeof(extent_node_t *), 0, false);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}
static reserve_reg_t *
base_reserve_reg_alloc(void)
{
	reserve_reg_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_reserve_regs != NULL) {
		ret = base_reserve_regs;
		base_reserve_regs = *(reserve_reg_t **)ret;
		VALGRIND_FREELIKE_BLOCK(ret, 0);
		VALGRIND_MALLOCLIKE_BLOCK(ret, sizeof(reserve_reg_t), 0, false);
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (reserve_reg_t *)base_alloc(sizeof(reserve_reg_t));
	}
	return (ret);
}
static void
base_reserve_reg_dealloc(reserve_reg_t *reg)
{
	malloc_mutex_lock(&base_mtx);
	VALGRIND_FREELIKE_BLOCK(reg, 0);
	VALGRIND_MALLOCLIKE_BLOCK(reg, sizeof(reserve_reg_t *), 0, false);
	*(reserve_reg_t **)reg = base_reserve_regs;
	base_reserve_regs = reg;
	malloc_mutex_unlock(&base_mtx);
}
/******************************************************************************/
#ifdef MALLOC_STATS
static void
stats_print(arena_t *arena)
{
	unsigned i, gap_start;

#ifdef MOZ_MEMORY_WINDOWS
	malloc_printf("dirty: %Iu page%s dirty, %I64u sweep%s,"
	    " %I64u madvise%s, %I64u page%s purged\n",
	    arena->ndirty, arena->ndirty == 1 ? "" : "s",
	    arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
	    arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
	    arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
#  ifdef MALLOC_DECOMMIT
	malloc_printf("decommit: %I64u decommit%s, %I64u commit%s,"
	    " %I64u page%s decommitted\n",
	    arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
	    arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
	    arena->stats.decommitted,
	    (arena->stats.decommitted == 1) ? "" : "s");
#  endif
	malloc_printf("            allocated      nmalloc      ndalloc\n");
	malloc_printf("small:   %12Iu %12I64u %12I64u\n",
	    arena->stats.allocated_small, arena->stats.nmalloc_small,
	    arena->stats.ndalloc_small);
	malloc_printf("large:   %12Iu %12I64u %12I64u\n",
	    arena->stats.allocated_large, arena->stats.nmalloc_large,
	    arena->stats.ndalloc_large);
	malloc_printf("total:   %12Iu %12I64u %12I64u\n",
	    arena->stats.allocated_small + arena->stats.allocated_large,
	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
	malloc_printf("mapped:  %12Iu\n", arena->stats.mapped);
#else
	malloc_printf("dirty: %zu page%s dirty, %llu sweep%s,"
	    " %llu madvise%s, %llu page%s purged\n",
	    arena->ndirty, arena->ndirty == 1 ? "" : "s",
	    arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
	    arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
	    arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
#  ifdef MALLOC_DECOMMIT
	malloc_printf("decommit: %llu decommit%s, %llu commit%s,"
	    " %llu page%s decommitted\n",
	    arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
	    arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
	    arena->stats.decommitted,
	    (arena->stats.decommitted == 1) ? "" : "s");
#  endif
	malloc_printf("            allocated      nmalloc      ndalloc\n");
	malloc_printf("small:   %12zu %12llu %12llu\n",
	    arena->stats.allocated_small, arena->stats.nmalloc_small,
	    arena->stats.ndalloc_small);
	malloc_printf("large:   %12zu %12llu %12llu\n",
	    arena->stats.allocated_large, arena->stats.nmalloc_large,
	    arena->stats.ndalloc_large);
	malloc_printf("total:   %12zu %12llu %12llu\n",
	    arena->stats.allocated_small + arena->stats.allocated_large,
	    arena->stats.nmalloc_small + arena->stats.nmalloc_large,
	    arena->stats.ndalloc_small + arena->stats.ndalloc_large);
	malloc_printf("mapped:  %12zu\n", arena->stats.mapped);
#endif
	malloc_printf("bins:     bin   size regs pgs  requests   newruns"
	    "    reruns maxruns curruns\n");
	for (i = 0, gap_start = UINT_MAX; i < ntbins + nqbins + nsbins; i++) {
		if (arena->bins[i].stats.nrequests == 0) {
			if (gap_start == UINT_MAX)
				gap_start = i;
		} else {
			if (gap_start != UINT_MAX) {
				if (i > gap_start + 1) {
					/* Gap of more than one size class. */
					malloc_printf("[%u..%u]\n",
					    gap_start, i - 1);
				} else {
					/* Gap of one size class. */
					malloc_printf("[%u]\n", gap_start);
				}
				gap_start = UINT_MAX;
			}
			malloc_printf(
#if defined(MOZ_MEMORY_WINDOWS)
			    "%13u %1s %4u %4u %3u %9I64u %9I64u"
			    " %9I64u %7u %7u\n",
#else
			    "%13u %1s %4u %4u %3u %9llu %9llu"
			    " %9llu %7lu %7lu\n",
#endif
			    i,
			    i < ntbins ? "T" : i < ntbins + nqbins ? "Q" : "S",
			    arena->bins[i].reg_size,
			    arena->bins[i].nregs,
			    arena->bins[i].run_size >> pagesize_2pow,
			    arena->bins[i].stats.nrequests,
			    arena->bins[i].stats.nruns,
			    arena->bins[i].stats.reruns,
arena->bins[i].stats.highruns, | |
arena->bins[i].stats.curruns); | |
} | |
} | |
if (gap_start != UINT_MAX) { | |
if (i > gap_start + 1) { | |
/* Gap of more than one size class. */ | |
malloc_printf("[%u..%u]\n", gap_start, i - 1); | |
} else { | |
/* Gap of one size class. */ | |
malloc_printf("[%u]\n", gap_start); | |
} | |
} | |
} | |
#endif | |
/* | |
* End Utility functions/macros. | |
*/ | |
/******************************************************************************/ | |
/* | |
* Begin extent tree code. | |
*/ | |
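/*
 * Extents are indexed by two trees. The szad tree orders nodes by
 * (size, address), which makes extent_tree_szad_nsearch() a best-fit search:
 * it returns the smallest extent that is large enough, breaking size ties by
 * lowest address. The ad tree orders nodes by address alone, which supports
 * coalescing of adjacent extents.
 */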
static inline int | |
extent_szad_comp(extent_node_t *a, extent_node_t *b) | |
{ | |
int ret; | |
size_t a_size = a->size; | |
size_t b_size = b->size; | |
ret = (a_size > b_size) - (a_size < b_size); | |
if (ret == 0) { | |
uintptr_t a_addr = (uintptr_t)a->addr; | |
uintptr_t b_addr = (uintptr_t)b->addr; | |
ret = (a_addr > b_addr) - (a_addr < b_addr); | |
} | |
return (ret); | |
} | |
/* Wrap red-black tree macros in functions. */ | |
rb_wrap(static, extent_tree_szad_, extent_tree_t, extent_node_t, | |
link_szad, extent_szad_comp) | |
static inline int | |
extent_ad_comp(extent_node_t *a, extent_node_t *b) | |
{ | |
uintptr_t a_addr = (uintptr_t)a->addr; | |
uintptr_t b_addr = (uintptr_t)b->addr; | |
return ((a_addr > b_addr) - (a_addr < b_addr)); | |
} | |
/* Wrap red-black tree macros in functions. */ | |
rb_wrap(static, extent_tree_ad_, extent_tree_t, extent_node_t, link_ad, | |
extent_ad_comp) | |
/* | |
* End extent tree code. | |
*/ | |
/******************************************************************************/ | |
/* | |
* Begin chunk management functions. | |
*/ | |
#ifdef MOZ_MEMORY_WINDOWS | |
#ifdef MOZ_MEMORY_WINCE | |
#define ALIGN_ADDR2OFFSET(al, ad) \ | |
((uintptr_t)ad & (al - 1)) | |
static void * | |
pages_map_align(size_t size, int pfd, size_t alignment) | |
{ | |
void *ret; | |
int offset; | |
if (size % alignment) | |
size += (alignment - (size % alignment)); | |
assert(size >= alignment); | |
ret = pages_map(NULL, size, pfd); | |
offset = ALIGN_ADDR2OFFSET(alignment, ret); | |
if (offset) { | |
                /* Try to over-allocate by the amount we're offset. */
void *tmp; | |
pages_unmap(ret, size); | |
tmp = VirtualAlloc(NULL, size + alignment - offset, | |
MEM_RESERVE, PAGE_NOACCESS); | |
if (offset == ALIGN_ADDR2OFFSET(alignment, tmp)) | |
ret = VirtualAlloc((void*)((intptr_t)tmp + alignment | |
- offset), size, MEM_COMMIT, | |
PAGE_READWRITE); | |
else | |
VirtualFree(tmp, 0, MEM_RELEASE); | |
offset = ALIGN_ADDR2OFFSET(alignment, ret); | |
if (offset) { | |
                        /* Over-allocate to ensure we have an aligned region. */
ret = VirtualAlloc(NULL, size + alignment, MEM_RESERVE, | |
PAGE_NOACCESS); | |
offset = ALIGN_ADDR2OFFSET(alignment, ret); | |
ret = VirtualAlloc((void*)((intptr_t)ret + | |
alignment - offset), | |
size, MEM_COMMIT, PAGE_READWRITE); | |
} | |
} | |
return (ret); | |
} | |
#endif | |
static void * | |
pages_map(void *addr, size_t size, int pfd) | |
{ | |
void *ret = NULL; | |
#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6) | |
void *va_ret; | |
assert(addr == NULL); | |
va_ret = VirtualAlloc(addr, size, MEM_RESERVE, PAGE_NOACCESS); | |
if (va_ret) | |
ret = VirtualAlloc(va_ret, size, MEM_COMMIT, PAGE_READWRITE); | |
assert(va_ret == ret); | |
#else | |
ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE, | |
PAGE_READWRITE); | |
#endif | |
return (ret); | |
} | |
static void | |
pages_unmap(void *addr, size_t size) | |
{ | |
if (VirtualFree(addr, 0, MEM_RELEASE) == 0) { | |
#if defined(MOZ_MEMORY_WINCE) && !defined(MOZ_MEMORY_WINCE6) | |
if (GetLastError() == ERROR_INVALID_PARAMETER) { | |
MEMORY_BASIC_INFORMATION info; | |
VirtualQuery(addr, &info, sizeof(info)); | |
if (VirtualFree(info.AllocationBase, 0, MEM_RELEASE)) | |
return; | |
} | |
#endif | |
_malloc_message(_getprogname(), | |
": (malloc) Error in VirtualFree()\n", "", ""); | |
if (opt_abort) | |
abort(); | |
} | |
} | |
#elif (defined(MOZ_MEMORY_DARWIN)) | |
static void * | |
pages_map(void *addr, size_t size, int pfd) | |
{ | |
void *ret; | |
kern_return_t err; | |
int flags; | |
if (addr != NULL) { | |
ret = addr; | |
flags = 0; | |
} else | |
flags = VM_FLAGS_ANYWHERE; | |
err = vm_allocate((vm_map_t)mach_task_self(), (vm_address_t *)&ret, | |
(vm_size_t)size, flags); | |
if (err != KERN_SUCCESS) | |
ret = NULL; | |
assert(ret == NULL || (addr == NULL && ret != addr) | |
|| (addr != NULL && ret == addr)); | |
return (ret); | |
} | |
static void | |
pages_unmap(void *addr, size_t size) | |
{ | |
kern_return_t err; | |
err = vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)addr, | |
(vm_size_t)size); | |
if (err != KERN_SUCCESS) { | |
                _malloc_message(_getprogname(),
": (malloc) Error in vm_deallocate(): ", | |
mach_error_string(err), "\n"); | |
if (opt_abort) | |
abort(); | |
} | |
} | |
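/*
 * Only use vm_copy() for copies of at least 32 pages; for anything smaller,
 * plain memcpy() is presumably cheaper than the page table manipulation that
 * vm_copy() performs.
 */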
#define VM_COPY_MIN (pagesize << 5) | |
static inline void | |
pages_copy(void *dest, const void *src, size_t n) | |
{ | |
assert((void *)((uintptr_t)dest & ~pagesize_mask) == dest); | |
assert(n >= VM_COPY_MIN); | |
assert((void *)((uintptr_t)src & ~pagesize_mask) == src); | |
vm_copy(mach_task_self(), (vm_address_t)src, (vm_size_t)n, | |
(vm_address_t)dest); | |
} | |
#else /* MOZ_MEMORY_DARWIN */ | |
#ifdef JEMALLOC_USES_MAP_ALIGN | |
static void * | |
pages_map_align(size_t size, int pfd, size_t alignment) | |
{ | |
void *ret; | |
/* | |
* We don't use MAP_FIXED here, because it can cause the *replacement* | |
* of existing mappings, and we only want to create new mappings. | |
*/ | |
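        /*
         * With MAP_ALIGN (Solaris), the first argument to mmap() specifies
         * the required alignment rather than a fixed mapping address, which
         * is why "alignment" is passed in the address slot below.
         */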
#ifdef MALLOC_PAGEFILE | |
if (pfd != -1) { | |
ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | | |
MAP_NOSYNC | MAP_ALIGN, pfd, 0); | |
} else | |
#endif | |
{ | |
ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | | |
MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0); | |
} | |
assert(ret != NULL); | |
if (ret == MAP_FAILED) | |
ret = NULL; | |
return (ret); | |
} | |
#endif | |
static void * | |
pages_map(void *addr, size_t size, int pfd) | |
{ | |
void *ret; | |
/* | |
* We don't use MAP_FIXED here, because it can cause the *replacement* | |
* of existing mappings, and we only want to create new mappings. | |
*/ | |
#ifdef MALLOC_PAGEFILE | |
if (pfd != -1) { | |
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | | |
MAP_NOSYNC, pfd, 0); | |
} else | |
#endif | |
{ | |
ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | | |
MAP_ANON, -1, 0); | |
} | |
assert(ret != NULL); | |
if (ret == MAP_FAILED) | |
ret = NULL; | |
else if (addr != NULL && ret != addr) { | |
/* | |
* We succeeded in mapping memory, but not in the right place. | |
*/ | |
if (munmap(ret, size) == -1) { | |
char buf[STRERROR_BUF]; | |
strerror_r(errno, buf, sizeof(buf)); | |
_malloc_message(_getprogname(), | |
": (malloc) Error in munmap(): ", buf, "\n"); | |
if (opt_abort) | |
abort(); | |
} | |
ret = NULL; | |
} | |
assert(ret == NULL || (addr == NULL && ret != addr) | |
|| (addr != NULL && ret == addr)); | |
return (ret); | |
} | |
static void | |
pages_unmap(void *addr, size_t size) | |
{ | |
if (munmap(addr, size) == -1) { | |
char buf[STRERROR_BUF]; | |
strerror_r(errno, buf, sizeof(buf)); | |
_malloc_message(_getprogname(), | |
": (malloc) Error in munmap(): ", buf, "\n"); | |
if (opt_abort) | |
abort(); | |
} | |
} | |
#endif | |
#ifdef MALLOC_VALIDATE | |
static inline malloc_rtree_t * | |
malloc_rtree_new(unsigned bits) | |
{ | |
malloc_rtree_t *ret; | |
unsigned bits_per_level, height, i; | |
bits_per_level = ffs(pow2_ceil((MALLOC_RTREE_NODESIZE / | |
sizeof(void *)))) - 1; | |
height = bits / bits_per_level; | |
if (height * bits_per_level != bits) | |
height++; | |
assert(height * bits_per_level >= bits); | |
        ret = (malloc_rtree_t *)base_calloc(1, sizeof(malloc_rtree_t) +
            (sizeof(unsigned) * (height - 1)));
if (ret == NULL) | |
return (NULL); | |
malloc_spin_init(&ret->lock); | |
ret->height = height; | |
if (bits_per_level * height > bits) | |
ret->level2bits[0] = bits % bits_per_level; | |
else | |
ret->level2bits[0] = bits_per_level; | |
for (i = 1; i < height; i++) | |
ret->level2bits[i] = bits_per_level; | |
ret->root = (void**)base_calloc(1, sizeof(void *) << ret->level2bits[0]); | |
if (ret->root == NULL) { | |
/* | |
* We leak the rtree here, since there's no generic base | |
* deallocation. | |
*/ | |
return (NULL); | |
} | |
return (ret); | |
} | |
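/*
 * The rtree consumes the key's significant bits from most to least
 * significant, level2bits[i] of them per level. As an illustrative example
 * (actual parameters depend on pointer size, chunk size, and
 * MALLOC_RTREE_NODESIZE): with 32-bit pointers, 1 MB chunks, and 9 bits per
 * level, malloc_rtree_new(12) produces height == 2 and level2bits == {3, 9};
 * a lookup then indexes the root with the key's top 3 bits and the leaf with
 * the next 9 bits, while the low 20 bits (the offset within a chunk) are
 * ignored.
 */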
/* The least significant bits of the key are ignored. */ | |
static inline void * | |
malloc_rtree_get(malloc_rtree_t *rtree, uintptr_t key) | |
{ | |
void *ret; | |
uintptr_t subkey; | |
unsigned i, lshift, height, bits; | |
void **node, **child; | |
malloc_spin_lock(&rtree->lock); | |
for (i = lshift = 0, height = rtree->height, node = rtree->root; | |
i < height - 1; | |
i++, lshift += bits, node = child) { | |
bits = rtree->level2bits[i]; | |
subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits); | |
child = (void**)node[subkey]; | |
if (child == NULL) { | |
malloc_spin_unlock(&rtree->lock); | |
return (NULL); | |
} | |
} | |
/* node is a leaf, so it contains values rather than node pointers. */ | |
bits = rtree->level2bits[i]; | |
subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits); | |
ret = node[subkey]; | |
malloc_spin_unlock(&rtree->lock); | |
return (ret); | |
} | |
static inline bool | |
malloc_rtree_set(malloc_rtree_t *rtree, uintptr_t key, void *val) | |
{ | |
uintptr_t subkey; | |
unsigned i, lshift, height, bits; | |
void **node, **child; | |
malloc_spin_lock(&rtree->lock); | |
for (i = lshift = 0, height = rtree->height, node = rtree->root; | |
i < height - 1; | |
i++, lshift += bits, node = child) { | |
bits = rtree->level2bits[i]; | |
subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits); | |
child = (void**)node[subkey]; | |
if (child == NULL) { | |
child = (void**)base_calloc(1, sizeof(void *) << | |
rtree->level2bits[i+1]); | |
if (child == NULL) { | |
malloc_spin_unlock(&rtree->lock); | |
return (true); | |
} | |
node[subkey] = child; | |
} | |
} | |
/* node is a leaf, so it contains values rather than node pointers. */ | |
bits = rtree->level2bits[i]; | |
subkey = (key << lshift) >> ((SIZEOF_PTR << 3) - bits); | |
node[subkey] = val; | |
malloc_spin_unlock(&rtree->lock); | |
return (false); | |
} | |
#endif | |
static void * | |
chunk_alloc_mmap(size_t size, bool pagefile) | |
{ | |
void *ret; | |
#ifndef JEMALLOC_USES_MAP_ALIGN | |
size_t offset; | |
#endif | |
int pfd; | |
#ifdef MALLOC_PAGEFILE | |
if (opt_pagefile && pagefile) { | |
pfd = pagefile_init(size); | |
if (pfd == -1) | |
return (NULL); | |
} else | |
#endif | |
pfd = -1; | |
        /*
         * Windows requires that each VM reservation be freed as a whole;
         * partial unmapping of a reservation is not possible. Therefore,
         * take care here to acquire the final result via one mapping
         * operation, unmapping any preliminary result that is not correctly
         * aligned.
         *
         * The MALLOC_PAGEFILE code also benefits from this mapping
         * algorithm, since it reduces the number of page files.
         */
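        /*
         * Illustrative example (hypothetical addresses, assuming 1 MB
         * chunks): if pages_map(NULL, size, pfd) returns 0x10080000, then
         * offset == 0x80000, and the retry targets (0x10080000 + size -
         * 0x80000), which is chunk-aligned. If another thread wins the race
         * for that address, we over-allocate (size + chunksize) bytes, which
         * necessarily contains a chunk-aligned run of size bytes, then unmap
         * and re-map just that aligned region.
         */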
#ifdef JEMALLOC_USES_MAP_ALIGN | |
ret = pages_map_align(size, pfd, chunksize); | |
#else | |
ret = pages_map(NULL, size, pfd); | |
if (ret == NULL) | |
goto RETURN; | |
offset = CHUNK_ADDR2OFFSET(ret); | |
if (offset != 0) { | |
/* Deallocate, then try to allocate at (ret + size - offset). */ | |
pages_unmap(ret, size); | |
ret = pages_map((void *)((uintptr_t)ret + size - offset), size, | |
pfd); | |
while (ret == NULL) { | |
/* | |
* Over-allocate in order to map a memory region that | |
* is definitely large enough. | |
*/ | |
ret = pages_map(NULL, size + chunksize, -1); | |
if (ret == NULL) | |
goto RETURN; | |
/* | |
* Deallocate, then allocate the correct size, within | |
* the over-sized mapping. | |
*/ | |
offset = CHUNK_ADDR2OFFSET(ret); | |
pages_unmap(ret, size + chunksize); | |
if (offset == 0) | |
ret = pages_map(ret, size, pfd); | |
else { | |
ret = pages_map((void *)((uintptr_t)ret + | |
chunksize - offset), size, pfd); | |
} | |
/* | |
* Failure here indicates a race with another thread, so | |
* try again. | |
*/ | |
} | |
} | |
RETURN: | |
#endif | |
#ifdef MALLOC_PAGEFILE | |
if (pfd != -1) | |
pagefile_close(pfd); | |
#endif | |
#ifdef MALLOC_STATS | |
if (ret != NULL) | |
stats_chunks.nchunks += (size / chunksize); | |
#endif | |
return (ret); | |
} | |
#ifdef MALLOC_PAGEFILE | |
static int | |
pagefile_init(size_t size) | |
{ | |
int ret; | |
size_t i; | |
char pagefile_path[PATH_MAX]; | |
char zbuf[MALLOC_PAGEFILE_WRITE_SIZE]; | |
/* | |
* Create a temporary file, then immediately unlink it so that it will | |
* not persist. | |
*/ | |
strcpy(pagefile_path, pagefile_templ); | |
ret = mkstemp(pagefile_path); | |
if (ret == -1) | |
return (ret); | |
if (unlink(pagefile_path)) { | |
char buf[STRERROR_BUF]; | |
strerror_r(errno, buf, sizeof(buf)); | |
_malloc_message(_getprogname(), ": (malloc) Error in unlink(\"", | |
pagefile_path, "\"):"); | |
_malloc_message(buf, "\n", "", ""); | |
if (opt_abort) | |
abort(); | |
} | |
/* | |
         * Write sequential zeroes to the file in order to ensure that disk
* space is committed, with minimal fragmentation. It would be | |
* sufficient to write one zero per disk block, but that potentially | |
* results in more system calls, for no real gain. | |
*/ | |
memset(zbuf, 0, sizeof(zbuf)); | |
for (i = 0; i < size; i += sizeof(zbuf)) { | |
if (write(ret, zbuf, sizeof(zbuf)) != sizeof(zbuf)) { | |
if (errno != ENOSPC) { | |
char buf[STRERROR_BUF]; | |
strerror_r(errno, buf, sizeof(buf)); | |
_malloc_message(_getprogname(), | |
": (malloc) Error in write(): ", buf, "\n"); | |
if (opt_abort) | |
abort(); | |
} | |
pagefile_close(ret); | |
return (-1); | |
} | |
} | |
return (ret); | |
} | |
static void | |
pagefile_close(int pfd) | |
{ | |
if (close(pfd)) { | |
char buf[STRERROR_BUF]; | |
strerror_r(errno, buf, sizeof(buf)); | |
_malloc_message(_getprogname(), | |
": (malloc) Error in close(): ", buf, "\n"); | |
if (opt_abort) | |
abort(); | |
} | |
} | |
#endif | |
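/*
 * Try to satisfy a chunk request from the reserve: find the best-fit extent
 * in reserve_chunks_szad, carve off its leading size bytes, and, if the
 * allocation drops the reserve below reserve_min, attempt to replenish it
 * via chunk_alloc_mmap(), notifying registered callbacks (RESERVE_CND_LOW)
 * whenever replenishment fails.
 */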
static void * | |
chunk_recycle_reserve(size_t size, bool zero) | |
{ | |
extent_node_t *node, key; | |
#ifdef MALLOC_DECOMMIT | |
if (size != chunksize) | |
return (NULL); | |
#endif | |
key.addr = NULL; | |
key.size = size; | |
malloc_mutex_lock(&reserve_mtx); | |
node = extent_tree_szad_nsearch(&reserve_chunks_szad, &key); | |
if (node != NULL) { | |
void *ret = node->addr; | |
/* Remove node from the tree. */ | |
extent_tree_szad_remove(&reserve_chunks_szad, node); | |
#ifndef MALLOC_DECOMMIT | |
if (node->size == size) { | |
#else | |
assert(node->size == size); | |
#endif | |
extent_tree_ad_remove(&reserve_chunks_ad, node); | |
base_node_dealloc(node); | |
#ifndef MALLOC_DECOMMIT | |
} else { | |
/* | |
* Insert the remainder of node's address range as a | |
* smaller chunk. Its position within reserve_chunks_ad | |
* does not change. | |
*/ | |
assert(node->size > size); | |
node->addr = (void *)((uintptr_t)node->addr + size); | |
node->size -= size; | |
extent_tree_szad_insert(&reserve_chunks_szad, node); | |
} | |
#endif | |
reserve_cur -= size; | |
/* | |
* Try to replenish the reserve if this allocation depleted it. | |
*/ | |
#ifndef MALLOC_DECOMMIT | |
if (reserve_cur < reserve_min) { | |
size_t diff = reserve_min - reserve_cur; | |
#else | |
while (reserve_cur < reserve_min) { | |
# define diff chunksize | |
#endif | |
void *chunk; | |
malloc_mutex_unlock(&reserve_mtx); | |
chunk = chunk_alloc_mmap(diff, true); | |
malloc_mutex_lock(&reserve_mtx); | |
if (chunk == NULL) { | |
uint64_t seq = 0; | |
do { | |
seq = reserve_notify(RESERVE_CND_LOW, | |
size, seq); | |
if (seq == 0) | |
goto MALLOC_OUT; | |
} while (reserve_cur < reserve_min); | |
} else { | |
extent_node_t *node; | |
node = chunk_dealloc_reserve(chunk, diff); | |
if (node == NULL) { | |
uint64_t seq = 0; | |
pages_unmap(chunk, diff); | |
do { | |
seq = reserve_notify( | |
RESERVE_CND_LOW, size, seq); | |
if (seq == 0) | |
goto MALLOC_OUT; | |
} while (reserve_cur < reserve_min); | |
} | |
} | |
} | |
MALLOC_OUT: | |
malloc_mutex_unlock(&reserve_mtx); | |
#ifdef MALLOC_DECOMMIT | |
pages_commit(ret, size); | |
# undef diff | |
#else | |
if (zero) | |
memset(ret, 0, size); | |
#endif | |
return (ret); | |
} | |
malloc_mutex_unlock(&reserve_mtx); | |
return (NULL); | |
} | |
static void * | |
chunk_alloc(size_t size, bool zero, bool pagefile) | |
{ | |
void *ret; | |
assert(size != 0); | |
assert((size & chunksize_mask) == 0); | |
ret = chunk_recycle_reserve(size, zero); | |
if (ret != NULL) | |
goto RETURN; | |
ret = chunk_alloc_mmap(size, pagefile); | |
        if (ret != NULL)
                goto RETURN;
/* All strategies for allocation failed. */ | |
ret = NULL; | |
RETURN: | |
#ifdef MALLOC_STATS | |
if (ret != NULL) | |
stats_chunks.curchunks += (size / chunksize); | |
if (stats_chunks.curchunks > stats_chunks.highchunks) | |
stats_chunks.highchunks = stats_chunks.curchunks; | |
#endif | |
#ifdef MALLOC_VALIDATE | |
if (ret != NULL) { | |
if (malloc_rtree_set(chunk_rtree, (uintptr_t)ret, ret)) { | |
chunk_dealloc(ret, size); | |
return (NULL); | |
} | |
} | |
#endif | |
assert(CHUNK_ADDR2BASE(ret) == ret); | |
return (ret); | |
} | |
static extent_node_t * | |
chunk_dealloc_reserve(void *chunk, size_t size) | |
{ | |
extent_node_t *node; | |
#ifdef MALLOC_DECOMMIT | |
if (size != chunksize) | |
return (NULL); | |
#else | |
extent_node_t *prev, key; | |
key.addr = (void *)((uintptr_t)chunk + size); | |
node = extent_tree_ad_nsearch(&reserve_chunks_ad, &key); | |
/* Try to coalesce forward. */ | |
if (node != NULL && node->addr == key.addr) { | |
/* | |
* Coalesce chunk with the following address range. This does | |
* not change the position within reserve_chunks_ad, so only | |
* remove/insert from/into reserve_chunks_szad. | |
*/ | |
extent_tree_szad_remove(&reserve_chunks_szad, node); | |
node->addr = chunk; | |
node->size += size; | |
extent_tree_szad_insert(&reserve_chunks_szad, node); | |
} else { | |
#endif | |
/* Coalescing forward failed, so insert a new node. */ | |
node = base_node_alloc(); | |
if (node == NULL) | |
return (NULL); | |
node->addr = chunk; | |
node->size = size; | |
extent_tree_ad_insert(&reserve_chunks_ad, node); | |
extent_tree_szad_insert(&reserve_chunks_szad, node); | |
#ifndef MALLOC_DECOMMIT | |
} | |
/* Try to coalesce backward. */ | |
prev = extent_tree_ad_prev(&reserve_chunks_ad, node); | |
if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) == | |
chunk) { | |
/* | |
* Coalesce chunk with the previous address range. This does | |
* not change the position within reserve_chunks_ad, so only | |
* remove/insert node from/into reserve_chunks_szad. | |
*/ | |
extent_tree_szad_remove(&reserve_chunks_szad, prev); | |
extent_tree_ad_remove(&reserve_chunks_ad, prev); | |
extent_tree_szad_remove(&reserve_chunks_szad, node); | |
node->addr = prev->addr; | |
node->size += prev->size; | |
extent_tree_szad_insert(&reserve_chunks_szad, node); | |
base_node_dealloc(prev); | |
} | |
#endif | |
#ifdef MALLOC_DECOMMIT | |
pages_decommit(chunk, size); | |
#else | |
madvise(chunk, size, MADV_FREE); | |
#endif | |
reserve_cur += size; | |
if (reserve_cur > reserve_max) | |
reserve_shrink(); | |
return (node); | |
} | |
static void | |
chunk_dealloc_mmap(void *chunk, size_t size) | |
{ | |
pages_unmap(chunk, size); | |
} | |
static void | |
chunk_dealloc(void *chunk, size_t size) | |
{ | |
extent_node_t *node; | |
assert(chunk != NULL); | |
assert(CHUNK_ADDR2BASE(chunk) == chunk); | |
assert(size != 0); | |
assert((size & chunksize_mask) == 0); | |
#ifdef MALLOC_STATS | |
stats_chunks.curchunks -= (size / chunksize); | |
#endif | |
#ifdef MALLOC_VALIDATE | |
malloc_rtree_set(chunk_rtree, (uintptr_t)chunk, NULL); | |
#endif | |
/* Try to merge chunk into the reserve. */ | |
malloc_mutex_lock(&reserve_mtx); | |
node = chunk_dealloc_reserve(chunk, size); | |
malloc_mutex_unlock(&reserve_mtx); | |
if (node == NULL) | |
chunk_dealloc_mmap(chunk, size); | |
} | |
/* | |
* End chunk management functions. | |
*/ | |
/******************************************************************************/ | |
/* | |
* Begin arena. | |
*/ | |
/* | |
* Choose an arena based on a per-thread value (fast-path code, calls slow-path | |
* code if necessary). | |
*/ | |
static inline arena_t * | |
choose_arena(void) | |
{ | |
arena_t *ret; | |
/* | |
* We can only use TLS if this is a PIC library, since for the static | |
* library version, libc's malloc is used by TLS allocation, which | |
* introduces a bootstrapping issue. | |
*/ | |
#ifndef NO_TLS | |
if (__isthreaded == false) { | |
/* Avoid the overhead of TLS for single-threaded operation. */ | |
return (arenas[0]); | |
} | |
# ifdef MOZ_MEMORY_WINDOWS | |
ret = (arena_t*)TlsGetValue(tlsIndex); | |
# else | |
ret = arenas_map; | |
# endif | |
if (ret == NULL) { | |
ret = choose_arena_hard(); | |
assert(ret != NULL); | |
} | |
#else | |
if (__isthreaded && narenas > 1) { | |
unsigned long ind; | |
/* | |
* Hash _pthread_self() to one of the arenas. There is a prime | |
* number of arenas, so this has a reasonable chance of | |
* working. Even so, the hashing can be easily thwarted by | |
* inconvenient _pthread_self() values. Without specific | |
* knowledge of how _pthread_self() calculates values, we can't | |
* easily do much better than this. | |
*/ | |
ind = (unsigned long) _pthread_self() % narenas; | |
/* | |
                 * Optimistically assume that arenas[ind] has been initialized.
* At worst, we find out that some other thread has already | |
* done so, after acquiring the lock in preparation. Note that | |
* this lazy locking also has the effect of lazily forcing | |
* cache coherency; without the lock acquisition, there's no | |
* guarantee that modification of arenas[ind] by another thread | |
* would be seen on this CPU for an arbitrary amount of time. | |
* | |
* In general, this approach to modifying a synchronized value | |
* isn't a good idea, but in this case we only ever modify the | |
* value once, so things work out well. | |
*/ | |
ret = arenas[ind]; | |
if (ret == NULL) { | |
/* | |
* Avoid races with another thread that may have already | |
* initialized arenas[ind]. | |
*/ | |
malloc_spin_lock(&arenas_lock); | |
if (arenas[ind] == NULL) | |
ret = arenas_extend((unsigned)ind); | |
else | |
ret = arenas[ind]; | |
malloc_spin_unlock(&arenas_lock); | |
} | |
} else | |
ret = arenas[0]; | |
#endif | |
assert(ret != NULL); | |
return (ret); | |
} | |
#ifndef NO_TLS | |
/* | |
* Choose an arena based on a per-thread value (slow-path code only, called | |
* only by choose_arena()). | |
*/ | |
static arena_t * | |
choose_arena_hard(void) | |
{ | |
arena_t *ret; | |
assert(__isthreaded); | |
#ifdef MALLOC_BALANCE | |
/* Seed the PRNG used for arena load balancing. */ | |
SPRN(balance, (uint32_t)(uintptr_t)(_pthread_self())); | |
#endif | |
if (narenas > 1) { | |
#ifdef MALLOC_BALANCE | |
unsigned ind; | |
ind = PRN(balance, narenas_2pow); | |
if ((ret = arenas[ind]) == NULL) { | |
malloc_spin_lock(&arenas_lock); | |
if ((ret = arenas[ind]) == NULL) | |
ret = arenas_extend(ind); | |
malloc_spin_unlock(&arenas_lock); | |
} | |
#else | |
malloc_spin_lock(&arenas_lock); | |
if ((ret = arenas[next_arena]) == NULL) | |
ret = arenas_extend(next_arena); | |
next_arena = (next_arena + 1) % narenas; | |
malloc_spin_unlock(&arenas_lock); | |
#endif | |
} else | |
ret = arenas[0]; | |
#ifdef MOZ_MEMORY_WINDOWS | |
TlsSetValue(tlsIndex, ret); | |
#else | |
arenas_map = ret; | |
#endif | |
return (ret); | |
} | |
#endif | |
static inline int | |
arena_chunk_comp(arena_chunk_t *a, arena_chunk_t *b) | |
{ | |
uintptr_t a_chunk = (uintptr_t)a; | |
uintptr_t b_chunk = (uintptr_t)b; | |
assert(a != NULL); | |
assert(b != NULL); | |
return ((a_chunk > b_chunk) - (a_chunk < b_chunk)); | |
} | |
/* Wrap red-black tree macros in functions. */ | |
rb_wrap(static, arena_chunk_tree_dirty_, arena_chunk_tree_t, | |
arena_chunk_t, link_dirty, arena_chunk_comp) | |
static inline int | |
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) | |
{ | |
uintptr_t a_mapelm = (uintptr_t)a; | |
uintptr_t b_mapelm = (uintptr_t)b; | |
assert(a != NULL); | |
assert(b != NULL); | |
return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); | |
} | |
/* Wrap red-black tree macros in functions. */ | |
rb_wrap(static, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, link, | |
arena_run_comp) | |
static inline int | |
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) | |
{ | |
int ret; | |
size_t a_size = a->bits & ~pagesize_mask; | |
size_t b_size = b->bits & ~pagesize_mask; | |
ret = (a_size > b_size) - (a_size < b_size); | |
if (ret == 0) { | |
uintptr_t a_mapelm, b_mapelm; | |
if ((a->bits & CHUNK_MAP_KEY) == 0) | |
a_mapelm = (uintptr_t)a; | |
else { | |
/* | |
* Treat keys as though they are lower than anything | |
* else. | |
*/ | |
a_mapelm = 0; | |
} | |
b_mapelm = (uintptr_t)b; | |
ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); | |
} | |
return (ret); | |
} | |
/* Wrap red-black tree macros in functions. */ | |
rb_wrap(static, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, link, | |
arena_avail_comp) | |
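/*
 * Each run tracks its free regions with the regs_mask bitmap: a set bit b in
 * element i means that region (i * (1 << (SIZEOF_INT_2POW + 3)) + b) is
 * free. For example, with 32-bit mask elements, regs_mask[1] == 0x4 means
 * that region 34 is the first free region at index 32 or above. regs_minelm
 * caches the lowest element that may still contain a set bit, so that
 * repeated allocations need not rescan exhausted elements.
 */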
static inline void * | |
arena_run_reg_alloc(arena_run_t *run, arena_bin_t *bin) | |
{ | |
void *ret; | |
unsigned i, mask, bit, regind; | |
assert(run->magic == ARENA_RUN_MAGIC); | |
assert(run->regs_minelm < bin->regs_mask_nelms); | |
/* | |
* Move the first check outside the loop, so that run->regs_minelm can | |
* be updated unconditionally, without the possibility of updating it | |
* multiple times. | |
*/ | |
i = run->regs_minelm; | |
mask = run->regs_mask[i]; | |
if (mask != 0) { | |
/* Usable allocation found. */ | |
bit = ffs((int)mask) - 1; | |
regind = ((i << (SIZEOF_INT_2POW + 3)) + bit); | |
assert(regind < bin->nregs); | |
ret = (void *)(((uintptr_t)run) + bin->reg0_offset | |
+ (bin->reg_size * regind)); | |
/* Clear bit. */ | |
mask ^= (1U << bit); | |
run->regs_mask[i] = mask; | |
return (ret); | |
} | |
for (i++; i < bin->regs_mask_nelms; i++) { | |
mask = run->regs_mask[i]; | |
if (mask != 0) { | |
/* Usable allocation found. */ | |
bit = ffs((int)mask) - 1; | |
regind = ((i << (SIZEOF_INT_2POW + 3)) + bit); | |
assert(regind < bin->nregs); | |
ret = (void *)(((uintptr_t)run) + bin->reg0_offset | |
+ (bin->reg_size * regind)); | |
/* Clear bit. */ | |
mask ^= (1U << bit); | |
run->regs_mask[i] = mask; | |
/* | |
* Make a note that nothing before this element | |
* contains a free region. | |
*/ | |
run->regs_minelm = i; /* Low payoff: + (mask == 0); */ | |
return (ret); | |
} | |
} | |
/* Not reached. */ | |
assert(0); | |
return (NULL); | |
} | |
static inline void | |
arena_run_reg_dalloc(arena_run_t *run, arena_bin_t *bin, void *ptr, size_t size) | |
{ | |
/* | |
* To divide by a number D that is not a power of two we multiply | |
* by (2^21 / D) and then right shift by 21 positions. | |
* | |
* X / D | |
* | |
* becomes | |
* | |
* (X * size_invs[(D >> QUANTUM_2POW_MIN) - 3]) >> SIZE_INV_SHIFT | |
*/ | |
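        /*
         * Worked example (assuming QUANTUM_2POW_MIN == 4, i.e. a 16-byte
         * quantum): for D == 48, size_invs[(48 >> 4) - 3] == SIZE_INV(3) ==
         * ((1U << 21) / 48) + 1 == 43691, so X == 96 gives
         * (96 * 43691) >> 21 == 4194336 >> 21 == 2 == 96 / 48.
         */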
#define SIZE_INV_SHIFT 21 | |
#define SIZE_INV(s) (((1U << SIZE_INV_SHIFT) / (s << QUANTUM_2POW_MIN)) + 1) | |
static const unsigned size_invs[] = { | |
SIZE_INV(3), | |