| /* ********************************************************** |
| * Copyright (c) 2010-2013 Google, Inc. All rights reserved. |
| * Copyright (c) 2001-2010 VMware, Inc. All rights reserved. |
| * **********************************************************/ |
| |
| /* |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are met: |
| * |
| * * Redistributions of source code must retain the above copyright notice, |
| * this list of conditions and the following disclaimer. |
| * |
| * * Redistributions in binary form must reproduce the above copyright notice, |
| * this list of conditions and the following disclaimer in the documentation |
| * and/or other materials provided with the distribution. |
| * |
| * * Neither the name of VMware, Inc. nor the names of its contributors may be |
| * used to endorse or promote products derived from this software without |
| * specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE |
| * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH |
| * DAMAGE. |
| */ |
| |
| /* Copyright (c) 2003-2007 Determina Corp. */ |
| /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ |
| /* Copyright (c) 2001 Hewlett-Packard Company */ |
| |
| /* |
| * heap.c - heap manager |
| */ |
| |
| #include "globals.h" |
| #include <string.h> /* for memcpy */ |
| #include <limits.h> |
| |
| #include "fragment.h" /* for struct sizes */ |
| #include "link.h" /* for struct sizes */ |
| #include "instr.h" /* for struct sizes */ |
| #include "fcache.h" /* fcache_low_on_memory */ |
| #ifdef DEBUG |
| # include "hotpatch.h" /* To handle leak for case 9593. */ |
| #endif |
| |
| #ifdef HEAP_ACCOUNTING |
| # ifndef DEBUG |
| # error HEAP_ACCOUNTING requires DEBUG |
| # endif |
| #endif |
| |
| #ifdef DEBUG_MEMORY |
| /* on by default but higher than general asserts */ |
| # define CHKLVL_MEMFILL CHKLVL_DEFAULT |
| #endif |
| |
| extern bool vm_areas_exited; |
| |
| /*************************************************************************** |
| * We cannot use malloc in the middle of interpreting the client program |
| * because we could be in the middle of interpreting malloc itself, which is |
| * not always reentrant. |
| * |
| * We have a virtual memory manager which makes sure memory is |
| * reserved within the application address space so that we don't have |
| * to fight with the application. We call os_heap_reserve to allocate |
| * virtual space in a single consecutive region. We later use |
| * os_heap_commit to get committed memory in large chunks and manage |
| * the chunks using a simple scheme of free lists of different sizes. |
| * The virtual memory manager has to store out-of-band information about |
| * used and free blocks, since of course there is no real memory to use. |
| * The chunks (heap units) store extra information in band for both |
| * used and free blocks.  However, in the allocated blocks within a unit we |
| * don't need to store any information since heap_free passes in the |
| * size; we store the next pointers for the free lists at the start of |
| * the free blocks themselves. We have one large reservation for most of |
| * our allocations, and yet another for allocations that we do not |
| * plan on ever freeing up on detach - the only unavoidable tombstones |
| * are those for thread private code system calls that may be stuck on |
| * callbacks. In case we run out of reserved memory we do fall back |
| * on requests from the OS, but any of these may fail if we are |
| * competing with the application. |
| * |
| * Looking at dynamo behavior as of Jan 2001, most heap_alloc requests are |
| * for < 128 bytes and very few are larger, so we have a set of fixed-size |
| * blocks of small sizes. |
| * |
| * The UINT_MAX entry is the variable-length bucket; we keep a size_t-sized |
| * header to store the size (again storing the free-list next pointer at the |
| * start of what we pass to the user when the block is free). |
| */ |
| |
| static const uint BLOCK_SIZES[] = { |
| 8, /* for instr bits */ |
| #ifndef X64 |
| /* for x64 future_fragment_t is 24 bytes (could be 20 if we could put flags last) */ |
| sizeof(future_fragment_t), /* 12 (24 x64) */ |
| #endif |
| /* we have a lot of size 16 requests for IR but they are transient */ |
| 24, /* fcache empties and vm_area_t are now 20, vm area extras still 24 */ |
| ALIGN_FORWARD(sizeof(fragment_t) + sizeof(indirect_linkstub_t), HEAP_ALIGNMENT), /* 40 dbg / 36 rel */ |
| #if defined(X64) || defined(PROFILE_LINKCOUNT) || defined(CUSTOM_EXIT_STUBS) |
| sizeof(instr_t), /* 64 (104 x64) */ |
| sizeof(fragment_t) + sizeof(direct_linkstub_t) |
| + sizeof(cbr_fallthrough_linkstub_t), /* 68 dbg / 64 rel, 112 x64 */ |
| /* all other bb/trace buckets are 8 larger but in same order */ |
| #else |
| sizeof(fragment_t) + sizeof(direct_linkstub_t) |
| + sizeof(cbr_fallthrough_linkstub_t), /* 60 dbg / 56 rel */ |
| sizeof(instr_t), /* 64 */ |
| #endif |
| /* we keep this bucket even though only 10% or so of normal bbs |
| * hit this. |
| * FIXME: release == instr_t here so a small waste when walking buckets |
| */ |
| ALIGN_FORWARD(sizeof(fragment_t) + 2*sizeof(direct_linkstub_t), |
| HEAP_ALIGNMENT), /* 68 dbg / 64 rel (128 x64) */ |
| ALIGN_FORWARD(sizeof(trace_t) + 2*sizeof(direct_linkstub_t) + sizeof(uint), |
| HEAP_ALIGNMENT), /* 80 dbg / 76 rel (148 x64 => 152) */ |
| /* FIXME: measure whether should put in indirect mixes as well */ |
| ALIGN_FORWARD(sizeof(trace_t) + 3*sizeof(direct_linkstub_t) + sizeof(uint), |
| HEAP_ALIGNMENT), /* 96 dbg / 92 rel (180 x64 => 184) */ |
| ALIGN_FORWARD(sizeof(trace_t) + 5*sizeof(direct_linkstub_t) + sizeof(uint), |
| HEAP_ALIGNMENT), /* 128 dbg / 124 rel (244 x64 => 248) */ |
| 256, |
| 512, |
| UINT_MAX /* variable-length */ |
| }; |
| #define BLOCK_TYPES (sizeof(BLOCK_SIZES)/sizeof(uint)) |
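| |
| /* Illustrative sketch (in-comment only, not the actual allocation path): |
| * conceptually a request is rounded up to HEAP_ALIGNMENT and mapped to the |
| * smallest bucket that can hold it, with the UINT_MAX bucket catching |
| * variable-length requests (truly oversized requests get their own special |
| * units).  The hypothetical request_sz below is just for illustration: |
| * |
| *   size_t aligned = ALIGN_FORWARD(request_sz, HEAP_ALIGNMENT); |
| *   uint bucket = 0; |
| *   while (aligned > BLOCK_SIZES[bucket])   (terminates: last entry is UINT_MAX) |
| *       bucket++; |
| *   ... bucket now indexes the free list to search (tu->free_list[bucket]) ... |
| */ |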
| |
| #ifdef DEBUG |
| /* FIXME: would be nice to have these stats per HEAPACCT category */ |
| /* These are ints only b/c we used to do non-atomic adds and wanted to |
| * gracefully handle underflow to negative values |
| */ |
| DECLARE_NEVERPROT_VAR(static int block_total_count[BLOCK_TYPES], {0}); |
| DECLARE_NEVERPROT_VAR(static int block_count[BLOCK_TYPES], {0}); |
| DECLARE_NEVERPROT_VAR(static int block_peak_count[BLOCK_TYPES], {0}); |
| DECLARE_NEVERPROT_VAR(static int block_wasted[BLOCK_TYPES], {0}); |
| DECLARE_NEVERPROT_VAR(static int block_peak_wasted[BLOCK_TYPES], {0}); |
| DECLARE_NEVERPROT_VAR(static int block_align_pad[BLOCK_TYPES], {0}); |
| DECLARE_NEVERPROT_VAR(static int block_peak_align_pad[BLOCK_TYPES], {0}); |
| DECLARE_NEVERPROT_VAR(static bool out_of_vmheap_once, false); |
| #endif |
| |
| /* variable-length: we steal one size_t-sized header slot for the size */ |
| #define HEADER_SIZE (sizeof(size_t)) |
| /* VARIABLE_SIZE is assignable */ |
| #define VARIABLE_SIZE(p) (*(size_t *)((p)-HEADER_SIZE)) |
| #define MEMSET_HEADER(p, value) VARIABLE_SIZE(p) = HEAP_TO_PTR_UINT(value) |
| #define GET_VARIABLE_ALLOCATION_SIZE(p) (VARIABLE_SIZE(p) + HEADER_SIZE) |
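| |
| /* Layout sketch for a variable-length allocation (illustrative only): the |
| * HEADER_SIZE bytes immediately preceding the pointer handed to the caller |
| * hold the block's usable size, so VARIABLE_SIZE(p) reads it back and |
| * GET_VARIABLE_ALLOCATION_SIZE(p) adds the header overhead back in: |
| * |
| *      [ size (size_t) ][ user data ......................... ] |
| *      ^                ^ |
| *      p - HEADER_SIZE  p  (pointer returned to the caller) |
| */ |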
| |
| /* Heap is allocated in units. |
| * We start out with a small unit; each additional unit we need doubles in |
| * size, up to a maximum.  We default to a 32kb initial size (24kb useful with |
| * guard pages); max size defaults to 64kb (56kb useful with guard pages).  We |
| * keep the max small to save memory; it doesn't seem to be a perf hit!  Though |
| * with guard pages we are wasting quite a bit of reserved (though not |
| * committed) space. */ |
| /* The only big things the global heap is used for are the pc sampling |
| * hash table and the sideline sampling hash table -- if neither of those |
| * is in use, 16KB should be plenty.  We default to 32kb since guard |
| * pages are on by default (giving 24kb useful); max size is the same as for |
| * normal heap units. |
| */ |
| /* The old defaults were 32kb (usable) for the initial thread-private unit and |
| * 16kb (usable) for the initial global unit; they were changed to simplify the |
| * logic for allocating in multiples of the os allocation granularity.  The new |
| * defaults probably make more sense with the shared cache than the old ones |
| * did anyway. |
| */ |
| /* Restrictions: |
| * any guard pages are included in the size, and size must be > UNITOVERHEAD. |
| * For best performance, sizes should be of the form |
| * 2^n * page_size (where n is a positive integer) and max should be a multiple |
| * of the os allocation granularity, so that with enough doublings we are |
| * reserving memory in multiples of the allocation granularity and not wasting |
| * any virtual address space (beyond our guard pages). |
| */ |
| #define HEAP_UNIT_MIN_SIZE INTERNAL_OPTION(initial_heap_unit_size) |
| #define HEAP_UNIT_MAX_SIZE INTERNAL_OPTION(max_heap_unit_size) |
| #define GLOBAL_UNIT_MIN_SIZE INTERNAL_OPTION(initial_global_heap_unit_size) |
| |
| #define GUARD_PAGE_ADJUSTMENT (dynamo_options.guard_pages ? 2 * PAGE_SIZE : 0) |
| |
| /* gets usable space in the unit */ |
| #define UNITROOM(u) ((size_t) (u->end_pc - u->start_pc)) |
| #define UNIT_RESERVED_ROOM(u) (u->reserved_end_pc - u->start_pc) |
| /* We keep the heap_unit_t header at the top of the unit; this macro calculates |
| * the committed size of the unit by adding the header size to the available size. |
| */ |
| #define UNIT_COMMIT_SIZE(u) (UNITROOM(u) + sizeof(heap_unit_t)) |
| #define UNIT_RESERVED_SIZE(u) (UNIT_RESERVED_ROOM(u) + sizeof(heap_unit_t)) |
| #define UNIT_ALLOC_START(u) (u->start_pc - sizeof(heap_unit_t)) |
| #define UNIT_GET_START_PC(u) (byte*)(((ptr_uint_t)u) + sizeof(heap_unit_t)) |
| #define UNIT_COMMIT_END(u) (u->end_pc) |
| #define UNIT_RESERVED_END(u) (u->reserved_end_pc) |
| |
| /* gets the allocated size of the unit (reserved size + guard pages) */ |
| #define UNITALLOC(u) (UNIT_RESERVED_SIZE(u) + GUARD_PAGE_ADJUSTMENT) |
| /* gets unit overhead, includes the reserved (guard pages) and committed |
| * (sizeof(heap_unit_t)) portions |
| */ |
| #define UNITOVERHEAD (sizeof(heap_unit_t) + GUARD_PAGE_ADJUSTMENT) |
| |
| /* any alloc request larger than this needs a special unit */ |
| #define MAXROOM (HEAP_UNIT_MAX_SIZE - UNITOVERHEAD) |
| |
| /* maximum valid allocation (to guard against internal integer overflows) */ |
| #define MAX_VALID_HEAP_ALLOCATION INT_MAX |
| |
| /* thread-local heap structure |
| * This struct is kept at the top of the unit itself, not in a separate allocation. |
| */ |
| typedef struct _heap_unit_t { |
| heap_pc start_pc; /* start address of heap storage */ |
| heap_pc end_pc; /* open-ended end address of heap storage */ |
| heap_pc cur_pc; /* open-ended current end of allocated storage */ |
| heap_pc reserved_end_pc; /* open-ended end of reserved (not nec committed) memory */ |
| bool in_vmarea_list; /* perf opt for delayed batch vmarea updating */ |
| #ifdef DEBUG |
| int id; /* # of this unit */ |
| #endif |
| struct _heap_unit_t *next_local; /* used to link thread's units */ |
| struct _heap_unit_t *next_global; /* used to link all units */ |
| struct _heap_unit_t *prev_global; /* used to link all units */ |
| } heap_unit_t; |
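| |
| /* Unit layout sketch (illustrative): the heap_unit_t header sits at the base |
| * of the committed region with the allocatable storage following it, plus |
| * GUARD_PAGE_ADJUSTMENT of guard pages around the reservation when |
| * -guard_pages is on (see UNITALLOC()/UNITOVERHEAD): |
| * |
| *   UNIT_ALLOC_START(u) |
| *   | heap_unit_t | start_pc ..... cur_pc ..... end_pc ..... reserved_end_pc |
| *                  \_ allocated _/\_ committed, free _/\__ reserved only __/ |
| */ |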
| |
| #ifdef HEAP_ACCOUNTING |
| typedef struct _heap_acct_t { |
| size_t alloc_reuse[ACCT_LAST]; |
| size_t alloc_new[ACCT_LAST]; |
| size_t cur_usage[ACCT_LAST]; |
| size_t max_usage[ACCT_LAST]; |
| size_t max_single[ACCT_LAST]; |
| uint num_alloc[ACCT_LAST]; |
| } heap_acct_t; |
| #endif |
| |
| /* FIXME (case 6336): rename to heap_t: |
| * a heap_t is a collection of units with the same properties |
| * to reflect that this is used for more than just thread-private memory. |
| * Also rename the "tu" vars to "h" |
| */ |
| typedef struct _thread_units_t { |
| heap_unit_t *top_unit; /* start of linked list of heap units */ |
| heap_unit_t *cur_unit; /* current unit in heap list */ |
| heap_pc free_list[BLOCK_TYPES]; |
| #ifdef DEBUG |
| int num_units; /* total # of heap units */ |
| #endif |
| dcontext_t *dcontext; /* back pointer to owner */ |
| bool writable; /* remember state of heap protection */ |
| #ifdef HEAP_ACCOUNTING |
| heap_acct_t acct; |
| #endif |
| } thread_units_t; |
| |
| /* per-thread structure: */ |
| typedef struct _thread_heap_t { |
| thread_units_t *local_heap; |
| thread_units_t *nonpersistent_heap; |
| } thread_heap_t; |
| |
| /* global, unique thread-shared structure: |
| * FIXME: give this name to thread_units_t, and name this AllHeapUnits |
| */ |
| typedef struct _heap_t { |
| heap_unit_t *units; /* list of all allocated units */ |
| heap_unit_t *dead; /* list of deleted units ready for re-allocation */ |
| /* FIXME: num_dead duplicates stats->heap_num_free, but we want num_dead |
| * for release build too, so it's separate...can we do better? |
| */ |
| uint num_dead; |
| } heap_t; |
| |
| /* no synch needed since only written once */ |
| static bool heap_exiting = false; |
| |
| #ifdef DEBUG |
| DECLARE_NEVERPROT_VAR(static bool ever_beyond_vmm, false); |
| #endif |
| |
| /* Lock used only for managing heap units, not for normal thread-local alloc. |
| * Must be recursive due to circular dependencies between vmareas and global heap. |
| * Furthermore, always grab dynamo_vm_areas_lock() before grabbing this lock, |
| * to make DR areas update and heap alloc/free atomic! |
| */ |
| DECLARE_CXTSWPROT_VAR(static recursive_lock_t heap_unit_lock, |
| INIT_RECURSIVE_LOCK(heap_unit_lock)); |
| /* N.B.: if these two locks are ever owned at the same time, the convention is |
| * that global_alloc_lock MUST be grabbed first, to avoid deadlocks |
| */ |
| /* separate lock for global heap access to avoid contention between local unit |
| * creation and global heap alloc |
| * must be recursive so that heap_vmareas_synch_units can hold it and heap_unit_lock |
| * up front to avoid deadlocks, and still allow vmareas to global_alloc -- |
| * BUT we do NOT want global_alloc() to be able to recurse! |
| * FIXME: either find a better solution to the heap_vmareas_synch_units deadlock |
| * that is as efficient, or find a way to assert that the only recursion is |
| * from heap_vmareas_synch_units to global_alloc |
| */ |
| DECLARE_CXTSWPROT_VAR(static recursive_lock_t global_alloc_lock, |
| INIT_RECURSIVE_LOCK(global_alloc_lock)); |
| |
| #if defined(DEBUG) && defined(HEAP_ACCOUNTING) && defined(HOT_PATCHING_INTERFACE) |
| static int get_special_heap_header_size(void); |
| #endif |
| |
| vm_area_vector_t *landing_pad_areas; /* PR 250294 */ |
| #ifdef WINDOWS |
| /* i#939: we steal space from ntdll's +rx segment */ |
| static app_pc lpad_temp_writable_start; |
| static size_t lpad_temp_writable_size; |
| static void release_landing_pad_mem(void); |
| #endif |
| |
| /* Indicates whether we should back out of a global alloc/free, grab the |
| * DR areas lock first, and then retry. |
| */ |
| static bool |
| safe_to_allocate_or_free_heap_units() |
| { |
| return ((!self_owns_recursive_lock(&global_alloc_lock) && |
| !self_owns_recursive_lock(&heap_unit_lock)) || |
| self_owns_dynamo_vm_area_lock()); |
| } |
| |
| /* Indicates that a dynamo vm area removal was delayed. |
| * Protected by the heap_unit_lock. |
| */ |
| DECLARE_FREQPROT_VAR(static bool dynamo_areas_pending_remove, false); |
| |
| #ifdef HEAP_ACCOUNTING |
| const char * whichheap_name[] = { |
| /* max length for aligned output is length of "BB Fragments" */ |
| "BB Fragments", |
| "Coarse Links", |
| "Future Frag", |
| "Frag Tables", |
| "IBL Tables", |
| "Traces", |
| "FC Empties", |
| "Vm Multis", |
| "IR", |
| "RCT Tables", |
| "VM Areas", |
| "Symbols", |
| # ifdef SIDELINE |
| "Sideline", |
| # endif |
| "TH Counter", |
| "Tombstone", |
| "Hot Patching", |
| "Thread Mgt", |
| "Memory Mgt", |
| "Stats", |
| "SpecialHeap", |
| # ifdef CLIENT_INTERFACE |
| "Client", |
| # endif |
| "Lib Dup", |
| "Clean Call", |
| /* NOTE: Add your heap name here */ |
| "Other", |
| }; |
| |
| /* Since using a lock for these stats adds a lot of contention, we |
| * follow a two-pronged strategy: |
| * 1) For accurate stats we add a thread's final stats to the global only |
| * when it is cleaned up. But, this prevents global stats from being |
| * available in the middle of a run or if a run is not cleaned up nicely. |
| * 2) We have a set of heap_accounting stats for incremental global stats |
| * that are available at any time, yet racy and so may be off a little. |
| */ |
| /* all set to 0 is only initialization we need */ |
| DECLARE_NEVERPROT_VAR(static thread_units_t global_racy_units, {0}); |
| |
| /* macro to get the type abstracted */ |
| # define ACCOUNT_FOR_ALLOC_HELPER(type, tu, which, alloc_sz, ask_sz) do { \ |
| (tu)->acct.type[which] += alloc_sz; \ |
| (tu)->acct.num_alloc[which]++; \ |
| (tu)->acct.cur_usage[which] += alloc_sz; \ |
| if ((tu)->acct.cur_usage[which] > (tu)->acct.max_usage[which]) \ |
| (tu)->acct.max_usage[which] = (tu)->acct.cur_usage[which]; \ |
| if (ask_sz > (tu)->acct.max_single[which]) \ |
| (tu)->acct.max_single[which] = ask_sz; \ |
| } while (0) |
| |
| # define ACCOUNT_FOR_ALLOC(type, tu, which, alloc_sz, ask_sz) do { \ |
| STATS_ADD_PEAK(heap_claimed, alloc_sz); \ |
| ACCOUNT_FOR_ALLOC_HELPER(type, tu, which, alloc_sz, ask_sz); \ |
| ACCOUNT_FOR_ALLOC_HELPER(type, &global_racy_units, which, \ |
| alloc_sz, ask_sz); \ |
| } while (0) |
| |
| # define ACCOUNT_FOR_FREE(tu, which, size) do { \ |
| STATS_SUB(heap_claimed, (size)); \ |
| (tu)->acct.cur_usage[which] -= size; \ |
| global_racy_units.acct.cur_usage[which] -= size; \ |
| } while (0) |
| |
| #else |
| # define ACCOUNT_FOR_ALLOC(type, tu, which, alloc_sz, ask_sz) |
| # define ACCOUNT_FOR_FREE(tu, which, size) |
| #endif |
| |
| typedef byte *vm_addr_t; |
| |
| #ifdef X64 |
| /* designates the closed interval within which we must allocate DR heap space */ |
| static byte *heap_allowable_region_start = (byte *)PTR_UINT_0; |
| static byte *heap_allowable_region_end = (byte *)POINTER_MAX; |
| |
| /* used only to protect read/write access to the must_reach_* static variables in |
| * request_region_be_heap_reachable() */ |
| DECLARE_CXTSWPROT_VAR(static mutex_t request_region_be_heap_reachable_lock, |
| INIT_LOCK_FREE(request_region_be_heap_reachable_lock)); |
| |
| /* Request that the supplied region be 32bit offset reachable from the DR heap. Should |
| * be called before vmm_heap_init() so we can place the DR heap to meet these constraints. |
| * Can also be called post vmm_heap_init() but at that point acts as an assert that the |
| * supplied region is reachable since the heap is already reserved. |
| * |
| * Must be called at least once up front, for the -heap_in_lower_4GB code here |
| * to kick in! |
| */ |
| void |
| request_region_be_heap_reachable(byte *start, size_t size) |
| { |
| /* initialize so will be overridden on first call; protected by the |
| * request_region_be_heap_reachable_lock */ |
| static byte *must_reach_region_start = (byte *)POINTER_MAX; |
| static byte *must_reach_region_end = (byte *)PTR_UINT_0; /* closed */ |
| |
| LOG(GLOBAL, LOG_HEAP, 2, |
| "Adding must-be-reachable-from-heap region "PFX"-"PFX"\n" |
| "Existing must-be-reachable region "PFX"-"PFX"\n" |
| "Existing allowed range "PFX"-"PFX"\n", |
| start, start+size, must_reach_region_start, must_reach_region_end, |
| heap_allowable_region_start, heap_allowable_region_end); |
| ASSERT(!POINTER_OVERFLOW_ON_ADD(start, size)); |
| ASSERT(size > 0); |
| |
| mutex_lock(&request_region_be_heap_reachable_lock); |
| if (start < must_reach_region_start) { |
| byte *allowable_end_tmp; |
| SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); |
| must_reach_region_start = start; |
| allowable_end_tmp = REACHABLE_32BIT_END(must_reach_region_start, |
| must_reach_region_end); |
| /* PR 215395 - add in absolute address reachability */ |
| if (DYNAMO_OPTION(heap_in_lower_4GB) && |
| allowable_end_tmp > (byte *)POINTER_MAX_32BIT) { |
| allowable_end_tmp = (byte *)POINTER_MAX_32BIT; |
| } |
| /* Write assumed to be atomic so we don't have to hold a lock to use |
| * heap_allowable_region_end. */ |
| heap_allowable_region_end = allowable_end_tmp; |
| SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); |
| } |
| if (start + size - 1 > must_reach_region_end) { |
| SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); |
| must_reach_region_end = start + size - 1; /* closed */ |
| /* Write assumed to be atomic so we don't have to hold a lock to use |
| * heap_allowable_region_start. */ |
| heap_allowable_region_start = REACHABLE_32BIT_START(must_reach_region_start, |
| must_reach_region_end); |
| SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); |
| } |
| ASSERT(must_reach_region_start <= must_reach_region_end); /* correctness check */ |
| /* verify can be addressed absolutely (if required), correctness check */ |
| ASSERT(!DYNAMO_OPTION(heap_in_lower_4GB) || |
| heap_allowable_region_end <= (byte *)POINTER_MAX_32BIT); |
| mutex_unlock(&request_region_be_heap_reachable_lock); |
| |
| LOG(GLOBAL, LOG_HEAP, 1, |
| "Added must-be-reachable-from-heap region "PFX"-"PFX"\n" |
| "New must-be-reachable region "PFX"-"PFX"\n" |
| "New allowed range "PFX"-"PFX"\n", |
| start, start+size, must_reach_region_start, must_reach_region_end, |
| heap_allowable_region_start, heap_allowable_region_end); |
| |
| /* Reachability checks (xref PR 215395, note since we currently can't directly |
| * control where DR/client dlls are loaded these could fire if rebased). */ |
| ASSERT(heap_allowable_region_start <= must_reach_region_start && |
| "x64 reachability constraints not satisfiable"); |
| ASSERT(must_reach_region_end <= heap_allowable_region_end && |
| "x64 reachability constraints not satisfiable"); |
| |
| /* Handle release build failure. */ |
| if (heap_allowable_region_start > must_reach_region_start || |
| must_reach_region_end > heap_allowable_region_end) { |
| /* FIXME - in a released product we may want to detach or something else less |
| * drastic than triggering a FATAL_USAGE_ERROR. */ |
| FATAL_USAGE_ERROR(HEAP_CONTRAINTS_UNSATISFIABLE, 2, |
| get_application_name(), get_application_pid()); |
| } |
| } |
| |
| void |
| vmcode_get_reachable_region(byte **region_start OUT, byte **region_end OUT) |
| { |
| /* We track sub-page for more accuracy on additional constraints, and |
| * align when asked about it. |
| */ |
| if (region_start != NULL) |
| *region_start = (byte *) ALIGN_FORWARD(heap_allowable_region_start, PAGE_SIZE); |
| if (region_end != NULL) |
| *region_end = (byte *) ALIGN_BACKWARD(heap_allowable_region_end, PAGE_SIZE); |
| } |
| #endif |
| |
| /* forward declarations of static functions */ |
| static void threadunits_init(dcontext_t *dcontext, thread_units_t *tu, size_t size); |
| /* dcontext only used for debugging */ |
| static void threadunits_exit(thread_units_t *tu, dcontext_t *dcontext); |
| static void *common_heap_alloc(thread_units_t *tu, size_t size |
| HEAPACCT(which_heap_t which)); |
| static bool common_heap_free(thread_units_t *tu, void *p, size_t size |
| HEAPACCT(which_heap_t which)); |
| static void release_real_memory(void *p, size_t size, bool remove_vm); |
| static void release_guarded_real_memory(vm_addr_t p, size_t size, bool remove_vm, |
| bool guarded); |
| |
| typedef enum { |
| /* I - Init, Interop - first allocation failed |
| * check for incompatible kernel drivers |
| */ |
| OOM_INIT = 0x1, |
| /* R - Reserve - out of virtual reservation |
| * increase -vm_size to reserve more memory |
| */ |
| OOM_RESERVE = 0x2, |
| /* C - Commit - systemwide page file limit, or current process job limit hit |
| * Increase pagefile size, check for memory leak in any application. |
| * |
| * FIXME: possible automatic actions |
| * if it is a systemwide failure we may want to wait, if transient |
| * FIXME: if in a job, later we want to detect that and just die |
| * (though after freeing as much memory as we can) |
| */ |
| OOM_COMMIT = 0x4, |
| /* E - Extending Commit - same reasons as Commit |
| * as a possible workaround, increasing -heap_commit_increment |
| * may let us commit less frequently; |
| * on the other hand, committing smaller chunks has a higher |
| * chance of getting through when there is very little memory. |
| * |
| * FIXME: not much more informative than OOM_COMMIT |
| */ |
| OOM_EXTEND = 0x8, |
| } oom_source_t; |
| |
| static void report_low_on_memory(oom_source_t source, |
| heap_error_code_t os_error_code); |
| |
| /* virtual memory manager */ |
| enum {VMM_BLOCK_SIZE = IF_WINDOWS_ELSE(64,16)*1024}; /* 64KB or 16KB */ |
| /* Our current allocation unit matches the allocation granularity on |
| * windows, to avoid worrying about external fragmentation |
| * Since most of our allocations fall within this range, this makes the |
| * common operation simply finding a single empty block. |
| * |
| * On Linux we save a lot of wasted alignment space by using a smaller |
| * granularity (PR 415959). |
| * |
| * FIXME: for Windows, if we reserve the whole region up front and |
| * just commit pieces, why do we need to match the Windows kernel |
| * alloc granularity? |
| */ |
| |
| enum { |
| /* maximum 512MB virtual memory units */ |
| MAX_VMM_HEAP_UNIT_SIZE = 512*1024*1024, |
| /* We should normally have only one large unit, so this is in fact |
| * the maximum we should count on in one process |
| */ |
| /* minimum will be used only if an invalid option is set */ |
| MIN_VMM_HEAP_UNIT_SIZE = VMM_BLOCK_SIZE |
| }; |
| |
| typedef struct { |
| vm_addr_t start_addr; /* base virtual address */ |
| vm_addr_t end_addr; /* noninclusive virtual memory range [start,end) */ |
| vm_addr_t alloc_start; /* base allocation virtual address */ |
| size_t alloc_size; /* allocation size */ |
| /* for 64-bit do we want to shift to size_t to allow a larger region? |
| * if so must update the bitmap_t routines |
| */ |
| uint num_blocks; /* total number of blocks in virtual allocation */ |
| |
| mutex_t lock; /* write access to the rest of the fields is protected */ |
| /* We assume the bitmap_t implementation is static, so we don't grab |
| * locks on read accesses.  Anyway, currently the bitmap_t is read |
| * without write intent only for ASSERTs. */ |
| uint num_free_blocks; /* currently free blocks */ |
| /* The bitmap uses 1KB of static data for a 64KB granularity and 512MB static maximum. */ |
| /* Since we expect only two of these, for now it is ok for users |
| * to have a static max rather than dynamically allocating the |
| * exact size -- however, this field is left last in the structure |
| * in case we do want to save that memory. |
| */ |
| bitmap_element_t blocks[BITMAP_INDEX(MAX_VMM_HEAP_UNIT_SIZE/VMM_BLOCK_SIZE)]; |
| } vm_heap_t; |
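| |
| /* Sizing example (illustrative, using the Windows constants above): with |
| * VMM_BLOCK_SIZE = 64KB and the 512MB static maximum, the bitmap spans |
| * 512MB / 64KB = 8192 blocks, i.e. 8192 bits = 1KB of static data, matching |
| * the note inside the struct.  A hypothetical 128MB -vm_size reservation |
| * would set num_blocks = 2048 and leave the rest of the bitmap unused. |
| */ |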
| |
| /* We keep our heap management structs on the heap for selfprot (case 8074). |
| * Note that we do have static structs for bootstrapping and we later move |
| * the data here. |
| */ |
| typedef struct _heap_management_t { |
| /* high-level management */ |
| /* we reserve only a single vm_heap_t for guaranteed allocation, |
| * we fall back to OS when run out of reservation space */ |
| vm_heap_t vmheap; |
| heap_t heap; |
| /* thread-shared heaps: */ |
| thread_units_t global_units; |
| thread_units_t global_nonpersistent_units; |
| bool global_heap_writable; |
| thread_units_t global_unprotected_units; |
| } heap_management_t; |
| |
| /* For bootstrapping until we can allocate our real heapmgt (case 8074). |
| * temp_heapmgt.lock is initialized in vmm_heap_unit_init(). |
| */ |
| static heap_management_t temp_heapmgt; |
| static heap_management_t *heapmgt = &temp_heapmgt; /* initial value until alloced */ |
| |
| static bool vmm_heap_exited = false; /* FIXME: used only to prevent stack_free from trying; |
| we should change the interface for the last stack |
| */ |
| |
| static inline |
| uint |
| vmm_addr_to_block(vm_heap_t *vmh, vm_addr_t p) |
| { |
| ASSERT(CHECK_TRUNCATE_TYPE_uint((p - vmh->start_addr) / VMM_BLOCK_SIZE)); |
| return (uint) ((p - vmh->start_addr) / VMM_BLOCK_SIZE); |
| } |
| |
| static inline |
| vm_addr_t |
| vmm_block_to_addr(vm_heap_t *vmh, uint block) |
| { |
| ASSERT(block >=0 && block < vmh->num_blocks); |
| return (vm_addr_t)(vmh->start_addr + block*VMM_BLOCK_SIZE); |
| } |
| |
| static bool |
| vmm_in_same_block(vm_addr_t p1, vm_addr_t p2) |
| { |
| return vmm_addr_to_block(&heapmgt->vmheap, p1) == |
| vmm_addr_to_block(&heapmgt->vmheap, p2); |
| } |
| |
| #if defined(DEBUG) && defined(INTERNAL) |
| static void |
| vmm_dump_map(vm_heap_t *vmh) |
| { |
| uint i; |
| bitmap_element_t *b = vmh->blocks; |
| uint bitmap_size = vmh->num_blocks; |
| uint last_i = 0; |
| bool is_used = bitmap_test(b, 0) == 0; |
| |
| LOG(GLOBAL, LOG_HEAP, 3, "vmm_dump_map("PFX")\n", vmh); |
| /* raw dump first - if you really want binary dump use windbg's dyd */ |
| DOLOG(3, LOG_HEAP, { |
| dump_buffer_as_bytes(GLOBAL, b, |
| BITMAP_INDEX(bitmap_size)*sizeof(bitmap_element_t), |
| DUMP_RAW|DUMP_ADDRESS); |
| }); |
| |
| LOG(GLOBAL, LOG_HEAP, 1, "\nvmm_dump_map("PFX") virtual regions\n", vmh); |
| #define VMM_DUMP_MAP_LOG(i, last_i) \ |
| LOG(GLOBAL, LOG_HEAP, 1, PFX"-"PFX" size=%d %s\n", vmm_block_to_addr(vmh, last_i), \ |
| vmm_block_to_addr(vmh, i-1) + VMM_BLOCK_SIZE - 1, \ |
| (i-last_i)*VMM_BLOCK_SIZE, \ |
| is_used ? "reserved" : "free"); |
| |
| for (i=0; i < bitmap_size; i++) { |
| /* start counting at free/used boundaries */ |
| if (is_used != (bitmap_test(b, i) == 0)) { |
| VMM_DUMP_MAP_LOG(i, last_i); |
| is_used = (bitmap_test(b, i) == 0); |
| last_i = i; |
| } |
| } |
| VMM_DUMP_MAP_LOG(bitmap_size, last_i); |
| } |
| #endif /* DEBUG && INTERNAL */ |
| |
| void |
| print_vmm_heap_data(file_t outf) |
| { |
| mutex_lock(&heapmgt->vmheap.lock); |
| print_file(outf, "VM heap: addr range "PFX"--"PFX", # free blocks %d\n", |
| heapmgt->vmheap.start_addr, heapmgt->vmheap.end_addr, |
| heapmgt->vmheap.num_free_blocks); |
| mutex_unlock(&heapmgt->vmheap.lock); |
| } |
| |
| static inline |
| void |
| vmm_heap_initialize_unusable(vm_heap_t *vmh) |
| { |
| vmh->start_addr = vmh->end_addr = NULL; |
| vmh->num_free_blocks = vmh->num_blocks = 0; |
| } |
| |
| static |
| void |
| vmm_heap_unit_init(vm_heap_t *vmh, size_t size) |
| { |
| ptr_uint_t preferred; |
| heap_error_code_t error_code; |
| ASSIGN_INIT_LOCK_FREE(vmh->lock, vmh_lock); |
| |
| size = ALIGN_FORWARD(size, VMM_BLOCK_SIZE); |
| ASSERT(size <= MAX_VMM_HEAP_UNIT_SIZE); |
| vmh->alloc_size = size; |
| |
| if (size == 0) { |
| vmm_heap_initialize_unusable(&heapmgt->vmheap); |
| return; |
| } |
| |
| /* Out of 32 bits, 12 are page offset and windows wastes 4 more |
| * since its allocation granularity is 64KB; if we want to stay |
| * safely in, say, 0x20000000-0x2fffffff we're left with only 12 |
| * bits of randomness - which may be too little.  On the other |
| * hand, changing any of the lower 16 bits would make our bugs |
| * non-deterministic. */ |
| /* Make sure we don't waste the lower bits from our random number */ |
| preferred = (DYNAMO_OPTION(vm_base) |
| + get_random_offset(DYNAMO_OPTION(vm_max_offset)/VMM_BLOCK_SIZE) |
| *VMM_BLOCK_SIZE); |
| preferred = ALIGN_FORWARD(preferred, VMM_BLOCK_SIZE); |
| /* overflow check: w/ vm_base shouldn't happen so debug-only check */ |
| ASSERT(!POINTER_OVERFLOW_ON_ADD(preferred, size)); |
| |
| /* let's assume a single chunk is sufficient to reserve */ |
| vmh->start_addr = NULL; |
| #ifdef X64 |
| if ((byte *)preferred < heap_allowable_region_start || |
| (byte *)preferred + size > heap_allowable_region_end) { |
| error_code = HEAP_ERROR_NOT_AT_PREFERRED; |
| } else { |
| #endif |
| vmh->start_addr = os_heap_reserve((void*)preferred, size, &error_code, |
| true/*+x*/); |
| LOG(GLOBAL, LOG_HEAP, 1, |
| "vmm_heap_unit_init preferred="PFX" got start_addr="PFX"\n", |
| preferred, vmh->start_addr); |
| #ifdef X64 |
| } |
| #endif |
| while (vmh->start_addr == NULL && DYNAMO_OPTION(vm_allow_not_at_base)) { |
| /* Since we prioritize low-4GB or near-app over -vm_base, we do not |
| * syslog or assert here |
| */ |
| /* need extra size to ensure alignment */ |
| vmh->alloc_size = size + VMM_BLOCK_SIZE; |
| #ifdef X64 |
| /* PR 215395, make sure allocation satisfies heap reachability constraints */ |
| vmh->alloc_start = os_heap_reserve_in_region |
| ((void *)ALIGN_FORWARD(heap_allowable_region_start, PAGE_SIZE), |
| (void *)ALIGN_BACKWARD(heap_allowable_region_end, PAGE_SIZE), |
| size + VMM_BLOCK_SIZE, &error_code, |
| true/*+x*/); |
| #else |
| vmh->alloc_start = (heap_pc) |
| os_heap_reserve(NULL, size + VMM_BLOCK_SIZE, &error_code, true/*+x*/); |
| #endif |
| vmh->start_addr = (heap_pc) ALIGN_FORWARD(vmh->alloc_start, VMM_BLOCK_SIZE); |
| LOG(GLOBAL, LOG_HEAP, 1, "vmm_heap_unit_init unable to allocate at preferred=" |
| PFX" letting OS place sz=%dM addr="PFX" \n", |
| preferred, size/(1024*1024), vmh->start_addr); |
| if (vmh->alloc_start == NULL && DYNAMO_OPTION(vm_allow_smaller)) { |
| /* Just a little smaller might fit */ |
| size_t sub = (size_t) ALIGN_FORWARD(size/16, 1024*1024); |
| SYSLOG_INTERNAL_WARNING_ONCE("Full size vmm heap allocation failed"); |
| if (size > sub) |
| size -= sub; |
| else |
| break; |
| } else |
| break; |
| } |
| #ifdef X64 |
| /* ensure future out-of-block heap allocations are reachable from this allocation */ |
| if (vmh->start_addr != NULL) { |
| ASSERT(vmh->start_addr >= heap_allowable_region_start && |
| !POINTER_OVERFLOW_ON_ADD(vmh->start_addr, size) && |
| vmh->start_addr + size <= heap_allowable_region_end); |
| request_region_be_heap_reachable(vmh->start_addr, size); |
| } |
| #endif |
| if (vmh->start_addr == 0) { |
| vmm_heap_initialize_unusable(vmh); |
| /* we couldn't even reserve initial virtual memory - we're out of luck */ |
| /* XXX case 7373: make sure we tag as a potential |
| * interoperability issue, in staging mode we should probably |
| * get out from the process since we haven't really started yet |
| */ |
| report_low_on_memory(OOM_INIT, error_code); |
| ASSERT_NOT_REACHED(); |
| } |
| vmh->end_addr = vmh->start_addr + size; |
| ASSERT_TRUNCATE(vmh->num_blocks, uint, size / VMM_BLOCK_SIZE); |
| vmh->num_blocks = (uint) (size / VMM_BLOCK_SIZE); |
| vmh->num_free_blocks = vmh->num_blocks; |
| LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_unit_init ["PFX","PFX") total=%d free=%d\n", |
| vmh->start_addr, vmh->end_addr, vmh->num_blocks, vmh->num_free_blocks); |
| |
| /* make sure static bitmap_t size is properly aligned on block boundaries */ |
| ASSERT(ALIGNED(MAX_VMM_HEAP_UNIT_SIZE, VMM_BLOCK_SIZE)); |
| bitmap_initialize_free(vmh->blocks, vmh->num_blocks); |
| DOLOG(1, LOG_HEAP, { |
| vmm_dump_map(vmh); |
| }); |
| ASSERT(bitmap_check_consistency(vmh->blocks, vmh->num_blocks, vmh->num_free_blocks)); |
| } |
| |
| static |
| void |
| vmm_heap_unit_exit(vm_heap_t *vmh) |
| { |
| LOG(GLOBAL, LOG_HEAP, 1, "vmm_heap_unit_exit ["PFX","PFX") total=%d free=%d\n", |
| vmh->start_addr, vmh->end_addr, vmh->num_blocks, vmh->num_free_blocks); |
| /* we assume single thread in DR at this point */ |
| DELETE_LOCK(vmh->lock); |
| |
| if (vmh->start_addr == NULL) |
| return; |
| |
| DOLOG(1, LOG_HEAP, { vmm_dump_map(vmh); }); |
| ASSERT(bitmap_check_consistency(vmh->blocks, |
| vmh->num_blocks, vmh->num_free_blocks)); |
| ASSERT(vmh->num_blocks * VMM_BLOCK_SIZE == |
| (ptr_uint_t)(vmh->end_addr - vmh->start_addr)); |
| |
| /* If there are no tombstones we can just free the unit, and that is |
| * what we do; otherwise it stays around forever. |
| */ |
| if (vmh->num_free_blocks == vmh->num_blocks) { |
| heap_error_code_t error_code; |
| os_heap_free(vmh->alloc_start, vmh->alloc_size, &error_code); |
| ASSERT(error_code == HEAP_ERROR_SUCCESS); |
| } else { |
| /* FIXME: doing nothing for now - we only care about this in |
| * detach scenarios where we should try to clean up from the |
| * virtual address space |
| */ |
| } |
| vmm_heap_initialize_unusable(vmh); |
| } |
| |
| /* Returns whether [p, p+size) lies within the region we reserved from the OS |
| * for doling out internally via our vm_heap_t; asserts that the address was |
| * also logically reserved within the vm_heap_t. |
| */ |
| static |
| bool |
| vmm_is_reserved_unit(vm_heap_t *vmh, vm_addr_t p, size_t size) |
| { |
| size = ALIGN_FORWARD(size, VMM_BLOCK_SIZE); |
| if (p < vmh->start_addr || vmh->end_addr < p/*overflow*/ || |
| vmh->end_addr < (p + size)) |
| return false; |
| ASSERT(CHECK_TRUNCATE_TYPE_uint(size/VMM_BLOCK_SIZE)); |
| ASSERT(bitmap_are_reserved_blocks(vmh->blocks, vmh->num_blocks, |
| vmm_addr_to_block(vmh, p), |
| (uint)size/VMM_BLOCK_SIZE)); |
| return true; |
| } |
| |
| /* Returns whether [pc, pc+size) lies entirely within the region we reserved |
| * from the OS for doling out internally via our vm_heap_t. |
| */ |
| bool |
| is_vmm_reserved_address(byte *pc, size_t size) |
| { |
| ASSERT(heapmgt != NULL); |
| /* Case 10293: we don't call vmm_is_reserved_unit to avoid its |
| * assert, which we want to maintain for callers only dealing with |
| * DR-allocated addresses, while this routine is called w/ random |
| * addresses |
| */ |
| return (heapmgt != NULL && heapmgt->vmheap.start_addr != NULL && |
| pc >= heapmgt->vmheap.start_addr && |
| !POINTER_OVERFLOW_ON_ADD(pc, size) && |
| (pc + size) <= heapmgt->vmheap.end_addr); |
| } |
| |
| void |
| get_vmm_heap_bounds(byte **heap_start/*OUT*/, byte **heap_end/*OUT*/) |
| { |
| ASSERT(heapmgt != NULL); |
| ASSERT(heap_start != NULL && heap_end != NULL); |
| *heap_start = heapmgt->vmheap.start_addr; |
| *heap_end = heapmgt->vmheap.end_addr; |
| } |
| |
| /* i#774: eventually we'll split vmheap from vmcode. For now, vmcode queries |
| * refer to the single vmheap reservation. |
| */ |
| byte * |
| vmcode_get_start(void) |
| { |
| byte *start, *end; |
| get_vmm_heap_bounds(&start, &end); |
| return start; |
| } |
| |
| byte * |
| vmcode_get_end(void) |
| { |
| byte *start, *end; |
| get_vmm_heap_bounds(&start, &end); |
| return end; |
| } |
| |
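| /* Returns a pc that is not rel32-reachable from the vmcode reservation: if the |
| * reservation starts above 2GB, NULL (address 0) is more than 2GB away from all |
| * of it; otherwise the very top of the address space is. |
| */ |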
| byte * |
| vmcode_unreachable_pc(void) |
| { |
| ptr_uint_t start, end; |
| get_vmm_heap_bounds((byte **)&start, (byte **)&end); |
| if (start > INT_MAX) |
| return NULL; |
| else |
| return (byte *) PTR_UINT_MINUS_1; |
| } |
| |
| bool |
| rel32_reachable_from_vmcode(byte *tgt) |
| { |
| byte *vmcode_start = vmcode_get_start(); |
| byte *vmcode_end = vmcode_get_end(); |
| ptr_int_t new_offs = (tgt > vmcode_start) ? (tgt - vmcode_start) : (vmcode_end - tgt); |
| /* Beyond-vmm-reservation allocs are handled b/c those are subject to the |
| * reachability constraints we set up on every new reservation, including |
| * the initial vm_reserve. |
| */ |
| return REL32_REACHABLE_OFFS(new_offs); |
| } |
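| |
| /* Worked example (illustrative addresses): if vmcode spans |
| * [0x71000000, 0x78000000) and tgt is 0x10000000, tgt lies below vmcode_start, |
| * so the worst-case displacement is vmcode_end - tgt = 0x68000000 (~1.6GB), |
| * which is within rel32 range; a tgt 3GB above vmcode_start would not be. |
| */ |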
| |
| /* Reservations here are done with VMM_BLOCK_SIZE alignment |
| * (e.g. 64KB) but the caller is not forced to request at that |
| * alignment.  We explicitly synchronize reservations and decommits |
| * within the vm_heap_t. |
| * |
| * Returns NULL if the VMMHeap is full or too fragmented to satisfy |
| * the request. |
| */ |
| static vm_addr_t |
| vmm_heap_reserve_blocks(vm_heap_t *vmh, size_t size_in) |
| { |
| vm_addr_t p; |
| uint request; |
| uint first_block; |
| size_t size; |
| |
| size = ALIGN_FORWARD(size_in, VMM_BLOCK_SIZE); |
| ASSERT_TRUNCATE(request, uint, size/VMM_BLOCK_SIZE); |
| request = (uint) size/VMM_BLOCK_SIZE; |
| |
| LOG(GLOBAL, LOG_HEAP, 2, |
| "vmm_heap_reserve_blocks: size=%d => %d in blocks=%d free_blocks~=%d\n", |
| size_in, size, request, vmh->num_free_blocks); |
| |
| mutex_lock(&vmh->lock); |
| if (vmh->num_free_blocks < request) { |
| mutex_unlock(&vmh->lock); |
| return NULL; |
| } |
| first_block = bitmap_allocate_blocks(vmh->blocks, vmh->num_blocks, request); |
| if (first_block != BITMAP_NOT_FOUND) { |
| vmh->num_free_blocks -= request; |
| } |
| mutex_unlock(&vmh->lock); |
| |
| if (first_block != BITMAP_NOT_FOUND) { |
| p = vmm_block_to_addr(vmh, first_block); |
| STATS_ADD_PEAK(vmm_vsize_used, size); |
| STATS_ADD_PEAK(vmm_vsize_blocks_used, request); |
| STATS_ADD_PEAK(vmm_vsize_wasted, size - size_in); |
| DOSTATS({ |
| if (request > 1) { |
| STATS_INC(vmm_multi_block_allocs); |
| STATS_ADD(vmm_multi_blocks, request); |
| } |
| }); |
| } else { |
| p = NULL; |
| } |
| LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_reserve_blocks: size=%d blocks=%d p="PFX"\n", |
| size, request, p); |
| DOLOG(5, LOG_HEAP, { vmm_dump_map(vmh); }); |
| return p; |
| } |
| |
| /* We explicitly synchronize reservations and decommits within the vm_heap_t. |
| * Update bookkeeping information about the freed region. |
| */ |
| static void |
| vmm_heap_free_blocks(vm_heap_t *vmh, vm_addr_t p, size_t size_in) |
| { |
| uint first_block = vmm_addr_to_block(vmh, p); |
| uint request; |
| size_t size; |
| |
| size = ALIGN_FORWARD(size_in, VMM_BLOCK_SIZE); |
| ASSERT_TRUNCATE(request, uint, size/VMM_BLOCK_SIZE); |
| request = (uint) size/VMM_BLOCK_SIZE; |
| |
| LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_free_blocks: size=%d blocks=%d p="PFX"\n", |
| size, request, p); |
| |
| mutex_lock(&vmh->lock); |
| bitmap_free_blocks(vmh->blocks, vmh->num_blocks, first_block, request); |
| vmh->num_free_blocks += request; |
| mutex_unlock(&vmh->lock); |
| |
| ASSERT(vmh->num_free_blocks <= vmh->num_blocks); |
| STATS_SUB(vmm_vsize_used, size); |
| STATS_SUB(vmm_vsize_blocks_used, request); |
| STATS_SUB(vmm_vsize_wasted, size - size_in); |
| } |
| |
| /* This is the proper interface for the rest of heap.c to the os_heap_* functions */ |
| |
| |
| /* place all the local-scope static vars (from DO_THRESHOLD) into .fspdata to avoid |
| * protection changes */ |
| START_DATA_SECTION(FREQ_PROTECTED_SECTION, "w"); |
| |
| static bool |
| at_reset_at_vmm_limit() |
| { |
| return |
| (DYNAMO_OPTION(reset_at_vmm_percent_free_limit) != 0 && |
| 100 * heapmgt->vmheap.num_free_blocks < |
| DYNAMO_OPTION(reset_at_vmm_percent_free_limit) * heapmgt->vmheap.num_blocks) || |
| (DYNAMO_OPTION(reset_at_vmm_free_limit) != 0 && |
| heapmgt->vmheap.num_free_blocks * VMM_BLOCK_SIZE < |
| DYNAMO_OPTION(reset_at_vmm_free_limit)); |
| } |
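| |
| /* Numeric example (illustrative values, not the option defaults): with 2048 |
| * total 64KB blocks, -reset_at_vmm_percent_free_limit 10 fires once fewer than |
| * 205 blocks (~12.8MB) remain free, and -reset_at_vmm_free_limit 16M fires |
| * once free space drops below 256 blocks (16MB). |
| */ |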
| |
| /* Reserve virtual address space without committing swap space for it */ |
| static vm_addr_t |
| vmm_heap_reserve(size_t size, heap_error_code_t *error_code, bool executable) |
| { |
| vm_addr_t p; |
| /* should only be used on sizable aligned pieces */ |
| ASSERT(size > 0 && ALIGNED(size, PAGE_SIZE)); |
| ASSERT(!OWN_MUTEX(&reset_pending_lock)); |
| |
| if (DYNAMO_OPTION(vm_reserve)) { |
| /* FIXME: should we make this an external option? */ |
| if (INTERNAL_OPTION(vm_use_last) || |
| (DYNAMO_OPTION(switch_to_os_at_vmm_reset_limit) && at_reset_at_vmm_limit())) { |
| DO_ONCE({ |
| if (DYNAMO_OPTION(reset_at_switch_to_os_at_vmm_limit)) |
| schedule_reset(RESET_ALL); |
| DOCHECK(1, { |
| if (!INTERNAL_OPTION(vm_use_last)) { |
| ASSERT_CURIOSITY(false && "running low on vm reserve"); |
| } |
| }); |
| /* FIXME - for our testing would be nice to have some release build |
| * notification of this ... */ |
| }); |
| DODEBUG(ever_beyond_vmm = true;); |
| #ifdef X64 |
| /* PR 215395, make sure allocation satisfies heap reachability constraints */ |
| p = os_heap_reserve_in_region |
| ((void *)ALIGN_FORWARD(heap_allowable_region_start, PAGE_SIZE), |
| (void *)ALIGN_BACKWARD(heap_allowable_region_end, PAGE_SIZE), |
| size, error_code, executable); |
| /* ensure future heap allocations are reachable from this allocation */ |
| if (p != NULL) |
| request_region_be_heap_reachable(p, size); |
| #else |
| p = os_heap_reserve(NULL, size, error_code, executable); |
| #endif |
| if (p != NULL) |
| return p; |
| LOG(GLOBAL, LOG_HEAP, 1, "vmm_heap_reserve: failed "PFX"\n", |
| *error_code); |
| } |
| |
| if (at_reset_at_vmm_limit()) { |
| /* We're running low on our reservation, trigger a reset */ |
| if (schedule_reset(RESET_ALL)) { |
| STATS_INC(reset_low_vmm_count); |
| DO_THRESHOLD_SAFE(DYNAMO_OPTION(report_reset_vmm_threshold), |
| FREQ_PROTECTED_SECTION, |
| {/* < max - nothing */}, |
| {/* >= max */ |
| /* FIXME - do we want to report more than once to give some idea of |
| * how much thrashing there is? */ |
| DO_ONCE({ |
| SYSLOG_CUSTOM_NOTIFY(SYSLOG_WARNING, MSG_LOW_ON_VMM_MEMORY, 2, |
| "Potentially thrashing on low virtual " |
| "memory resetting.", get_application_name(), |
| get_application_pid()); |
| /* want QA to notice */ |
| ASSERT_CURIOSITY(false && "vmm heap limit reset thrashing"); |
| }); |
| }); |
| } |
| } |
| |
| p = vmm_heap_reserve_blocks(&heapmgt->vmheap, size); |
| LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_reserve: size=%d p="PFX"\n", |
| size, p); |
| |
| if (p) |
| return p; |
| DO_ONCE({ |
| DODEBUG({ out_of_vmheap_once = true; }); |
| if (!INTERNAL_OPTION(skip_out_of_vm_reserve_curiosity)) { |
| /* this may be unsafe for early services w.r.t. case 666 */ |
| SYSLOG_INTERNAL_WARNING("Out of vmheap reservation - reserving %dKB. " |
| "Falling back onto OS allocation", size/1024); |
| ASSERT_CURIOSITY(false && "Out of vmheap reservation"); |
| } |
| /* This actually-out trigger is only trying to help issues like a |
| * thread-private configuration being a memory hog (and thus we use up |
| * our reserve). Reset needs memory, and this is asynchronous, so no |
| * guarantees here anyway (the app may have already reserved all memory |
| * beyond our reservation, see sqlsrvr.exe and cisvc.exe for ex.) which is |
| * why we have -reset_at_vmm_threshold to make reset success more likely. */ |
| if (DYNAMO_OPTION(reset_at_vmm_full)) { |
| schedule_reset(RESET_ALL); |
| } |
| }); |
| } |
| /* if we fail to allocate from our reservation we fall back to the OS */ |
| DODEBUG(ever_beyond_vmm = true;); |
| #ifdef X64 |
| /* PR 215395, make sure allocation satisfies heap reachability constraints */ |
| p = os_heap_reserve_in_region |
| ((void *)ALIGN_FORWARD(heap_allowable_region_start, PAGE_SIZE), |
| (void *)ALIGN_BACKWARD(heap_allowable_region_end, PAGE_SIZE), |
| size, error_code, executable); |
| /* ensure future heap allocations are reachable from this allocation */ |
| if (p != NULL) |
| request_region_be_heap_reachable(p, size); |
| #else |
| p = os_heap_reserve(NULL, size, error_code, executable); |
| #endif |
| return p; |
| } |
| |
| /* Commit previously reserved pages; returns false when out of memory. |
| * This is here just to complement the vmm interface; in fact it is |
| * almost an alias for os_heap_commit.  (If we had strict types then |
| * here we'd convert a vm_addr_t into a heap_pc.) |
| */ |
| static inline bool |
| vmm_heap_commit(vm_addr_t p, size_t size, uint prot, heap_error_code_t *error_code) |
| { |
| bool res = os_heap_commit(p, size, prot, error_code); |
| size_t commit_used, commit_limit; |
| ASSERT(!OWN_MUTEX(&reset_pending_lock)); |
| if ((DYNAMO_OPTION(reset_at_commit_percent_free_limit) != 0 || |
| DYNAMO_OPTION(reset_at_commit_free_limit) != 0) && |
| os_heap_get_commit_limit(&commit_used, &commit_limit)) { |
| size_t commit_left = commit_limit - commit_used; |
| ASSERT(commit_used <= commit_limit); |
| /* FIXME - worry about overflow in the multiplies below? With 4kb pages isn't |
| * an issue till 160GB of committable memory. */ |
| if ((DYNAMO_OPTION(reset_at_commit_free_limit) != 0 && |
| commit_left < DYNAMO_OPTION(reset_at_commit_free_limit) / PAGE_SIZE) || |
| (DYNAMO_OPTION(reset_at_commit_percent_free_limit) != 0 && |
| 100 * commit_left < |
| DYNAMO_OPTION(reset_at_commit_percent_free_limit) * commit_limit)) { |
| /* Machine is getting low on memory, trigger a reset */ |
| /* FIXME - if we aren't the ones hogging committed memory (rogue app) then |
| * do we want a version of reset that doesn't de-commit our already grabbed |
| * memory to avoid someone else stealing it (or perhaps keep just a minimal |
| * level to ensure we make some progress)? */ |
| /* FIXME - the commit limit is for the whole system; we have no good way of |
| * telling if we're running in a job and if so what the commit limit for the |
| * job is. */ |
| /* FIXME - if a new process is started under dr while the machine is already |
| * past the threshold we will just spin resetting here and not make any |
| * progress, may be better to only reset when we have a reasonable amount of |
| * non-persistent memory to free (so that we can at least make some progress |
| * before resetting again). */ |
| /* FIXME - the threshold is calculated at the current page file size, but |
| * it's possible that the pagefile is expandable (dependent on disk space of |
| * course) and thus we're preventing a potentially beneficial (to us) |
| * upsizing of the pagefile here. See "HKLM\SYSTEM\CCS\Control\Session |
| * Manager\Memory Management" for the initial/max size of the various page |
| * files (querying SystemPagefileInformation only gets you the current size). */ |
| /* xref case 345 on fixmes (and link to wiki discussion) */ |
| if (schedule_reset(RESET_ALL)) { |
| STATS_INC(reset_low_commit_count); |
| DO_THRESHOLD_SAFE(DYNAMO_OPTION(report_reset_commit_threshold), |
| FREQ_PROTECTED_SECTION, |
| {/* < max - nothing */}, |
| {/* >= max */ |
| /* FIXME - do we want to report more than once to give some idea of |
| * how much thrashing there is? */ |
| DO_ONCE({ |
| SYSLOG_CUSTOM_NOTIFY(SYSLOG_WARNING, |
| MSG_LOW_ON_COMMITTABLE_MEMORY, 2, |
| "Potentially thrashing on low committable " |
| "memory resetting.", get_application_name(), |
| get_application_pid()); |
| /* want QA to notice */ |
| ASSERT_CURIOSITY(false && "commit limit reset thrashing"); |
| }); |
| }); |
| } |
| } |
| } |
| if (!res && |
| DYNAMO_OPTION(oom_timeout) != 0) { |
| DEBUG_DECLARE(heap_error_code_t old_error_code = *error_code;) |
| ASSERT(old_error_code != HEAP_ERROR_SUCCESS); |
| |
| /* check whether worth retrying */ |
| if (!os_heap_systemwide_overcommit(*error_code)) { |
| /* FIXME: we should check whether current process is the hog */ |
| /* unless we have used the memory, there is still a |
| * miniscule chance another thread will free up some or |
| * will attempt suicide, so could retry even if current |
| * process has a leak */ |
| ASSERT_NOT_IMPLEMENTED(false); |
| /* retry */ |
| } |
| |
| SYSLOG_INTERNAL_WARNING("vmm_heap_commit oom: timeout and retry"); |
| /* let's hope a memory hog dies in the mean time */ |
| os_timeout(DYNAMO_OPTION(oom_timeout)); |
| |
| res = os_heap_commit(p, size, prot, error_code); |
| DODEBUG({ |
| if (res) { |
| SYSLOG_INTERNAL_WARNING("vmm_heap_commit retried, got away! old="PFX" new="PFX"\n", |
| old_error_code, *error_code); |
| } else { |
| SYSLOG_INTERNAL_WARNING("vmm_heap_commit retrying, no luck. old="PFX" new="PFX"\n", |
| old_error_code, *error_code); |
| } |
| }); |
| } |
| |
| return res; |
| } |
| |
| /* back to normal section */ |
| END_DATA_SECTION() |
| |
| /* Free previously reserved and possibly committed memory.  If it is within |
| * the memory managed by the virtual memory manager, we only decommit back to |
| * the OS and return the blocks to the vmm's free pool (the OS-level |
| * reservation stays with us).  Keep in mind that this can be called on units |
| * that are not fully committed, e.g. guard pages are included - as long as |
| * the os_heap_decommit interface can handle this we're OK. |
| */ |
| static void |
| vmm_heap_free(vm_addr_t p, size_t size, heap_error_code_t *error_code) |
| { |
| LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_free: size=%d p="PFX" is_reserved=%d\n", |
| size, p, vmm_is_reserved_unit(&heapmgt->vmheap, p, size)); |
| |
| /* the memory doesn't have to be within our VM reserve if it |
| * was allocated via an extra OS call when we ran out */ |
| if (DYNAMO_OPTION(vm_reserve)) { |
| if (vmm_is_reserved_unit(&heapmgt->vmheap, p, size)) { |
| os_heap_decommit(p, size, error_code); |
| vmm_heap_free_blocks(&heapmgt->vmheap, p, size); |
| LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_free: freed size=%d p="PFX"\n", |
| size, p); |
| return; |
| } else { |
| /* FIXME: check if this is stack_free getting in the way, then ignore it */ |
| /* FIXME: could do this by overriding the meaning of the vmheap fields |
| after cleanup to a different combination that start_pc = end_pc = NULL |
| */ |
| /* FIXME: see vmm_heap_unit_exit for the current stack_free problem */ |
| if (vmm_heap_exited) { |
| *error_code = HEAP_ERROR_SUCCESS; |
| return; |
| } |
| } |
| } |
| os_heap_free(p, size, error_code); |
| } |
| |
| static void |
| vmm_heap_decommit(vm_addr_t p, size_t size, heap_error_code_t *error_code) |
| { |
| LOG(GLOBAL, LOG_HEAP, 2, "vmm_heap_decommit: size=%d p="PFX" is_reserved=%d\n", |
| size, p, vmm_is_reserved_unit(&heapmgt->vmheap, p, size)); |
| os_heap_decommit(p, size, error_code); |
| /* nothing to be done to vmm blocks */ |
| } |
| |
| /* Caller is required to handle thread synchronization and to update dynamo vm areas. |
| * size must be PAGE_SIZE-aligned. |
| * Returns NULL if fails to allocate memory! |
| */ |
| static void * |
| vmm_heap_alloc(size_t size, uint prot, heap_error_code_t *error_code) |
| { |
| vm_addr_t p = vmm_heap_reserve(size, error_code, TEST(MEMPROT_EXEC, prot)); |
| if (!p) |
| return NULL; /* out of reserved memory */ |
| |
| if (!vmm_heap_commit(p, size, prot, error_code)) |
| return NULL; /* out of committed memory */ |
| return p; |
| } |
| |
| /* set reachability constraints before loading any client libs */ |
| void |
| vmm_heap_init_constraints() |
| { |
| #ifdef X64 |
| /* add reachable regions before we allocate the heap, xref PR 215395 */ |
| /* i#774, i#901: we no longer need the DR library nor ntdll.dll to be |
| * reachable by the vmheap reservation. But, for -heap_in_lower_4GB, |
| * we must call request_region_be_heap_reachable() up front. |
| */ |
| if (DYNAMO_OPTION(heap_in_lower_4GB)) |
| request_region_be_heap_reachable((byte *)(ptr_uint_t)0x80000000/*middle*/, 1); |
| else if (DYNAMO_OPTION(vm_base_near_app)) { |
| /* Required for STATIC_LIBRARY: must be near app b/c clients are there. |
| * Non-static: still a good idea for fewer rip-rel manglings. |
| * Asking for app base means we'll prefer before the app, which |
| * has less of an impact on its heap. |
| */ |
| app_pc base = get_application_base(); |
| request_region_be_heap_reachable(base, get_application_end() - base); |
| } else { |
| /* It seems silly to let the 1st client lib set the region, so we give |
| * -vm_base priority. |
| */ |
| request_region_be_heap_reachable |
| ((byte *)DYNAMO_OPTION(vm_base), DYNAMO_OPTION(vm_size)); |
| } |
| /* XXX: really we should iterate and try other options: right now we'll |
| * just fail if we run out of space. E.g., if the app is quite large, we might |
| * fit the client near, and then our vm reservation could fail and at that |
| * point we'd just abort. We need to restructure the code to allow |
| * iterating over the client lib loads and vm reservation at once. |
| */ |
| #endif /* X64 */ |
| } |
| |
| /* virtual memory manager initialization */ |
| void |
| vmm_heap_init() |
| { |
| IF_WINDOWS(ASSERT(VMM_BLOCK_SIZE == OS_ALLOC_GRANULARITY)); |
| if (DYNAMO_OPTION(vm_reserve)) { |
| vmm_heap_unit_init(&heapmgt->vmheap, DYNAMO_OPTION(vm_size)); |
| } |
| } |
| |
| void |
| vmm_heap_exit() |
| { |
| /* virtual memory manager exit */ |
| if (DYNAMO_OPTION(vm_reserve)) { |
| /* FIXME: we have three regions that are not explicitly |
| * deallocated: the current stack, the init stack, and global_do_syscall |
| */ |
| DOCHECK(1, { |
| uint perstack = |
| ALIGN_FORWARD_UINT(dynamo_options.stack_size + |
| (dynamo_options.guard_pages ? (2*PAGE_SIZE) : 0), |
| VMM_BLOCK_SIZE) / |
| VMM_BLOCK_SIZE; |
| uint unfreed_blocks = perstack * 1 /* initstack */ + |
| /* current stack */ |
| perstack * ((IF_WINDOWS_ELSE(doing_detach, false) |
| IF_APP_EXPORTS(|| dr_api_exit)) ? 0 : 1); |
| /* FIXME: on detach, arch_thread_exit should explicitly mark all |
| * needed TPCs as left behind, so that we can assert even for |
| * detach |
| */ |
| ASSERT(IF_WINDOWS(doing_detach || ) /* not deterministic when detaching */ |
| heapmgt->vmheap.num_free_blocks == heapmgt->vmheap.num_blocks |
| - unfreed_blocks || |
| /* >=, not ==, b/c if we hit the vmm limit the cur dstack |
| * could be outside of vmm (i#1164) |
| */ |
| (ever_beyond_vmm && heapmgt->vmheap.num_free_blocks >= |
| heapmgt->vmheap.num_blocks - unfreed_blocks)); |
| }); |
| |
| /* FIXME: On process exit we are currently executing off a |
| * stack in this region so we cannot free the whole allocation. |
| * |
| * FIXME: Any tombstone allocations will have to use a |
| * different interface than the generic heap_mmap() which is |
| * sometimes used to leave things behind.  FIXME: Currently |
| * we'll leave behind the whole vm unit if any tombstones are |
| * left - which in fact is always the case, no matter whether |
| * thread private code needs to be left or not. |
| * |
| * The global_do_syscall 32-byte allocation should be part of our |
| * dll and won't have to be left. |
| * |
| * The current stack is the main problem because it is later |
| * cleaned up in cleanup_and_terminate by calling stack_free which |
| * in turn gets all the way to vmm_heap_free.  Therefore we add an |
| * explicit test for vmm_heap_exited, so that we can otherwise free |
| * bookkeeping information and delete the lock now. |
| * |
| * A potential solution to most of these problems is to have |
| * cleanup_and_terminate call vmm_heap_exit when cleaning up |
| * the process, or to just leave the vm mapping behind and |
| * simply pass a different argument to stack_free. |
| */ |
| vmm_heap_unit_exit(&heapmgt->vmheap); |
| |
| vmm_heap_exited = true; |
| } |
| } |
| |
/* checks for compatibility among heap options; returns true if it
 * modified the value of any option to make them compatible
 */
| bool |
| heap_check_option_compatibility() |
| { |
| bool ret = false; |
| |
| ret = check_param_bounds(&dynamo_options.vm_size, MIN_VMM_HEAP_UNIT_SIZE, |
| MAX_VMM_HEAP_UNIT_SIZE, "vm_size") |
| || ret; |
| #ifdef INTERNAL |
| /* if max_heap_unit_size is too small you may get a funny message |
| * "initial_heap_unit_size must be >= 8229 and <= 4096" but in |
| * release build we will take the min and then complain about |
| * max_heap_unit_size and set it to the min also, so it all works |
| * out w/o needing an extra check() call. |
| */ |
| /* case 7626: don't short-circuit checks, as later ones may be needed */ |
| ret = check_param_bounds(&dynamo_options.initial_heap_unit_size, |
/* if we have units smaller than a page we end up
 * allocating 64KB chunks for "oversized" units
 * for just about every alloc!  So round up to
 * at least a page.
 */
| ALIGN_FORWARD(UNITOVERHEAD + 1, PAGE_SIZE), |
| HEAP_UNIT_MAX_SIZE, "initial_heap_unit_size") |
| || ret; |
| ret = check_param_bounds(&dynamo_options.initial_global_heap_unit_size, |
| ALIGN_FORWARD(UNITOVERHEAD + 1, PAGE_SIZE), |
| HEAP_UNIT_MAX_SIZE, "initial_global_heap_unit_size") |
| || ret; |
| ret = check_param_bounds(&dynamo_options.max_heap_unit_size, |
| MAX(HEAP_UNIT_MIN_SIZE, GLOBAL_UNIT_MIN_SIZE), |
| INT_MAX, "max_heap_unit_size") |
| || ret; |
| #endif |
| return ret; |
| } |
| |
| /* thread-shared initialization that should be repeated after a reset */ |
| void |
| heap_reset_init() |
| { |
| if (DYNAMO_OPTION(enable_reset)) { |
| threadunits_init(GLOBAL_DCONTEXT, &heapmgt->global_nonpersistent_units, |
| GLOBAL_UNIT_MIN_SIZE); |
| } |
| } |
| |
| /* initialization */ |
| void |
| heap_init() |
| { |
| int i; |
| uint prev_sz = 0; |
| |
| LOG(GLOBAL, LOG_TOP|LOG_HEAP, 2, "Heap bucket sizes are:\n"); |
| /* make sure we'll preserve alignment */ |
| ASSERT(ALIGNED(HEADER_SIZE, HEAP_ALIGNMENT)); |
| /* make sure free list pointers will fit */ |
| ASSERT(BLOCK_SIZES[0] >= sizeof(heap_pc*)); |
| /* since sizes depend on size of structs, make sure they're in order */ |
| for (i = 0; i < BLOCK_TYPES; i++) { |
| ASSERT(BLOCK_SIZES[i] > prev_sz); |
| /* we assume all of our heap allocs are aligned */ |
| ASSERT(i == BLOCK_TYPES-1 || ALIGNED(BLOCK_SIZES[i], HEAP_ALIGNMENT)); |
| prev_sz = BLOCK_SIZES[i]; |
| LOG(GLOBAL, LOG_TOP|LOG_HEAP, 2, "\t%d bytes\n", BLOCK_SIZES[i]); |
| } |
| |
| /* we assume writes to some static vars are atomic, |
| * i.e., the vars don't cross cache lines. they shouldn't since |
| * they should all be 4-byte-aligned in the data segment. |
| * FIXME: ensure that release build aligns ok? |
| * I would be quite surprised if static vars were not 4-byte-aligned! |
| */ |
| ASSERT(ALIGN_BACKWARD(&heap_exiting, CACHE_LINE_SIZE()) == |
| ALIGN_BACKWARD(&heap_exiting + 1, CACHE_LINE_SIZE())); |
| ASSERT(ALIGN_BACKWARD(&heap_unit_lock.owner, CACHE_LINE_SIZE()) == |
| ALIGN_BACKWARD(&heap_unit_lock.owner + 1, CACHE_LINE_SIZE())); |
| |
| /* For simplicity we go through our normal heap mechanism to allocate |
| * our post-init heapmgt struct |
| */ |
| ASSERT(heapmgt == &temp_heapmgt); |
| heapmgt->global_heap_writable = true; |
| threadunits_init(GLOBAL_DCONTEXT, &heapmgt->global_units, GLOBAL_UNIT_MIN_SIZE); |
| |
| heapmgt = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, heap_management_t, ACCT_MEM_MGT, PROTECTED); |
| memset(heapmgt, 0, sizeof(*heapmgt)); |
| ASSERT(sizeof(temp_heapmgt) == sizeof(*heapmgt)); |
| memcpy(heapmgt, &temp_heapmgt, sizeof(temp_heapmgt)); |
| |
| threadunits_init(GLOBAL_DCONTEXT, &heapmgt->global_unprotected_units, |
| GLOBAL_UNIT_MIN_SIZE); |
| heap_reset_init(); |
| |
| #ifdef WINDOWS |
| /* PR 250294: As part of 64-bit hook work, hook reachability was addressed |
| * using landing pads (see win32/callback.c for more explanation). Landing |
| * pad areas are a type of special heap, so they should be initialized |
| * during heap init. |
| * Each landing pad area has its own allocation pointer, so they shouldn't |
| * be merged automatically. |
| */ |
| VMVECTOR_ALLOC_VECTOR(landing_pad_areas, GLOBAL_DCONTEXT, |
| VECTOR_SHARED | VECTOR_NEVER_MERGE, |
| landing_pad_areas_lock); |
| #endif |
| } |
| |
| /* need to not remove from vmareas on process exit -- vmareas has already exited! */ |
| static void |
| really_free_unit(heap_unit_t *u) |
| { |
| STATS_SUB(heap_capacity, UNIT_COMMIT_SIZE(u)); |
STATS_ADD(heap_reserved_only,
          (stats_int_t)(UNIT_COMMIT_SIZE(u) - UNIT_RESERVED_SIZE(u)));
| /* remember that u itself is inside unit, not separately allocated */ |
| release_guarded_real_memory((vm_addr_t)u, UNIT_RESERVED_SIZE(u), |
| false/*do not update DR areas now*/, true); |
| } |
| |
| /* Free all thread-shared state not critical to forward progress; |
| * heap_reset_init() will be called before continuing. |
| */ |
| void |
| heap_reset_free() |
| { |
| heap_unit_t *u, *next_u; |
| /* FIXME: share some code w/ heap_exit -- currently only called by reset */ |
| ASSERT(DYNAMO_OPTION(enable_reset)); |
| |
| /* we must grab this lock before heap_unit_lock to avoid rank |
| * order violations when freeing |
| */ |
| dynamo_vm_areas_lock(); |
| |
/* for combining stats into global_units we need this lock;
 * FIXME: remove if we go to a separate stats sum location
 */
| DODEBUG({ acquire_recursive_lock(&global_alloc_lock); }); |
| |
| acquire_recursive_lock(&heap_unit_lock); |
| |
| LOG(GLOBAL, LOG_HEAP, 1, "Pre-reset, global heap unit stats:\n"); |
| /* FIXME: free directly rather than putting on dead list first */ |
| threadunits_exit(&heapmgt->global_nonpersistent_units, GLOBAL_DCONTEXT); |
| |
| /* free all dead units */ |
| u = heapmgt->heap.dead; |
| while (u != NULL) { |
| next_u = u->next_global; |
| LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing dead unit "PFX"-"PFX" [-"PFX"]\n", |
| u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u)); |
| RSTATS_DEC(heap_num_free); |
| really_free_unit(u); |
| u = next_u; |
| } |
| heapmgt->heap.dead = NULL; |
| heapmgt->heap.num_dead = 0; |
| release_recursive_lock(&heap_unit_lock); |
| DODEBUG({ release_recursive_lock(&global_alloc_lock); }); |
| dynamo_vm_areas_unlock(); |
| } |
| |
| /* atexit cleanup */ |
| void |
| heap_exit() |
| { |
| heap_unit_t *u, *next_u; |
| heap_management_t *temp; |
| |
| heap_exiting = true; |
| /* FIXME: we shouldn't need either lock if executed last */ |
| dynamo_vm_areas_lock(); |
| acquire_recursive_lock(&heap_unit_lock); |
| |
| #ifdef WINDOWS |
| release_landing_pad_mem(); /* PR 250294 */ |
| #endif |
| |
| LOG(GLOBAL, LOG_HEAP, 1, "Global unprotected heap unit stats:\n"); |
| threadunits_exit(&heapmgt->global_unprotected_units, GLOBAL_DCONTEXT); |
| if (DYNAMO_OPTION(enable_reset)) { |
| LOG(GLOBAL, LOG_HEAP, 1, "Global nonpersistent heap unit stats:\n"); |
| threadunits_exit(&heapmgt->global_nonpersistent_units, GLOBAL_DCONTEXT); |
| } |
| |
| /* Now we need to go back to the static struct to clean up */ |
| ASSERT(heapmgt != &temp_heapmgt); |
| memcpy(&temp_heapmgt, heapmgt, sizeof(temp_heapmgt)); |
| temp = heapmgt; |
| heapmgt = &temp_heapmgt; |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, temp, heap_management_t, ACCT_MEM_MGT, PROTECTED); |
| |
| LOG(GLOBAL, LOG_HEAP, 1, "Global heap unit stats:\n"); |
| threadunits_exit(&heapmgt->global_units, GLOBAL_DCONTEXT); |
| |
| /* free heap for all unfreed units */ |
| LOG(GLOBAL, LOG_HEAP, 1, "Unfreed units:\n"); |
| u = heapmgt->heap.units; |
| while (u != NULL) { |
| next_u = u->next_global; |
| LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing live unit "PFX"-"PFX" [-"PFX"]\n", |
| u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u)); |
| RSTATS_DEC(heap_num_live); |
| really_free_unit(u); |
| u = next_u; |
| } |
| heapmgt->heap.units = NULL; |
| u = heapmgt->heap.dead; |
| while (u != NULL) { |
| next_u = u->next_global; |
| LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing dead unit "PFX"-"PFX" [-"PFX"]\n", |
| u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u)); |
| RSTATS_DEC(heap_num_free); |
| really_free_unit(u); |
| u = next_u; |
| } |
| heapmgt->heap.dead = NULL; |
| release_recursive_lock(&heap_unit_lock); |
| dynamo_vm_areas_unlock(); |
| |
| DELETE_RECURSIVE_LOCK(heap_unit_lock); |
| DELETE_RECURSIVE_LOCK(global_alloc_lock); |
| #ifdef X64 |
| DELETE_LOCK(request_region_be_heap_reachable_lock); |
| #endif |
| } |
| |
/* FIXME:
 * detect whether the app is the one we're fighting for memory; if so,
 * don't free memory, or else the app will just keep grabbing more.
 * Need a test for hitting the 2GB (or 3GB!) user-mode limit.
 */
| static void |
| heap_low_on_memory() |
| { |
| /* free some memory! */ |
| heap_unit_t *u, *next_u; |
| size_t freed = 0; |
| LOG(GLOBAL, LOG_CACHE|LOG_STATS, 1, |
| "heap_low_on_memory: about to free dead list units\n"); |
| /* WARNING: this routine is called at arbitrary allocation failure points, |
| * so we have to be careful what locks we grab |
| * However, no allocation site can hold a lock weaker in rank than |
| * heap_unit_lock, b/c it could deadlock on the allocation itself! |
| * So we're safe. |
| */ |
/* must grab this lock prior to heap_unit_lock if affecting DR vm areas;
 * this is recursive, so it's ok if we ran out of memory while holding the
 * DR vm area lock
 */
| ASSERT(safe_to_allocate_or_free_heap_units()); |
| dynamo_vm_areas_lock(); |
| acquire_recursive_lock(&heap_unit_lock); |
| u = heapmgt->heap.dead; |
| while (u != NULL) { |
| next_u = u->next_global; |
| freed += UNIT_COMMIT_SIZE(u); |
| /* FIXME: if out of committed pages only, could keep our reservations */ |
| LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing dead unit "PFX"-"PFX" [-"PFX"]\n", |
| u, UNIT_COMMIT_END(u), UNIT_RESERVED_END(u)); |
| RSTATS_DEC(heap_num_free); |
| really_free_unit(u); |
| u = next_u; |
| heapmgt->heap.num_dead--; |
| } |
| heapmgt->heap.dead = NULL; |
| release_recursive_lock(&heap_unit_lock); |
| dynamo_vm_areas_unlock(); |
| LOG(GLOBAL, LOG_CACHE|LOG_STATS, 1, |
| "heap_low_on_memory: freed %d KB\n", freed/1024); |
| /* FIXME: we don't keep a list of guard pages, which we may decide to throw |
| * out or compact at this time. |
| */ |
| /* FIXME: should also fix up the allocator to look in other free lists |
| * of sizes larger than asked for, we may have plenty of memory available |
| * in other lists! see comments in common_heap_alloc |
| */ |
| } |
| |
| static const char* |
| get_oom_source_name(oom_source_t source) |
| { |
/* currently only single-character code names
 * (still returned as a string, though)
 */
| const char *code_name = "?"; |
| |
| switch (source) { |
| case OOM_INIT : code_name = "I"; break; |
| case OOM_RESERVE : code_name = "R"; break; |
| case OOM_COMMIT : code_name = "C"; break; |
| case OOM_EXTEND : code_name = "E"; break; |
| default: |
| ASSERT_NOT_REACHED(); |
| } |
| return code_name; |
| } |
| |
| static bool |
| silent_oom_for_process(oom_source_t source) |
| { |
| if (TESTANY(OOM_COMMIT|OOM_EXTEND, source) && |
| !IS_STRING_OPTION_EMPTY(silent_commit_oom_list)) { |
| bool onlist; |
| const char *process_name = get_short_name(get_application_name()); |
| string_option_read_lock(); |
| onlist = check_filter_with_wildcards(DYNAMO_OPTION(silent_commit_oom_list), |
| process_name); |
| string_option_read_unlock(); |
| |
| if (onlist) { |
| SYSLOG_INTERNAL_WARNING("not reporting last words of executable %s", |
| process_name); |
| return true; |
| } |
| } |
| return false; |
| } |
| |
/* oom_source_t identifies the action we were taking; os_error_code is
 * the value returned by the last system call - opaque at this
 * OS-independent layer.
 */
| static void |
| report_low_on_memory(oom_source_t source, heap_error_code_t os_error_code) |
| { |
| if (TESTANY(DYNAMO_OPTION(silent_oom_mask), source) |
| || silent_oom_for_process(source)) { |
| SYSLOG_INTERNAL_WARNING("Mostly silent OOM: %s "PFX".\n", |
| get_oom_source_name(source), os_error_code); |
| /* still produce an ldmp for internal use */ |
| if (TEST(DUMPCORE_OUT_OF_MEM_SILENT, DYNAMO_OPTION(dumpcore_mask))) |
| os_dump_core("Out of memory, silently aborting program."); |
| } else { |
| const char *oom_source_code = get_oom_source_name(source); |
| char status_hex[19]; /* FIXME: for 64bit hex need 16+NULL */ |
| /* note 0x prefix added by the syslog */ |
| snprintf(status_hex, |
| BUFFER_SIZE_ELEMENTS(status_hex), PFX, /* FIXME: 32bit */ |
| os_error_code); |
| NULL_TERMINATE_BUFFER(status_hex); |
| /* SYSLOG first */ |
| SYSLOG_CUSTOM_NOTIFY(SYSLOG_CRITICAL, MSG_OUT_OF_MEMORY, 4, |
| "Out of memory. Program aborted.", |
| get_application_name(), get_application_pid(), |
| oom_source_code, status_hex |
| ); |
| |
| /* FIXME: case 7306 can't specify arguments in SYSLOG_CUSTOM_NOTIFY */ |
| SYSLOG_INTERNAL_WARNING("OOM Status: %s %s", oom_source_code, status_hex); |
| |
| /* FIXME: case 7296 - ldmp even if we have decided not to produce an event above */ |
| if (TEST(DUMPCORE_OUT_OF_MEM, DYNAMO_OPTION(dumpcore_mask))) |
| os_dump_core("Out of memory, aborting program."); |
| |
| /* passing only status code to XML where we should have a stack dump and callstack */ |
| report_diagnostics("Out of memory", status_hex, NO_VIOLATION_BAD_INTERNAL_STATE); |
| } |
| os_terminate(NULL, TERMINATE_PROCESS); |
| ASSERT_NOT_REACHED(); |
| } |
| |
| /* update statistics for committed memory, and add to vm_areas */ |
| static inline void |
| account_for_memory(void *p, size_t size, uint prot, bool add_vm, bool image |
| _IF_DEBUG(const char *comment)) |
| { |
| STATS_ADD_PEAK(memory_capacity, size); |
| |
/* case 3045: areas inside the vmheap reservation are not added to the list;
 * for clients that use DR-allocated memory, we have get_memory_info()
 * query the OS to see inside
 */
| if (vmm_is_reserved_unit(&heapmgt->vmheap, p, size)) { |
| return; |
| } |
| |
| if (add_vm) { |
| add_dynamo_vm_area(p, ((app_pc)p) + size, prot, image _IF_DEBUG(comment)); |
| } else { |
/* due to circular dependencies between vmareas and the global heap we do not
 * call add_dynamo_vm_area here; instead we indicate that something has changed
 */
| mark_dynamo_vm_areas_stale(); |
| /* NOTE: 'prot' info is lost about this region, but is needed in |
| * heap_vmareas_synch_units to update all_memory_areas. Currently |
| * heap_create_unit is the only place that passes 'false' with prot rw-. |
| */ |
| ASSERT(TESTALL(MEMPROT_READ | MEMPROT_WRITE, prot)); |
| } |
| } |
| |
| /* remove_vm MUST be false iff this is heap memory, which is updated separately */ |
| static void |
| update_dynamo_areas_on_release(app_pc start, app_pc end, bool remove_vm) |
| { |
| if (!vm_areas_exited && !heap_exiting) { /* avoid problems when exiting */ |
/* case 3045: areas inside the vmheap reservation are not added to the list;
 * for clients that use DR-allocated memory, we have get_memory_info()
 * query the OS to see inside
 */
| if (vmm_is_reserved_unit(&heapmgt->vmheap, start, end - start)) { |
| return; |
| } |
| if (remove_vm) { |
| remove_dynamo_vm_area(start, end); |
| } else { |
/* due to cyclic dependencies between heap and vmareas we cannot remove
 * incrementally.  The pending set is protected by the same lock needed to
 * synch the vm areas, so we will never mis-identify free memory as DR memory.
 */
| mark_dynamo_vm_areas_stale(); |
| dynamo_areas_pending_remove = true; |
| } |
| } |
| } |
| |
| bool |
| lockwise_safe_to_allocate_memory() |
| { |
| /* check whether it's safe to hold a lock that normally can be held |
| * for memory allocation -- i.e., check whether we hold the |
| * global_alloc_lock |
| */ |
| return !self_owns_recursive_lock(&global_alloc_lock); |
| } |
| |
| /* we indirect all os memory requests through here so we have a central place |
| * to handle the out-of-memory condition. |
| * add_vm MUST be false iff this is heap memory, which is updated separately. |
| */ |
| static void * |
| get_real_memory(size_t size, uint prot, bool add_vm _IF_DEBUG(const char *comment)) |
| { |
| void *p; |
| heap_error_code_t error_code; |
| /* must round up to page sizes, else vmm_heap_alloc assert triggers */ |
| size = ALIGN_FORWARD(size, PAGE_SIZE); |
| |
| /* memory alloc/dealloc and updating DR list must be atomic */ |
| dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ |
| |
| p = vmm_heap_alloc(size, prot, &error_code); |
| if (p == NULL) { |
| SYSLOG_INTERNAL_WARNING_ONCE("Out of memory -- cannot reserve or " |
| "commit %dKB. Trying to recover.", size/1024); |
| /* we should be ok here, shouldn't come in here holding global_alloc_lock |
| * or heap_unit_lock w/o first having grabbed DR areas lock |
| */ |
| ASSERT(safe_to_allocate_or_free_heap_units()); |
| heap_low_on_memory(); |
| fcache_low_on_memory(); |
| /* try again |
| * FIXME: have more sophisticated strategy of freeing a little, then getting |
| * more drastic with each subsequent failure |
| * FIXME: can only free live fcache units for current thread w/ current |
| * impl...should we wait a while and try again if out of memory, hoping |
| * other threads have freed some?!?! |
| */ |
| p = vmm_heap_alloc(size, prot, &error_code); |
| if (p == NULL) { |
| report_low_on_memory(OOM_RESERVE, error_code); |
| } |
| SYSLOG_INTERNAL_WARNING_ONCE("Out of memory -- but still alive after " |
| "emergency free."); |
| } |
| |
| account_for_memory(p, size, prot, add_vm, false _IF_DEBUG(comment)); |
| dynamo_vm_areas_unlock(); |
| |
| return p; |
| } |
| |
| static void |
| release_memory_and_update_areas(app_pc p, size_t size, bool decommit, bool remove_vm) |
| { |
| heap_error_code_t error_code; |
| /* these two operations need to be atomic wrt DR area updates */ |
| dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ |
| /* ref case 3035, we must remove from dynamo_areas before we free in case |
| * we end up allocating memory in the process of removing the area |
| * (we don't want to end up getting the memory we just freed since that |
| * would lead to errors in the list when we finally did remove it) |
| */ |
| update_dynamo_areas_on_release(p, p + size, remove_vm); |
| if (decommit) |
| vmm_heap_decommit(p, size, &error_code); |
| else |
| vmm_heap_free(p, size, &error_code); |
| ASSERT(error_code == HEAP_ERROR_SUCCESS); |
| dynamo_vm_areas_unlock(); |
| } |
| |
| /* remove_vm MUST be false iff this is heap memory, which is updated separately */ |
| static void |
| release_real_memory(void *p, size_t size, bool remove_vm) |
| { |
| /* must round up to page sizes for vmm_heap_free */ |
| size = ALIGN_FORWARD(size, PAGE_SIZE); |
| |
| release_memory_and_update_areas((app_pc)p, size, false/*free*/, remove_vm); |
| |
| /* avoid problem w/ being called by cleanup_and_terminate after dynamo_process_exit */ |
| DOSTATS({ |
| if (!dynamo_exited_log_and_stats) |
| STATS_SUB(memory_capacity, size); |
| }); |
| } |
| |
| static void |
| extend_commitment(vm_addr_t p, size_t size, uint prot, |
| bool initial_commit) |
| { |
| heap_error_code_t error_code; |
| ASSERT(ALIGNED(p, PAGE_SIZE)); |
| size = ALIGN_FORWARD(size, PAGE_SIZE); |
| if (!vmm_heap_commit(p, size, prot, &error_code)) { |
| SYSLOG_INTERNAL_WARNING_ONCE("Out of memory - cannot extend commit " |
| "%dKB. Trying to recover.", size/1024); |
| heap_low_on_memory(); |
| fcache_low_on_memory(); |
| /* see low-memory ideas in get_real_memory */ |
| if (!vmm_heap_commit(p, size, prot, &error_code)) { |
| report_low_on_memory(initial_commit ? OOM_COMMIT : OOM_EXTEND, |
| error_code); |
| } |
| |
| SYSLOG_INTERNAL_WARNING_ONCE("Out of memory in extend - still alive " |
| "after emergency free."); |
| } |
| } |
| |
/* a wrapper around get_real_memory that adds a guard page on each side of
 * the requested unit.  The guard pages consume only uncommitted virtual
 * address space and should not use any physical memory.
 * add_vm MUST be false iff this is heap memory, which is updated separately.
 */
| static vm_addr_t |
| get_guarded_real_memory(size_t reserve_size, size_t commit_size, uint prot, |
| bool add_vm, bool guarded _IF_DEBUG(const char *comment)) |
| { |
| vm_addr_t p; |
| uint guard_size = PAGE_SIZE; |
| heap_error_code_t error_code; |
| ASSERT(reserve_size >= commit_size); |
| if (!guarded || !dynamo_options.guard_pages) { |
| if (reserve_size == commit_size) |
| return get_real_memory(reserve_size, prot, add_vm _IF_DEBUG(comment)); |
| guard_size = 0; |
| } |
| |
| reserve_size = ALIGN_FORWARD(reserve_size, PAGE_SIZE); |
| commit_size = ALIGN_FORWARD(commit_size, PAGE_SIZE); |
| |
reserve_size += 2 * guard_size; /* add top and bottom guards */
| |
| /* memory alloc/dealloc and updating DR list must be atomic */ |
| dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ |
| p = vmm_heap_reserve(reserve_size, &error_code, TEST(MEMPROT_EXEC, prot)); |
| if (p == NULL) { |
| /* This is very unlikely to happen - we have to reach at least 2GB reserved memory. */ |
| SYSLOG_INTERNAL_WARNING_ONCE("Out of memory - cannot reserve %dKB. " |
| "Trying to recover.", reserve_size/1024); |
| heap_low_on_memory(); |
| fcache_low_on_memory(); |
| |
| p = vmm_heap_reserve(reserve_size, &error_code, TEST(MEMPROT_EXEC, prot)); |
| if (p == NULL) { |
| report_low_on_memory(OOM_RESERVE, error_code); |
| } |
| |
| SYSLOG_INTERNAL_WARNING_ONCE("Out of memory on reserve - but still " |
| "alive after emergency free."); |
| } |
| /* includes guard pages if add_vm -- else, heap_vmareas_synch_units() will |
| * add guard pages in by assuming one page on each side of every heap unit |
| * if dynamo_options.guard_pages |
| */ |
| account_for_memory((void *)p, reserve_size, prot, add_vm, false _IF_DEBUG(comment)); |
| dynamo_vm_areas_unlock(); |
| |
| STATS_ADD_PEAK(reserved_memory_capacity, reserve_size); |
| STATS_ADD_PEAK(guard_pages, 2); |
| |
| p += guard_size; |
| extend_commitment(p, commit_size, prot, true /* initial commit */); |
| |
| return p; |
| } |
| |
/* a wrapper around release_real_memory that also frees the guard pages on
 * each side of the requested unit.
 * remove_vm MUST be false iff this is heap memory, which is updated separately
 */
| static void |
| release_guarded_real_memory(vm_addr_t p, size_t size, bool remove_vm, bool guarded) |
| { |
| if (!guarded || !dynamo_options.guard_pages) { |
| release_real_memory(p, size, remove_vm); |
| return; |
| } |
| |
| size = ALIGN_FORWARD(size, PAGE_SIZE); |
| size += PAGE_SIZE * 2; /* add top and bottom guards */ |
| p -= PAGE_SIZE; |
| |
| release_memory_and_update_areas((app_pc)p, size, false/*free*/, remove_vm); |
| |
| /* avoid problem w/ being called by cleanup_and_terminate after dynamo_process_exit */ |
| DOSTATS({ |
| if (!dynamo_exited_log_and_stats) { |
| STATS_SUB(memory_capacity, size); |
| STATS_SUB(reserved_memory_capacity, size); |
| STATS_ADD(guard_pages, -2); |
| } |
| }); |
| } |
| |
/* use heap_mmap to allocate large chunks of executable memory;
 * it's mainly used to allocate our fcache units
 */
| void * |
| heap_mmap_ex(size_t reserve_size, size_t commit_size, uint prot, bool guarded) |
| { |
| /* XXX i#774: when we split vmheap and vmcode, if MEMPROT_EXEC is requested |
| * here (or this is a call from a client, for reachability |
| * compatibility), put it in vmcode; else in vmheap. |
| */ |
| void *p = get_guarded_real_memory(reserve_size, commit_size, prot, true, guarded |
| _IF_DEBUG("heap_mmap")); |
| #ifdef DEBUG_MEMORY |
| if (TEST(MEMPROT_WRITE, prot)) |
| memset(p, HEAP_ALLOCATED_BYTE, commit_size); |
| #endif |
| /* We rely on this for freeing _post_stack in absence of dcontext */ |
| ASSERT(!DYNAMO_OPTION(vm_reserve) || |
| !DYNAMO_OPTION(stack_shares_gencode) || |
| (ptr_uint_t)p - (guarded ? (GUARD_PAGE_ADJUSTMENT/2) : 0) == |
| ALIGN_BACKWARD(p, VMM_BLOCK_SIZE) || |
| at_reset_at_vmm_limit()); |
| LOG(GLOBAL, LOG_HEAP, 2, "heap_mmap: %d bytes [/ %d] @ "PFX"\n", |
| commit_size, reserve_size, p); |
| STATS_ADD_PEAK(mmap_capacity, commit_size); |
| STATS_ADD_PEAK(mmap_reserved_only, (reserve_size - commit_size)); |
| return p; |
| } |
| |
/* use heap_mmap to allocate large chunks of executable memory;
 * it's mainly used to allocate our fcache units
 */
| void * |
| heap_mmap_reserve(size_t reserve_size, size_t commit_size) |
| { |
| /* heap_mmap always marks as executable */ |
| return heap_mmap_ex(reserve_size, commit_size, |
| MEMPROT_EXEC|MEMPROT_READ|MEMPROT_WRITE, true); |
| } |
| |
| /* It is up to the caller to ensure commit_size is a page size multiple, |
| * and that it does not extend beyond the initial reservation. |
| */ |
| void |
| heap_mmap_extend_commitment(void *p, size_t commit_size) |
| { |
| extend_commitment(p, commit_size, MEMPROT_EXEC|MEMPROT_READ|MEMPROT_WRITE, |
| false /*not initial commit*/); |
| STATS_SUB(mmap_reserved_only, commit_size); |
| STATS_ADD_PEAK(mmap_capacity, commit_size); |
| #ifdef DEBUG_MEMORY |
| memset(p, HEAP_ALLOCATED_BYTE, commit_size); |
| #endif |
| } |
| |
| /* De-commits from a committed region. */ |
| void |
| heap_mmap_retract_commitment(void *retract_start, size_t decommit_size) |
| { |
| heap_error_code_t error_code; |
| ASSERT(ALIGNED(decommit_size, PAGE_SIZE)); |
| vmm_heap_decommit(retract_start, decommit_size, &error_code); |
| STATS_ADD(mmap_reserved_only, decommit_size); |
| STATS_ADD_PEAK(mmap_capacity, -(stats_int_t)decommit_size); |
| } |
| |
| /* Allocates executable memory in the same allocation region as this thread's |
| * stack, to save address space (case 9474). |
| */ |
| void * |
| heap_mmap_reserve_post_stack(dcontext_t *dcontext, |
| size_t reserve_size, size_t commit_size) |
| { |
| void *p; |
| byte *stack_reserve_end = NULL; |
| heap_error_code_t error_code; |
| size_t available = 0; |
| uint prot; |
| bool known_stack = false; |
| ASSERT(reserve_size > 0 && commit_size < reserve_size); |
| /* 1.5 * guard page adjustment since we'll share the middle one */ |
| if (DYNAMO_OPTION(stack_size) + reserve_size + |
| GUARD_PAGE_ADJUSTMENT + GUARD_PAGE_ADJUSTMENT / 2 > VMM_BLOCK_SIZE) { |
| /* there's not enough room to share the allocation block, stack is too big */ |
| LOG(GLOBAL, LOG_HEAP, 1, "Not enough room to allocate 0x%08x bytes post stack " |
| "of size 0x%08x\n", reserve_size, DYNAMO_OPTION(stack_size)); |
| return heap_mmap_reserve(reserve_size, commit_size); |
| } |
| if (DYNAMO_OPTION(stack_shares_gencode) && |
| /* FIXME: we could support this w/o vm_reserve, or when beyond |
| * the reservation, but we don't bother */ |
| DYNAMO_OPTION(vm_reserve) && |
| dcontext != GLOBAL_DCONTEXT && dcontext != NULL) { |
| stack_reserve_end = dcontext->dstack + GUARD_PAGE_ADJUSTMENT/2; |
| #if defined(UNIX) && !defined(HAVE_MEMINFO) |
| prot = 0; /* avoid compiler warning: should only need inside if */ |
| if (!dynamo_initialized) { |
| /* memory info is not yet set up. since so early we only support |
| * post-stack if inside vmm (won't be true only for pathological |
| * tiny vmm sizes) |
| */ |
| if (vmm_is_reserved_unit(&heapmgt->vmheap, stack_reserve_end, reserve_size)) { |
| known_stack = true; |
| available = reserve_size; |
| } else |
| known_stack = false; |
| } else |
| #elif defined(UNIX) |
| /* the all_memory_areas list doesn't keep details inside vmheap */ |
| known_stack = get_memory_info_from_os(stack_reserve_end, NULL, |
| &available, &prot); |
| #else |
| known_stack = get_memory_info(stack_reserve_end, NULL, &available, &prot); |
| #endif |
| /* If ever out of vmheap, then may have free space beyond stack, |
| * which we could support but don't (see FIXME above) */ |
| ASSERT(out_of_vmheap_once || |
| (known_stack && available >= reserve_size && prot == 0)); |
| } |
| if (!known_stack || |
| /* if -no_vm_reserve will short-circuit so no vmh deref danger */ |
| !vmm_in_same_block(dcontext->dstack, |
| /* we do want a guard page at the end */ |
| stack_reserve_end + reserve_size) || |
| available < reserve_size) { |
| ASSERT(!DYNAMO_OPTION(stack_shares_gencode) || |
| !DYNAMO_OPTION(vm_reserve) || out_of_vmheap_once); |
| DOLOG(1, LOG_HEAP, { |
| if (known_stack && available < reserve_size) { |
| LOG(GLOBAL, LOG_HEAP, 1, |
| "heap_mmap_reserve_post_stack: avail %d < needed %d\n", |
| available, reserve_size); |
| } |
| }); |
| STATS_INC(mmap_no_share_stack_region); |
| return heap_mmap_reserve(reserve_size, commit_size); |
| } |
| ASSERT(DYNAMO_OPTION(vm_reserve)); |
| ASSERT(stack_reserve_end != NULL); |
| prot = MEMPROT_EXEC|MEMPROT_READ|MEMPROT_WRITE; |
| /* memory alloc/dealloc and updating DR list must be atomic */ |
| dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ |
| /* We share the stack's end guard page as our start guard page */ |
| if (vmm_is_reserved_unit(&heapmgt->vmheap, stack_reserve_end, reserve_size)) { |
| /* Memory is already reserved with OS */ |
| p = stack_reserve_end; |
| } else { |
| p = os_heap_reserve(stack_reserve_end, reserve_size, &error_code, true/*+x*/); |
| #ifdef X64 |
| /* ensure future heap allocations are reachable from this allocation |
| * (this will also verify that this region meets reachability requirements) */ |
| if (p != NULL) |
| request_region_be_heap_reachable(p, reserve_size); |
| #endif |
| if (p == NULL) { |
| ASSERT_NOT_REACHED(); |
| LOG(GLOBAL, LOG_HEAP, 1, |
| "heap_mmap_reserve_post_stack: reserve failed "PFX"\n", error_code); |
| dynamo_vm_areas_unlock(); |
| STATS_INC(mmap_no_share_stack_region); |
| return heap_mmap_reserve(reserve_size, commit_size); |
| } |
| ASSERT(error_code == HEAP_ERROR_SUCCESS); |
| } |
| if (!vmm_heap_commit(p, commit_size, prot, &error_code)) { |
| ASSERT_NOT_REACHED(); |
| LOG(GLOBAL, LOG_HEAP, 1, "heap_mmap_reserve_post_stack: commit failed "PFX"\n", |
| error_code); |
| if (!vmm_is_reserved_unit(&heapmgt->vmheap, stack_reserve_end, reserve_size)) { |
| os_heap_free(p, reserve_size, &error_code); |
| ASSERT(error_code == HEAP_ERROR_SUCCESS); |
| } |
| dynamo_vm_areas_unlock(); |
| STATS_INC(mmap_no_share_stack_region); |
| return heap_mmap_reserve(reserve_size, commit_size); |
| } |
| account_for_memory(p, reserve_size, prot, true/*add now*/, false |
| _IF_DEBUG("heap_mmap_reserve_post_stack")); |
| dynamo_vm_areas_unlock(); |
| /* We rely on this for freeing in absence of dcontext */ |
| ASSERT((ptr_uint_t)p - GUARD_PAGE_ADJUSTMENT/2 != |
| ALIGN_BACKWARD(p, VMM_BLOCK_SIZE)); |
| #ifdef DEBUG_MEMORY |
| memset(p, HEAP_ALLOCATED_BYTE, commit_size); |
| #endif |
| LOG(GLOBAL, LOG_HEAP, 2, "heap_mmap w/ stack: %d bytes [/ %d] @ "PFX"\n", |
| commit_size, reserve_size, p); |
| STATS_ADD_PEAK(mmap_capacity, commit_size); |
| STATS_ADD_PEAK(mmap_reserved_only, (reserve_size - commit_size)); |
| STATS_INC(mmap_share_stack_region); |
| return p; |
| } |
| |
| /* De-commits memory that was allocated in the same allocation region as this |
| * thread's stack (case 9474). |
| */ |
| void |
| heap_munmap_post_stack(dcontext_t *dcontext, void *p, size_t reserve_size) |
| { |
/* Ideally we would require a valid dcontext and compare p to the stack
 * reserve end, but on detach we have no dcontext, so we use block
 * alignment instead.
 */
| DOCHECK(1, { |
| if (dcontext != NULL && dcontext != GLOBAL_DCONTEXT && |
| DYNAMO_OPTION(vm_reserve) && DYNAMO_OPTION(stack_shares_gencode)) { |
| bool at_stack_end = (p == dcontext->dstack + GUARD_PAGE_ADJUSTMENT/2); |
| bool at_block_start = ((ptr_uint_t)p - GUARD_PAGE_ADJUSTMENT/2 == |
| ALIGN_BACKWARD(p, VMM_BLOCK_SIZE)); |
| ASSERT((at_stack_end && !at_block_start) || |
| (!at_stack_end && at_block_start)); |
| } |
| }); |
| if (!DYNAMO_OPTION(vm_reserve) || |
| !DYNAMO_OPTION(stack_shares_gencode) || |
| (ptr_uint_t)p - GUARD_PAGE_ADJUSTMENT/2 == ALIGN_BACKWARD(p, VMM_BLOCK_SIZE)) { |
| heap_munmap(p, reserve_size); |
| } else { |
| /* Detach makes it a pain to pass in the commit size so |
| * we use the reserve size, which works fine. |
| */ |
| release_memory_and_update_areas((app_pc)p, reserve_size, true/*decommit*/, |
| true/*update now*/); |
| LOG(GLOBAL, LOG_HEAP, 2, "heap_munmap_post_stack: %d bytes @ "PFX"\n", |
| reserve_size, p); |
| STATS_SUB(mmap_capacity, reserve_size); |
| STATS_SUB(mmap_reserved_only, reserve_size); |
| } |
| } |
| |
/* use heap_mmap to allocate large chunks of executable memory;
 * it's mainly used to allocate our fcache units
 */
| void * |
| heap_mmap(size_t size) |
| { |
| return heap_mmap_reserve(size, size); |
| } |
| |
| /* free memory-mapped storage */ |
| void |
| heap_munmap_ex(void *p, size_t size, bool guarded) |
| { |
| #ifdef DEBUG_MEMORY |
| /* can't set to HEAP_UNALLOCATED_BYTE since really not in our address |
| * space anymore */ |
| #endif |
| release_guarded_real_memory((vm_addr_t)p, size, true/*update DR areas immediately*/, |
| guarded); |
| |
| DOSTATS({ |
| /* avoid problem w/ being called by cleanup_and_terminate after dynamo_process_exit */ |
| if (!dynamo_exited_log_and_stats) { |
| LOG(GLOBAL, LOG_HEAP, 2, "heap_munmap: %d bytes @ "PFX"\n", size, p); |
| STATS_SUB(mmap_capacity, size); |
| STATS_SUB(mmap_reserved_only, size); |
| } |
| }); |
| } |
| |
| /* free memory-mapped storage */ |
| void |
| heap_munmap(void *p, size_t size) |
| { |
| heap_munmap_ex(p, size, true/*guarded*/); |
| } |
| |
| #ifdef STACK_GUARD_PAGE |
| # define STACK_GUARD_PAGES 1 |
| #endif |
| |
/* use stack_alloc to build a stack -- it returns the top of the stack (TOS).
 * For STACK_GUARD_PAGE, it also marks the bottom STACK_GUARD_PAGES==1 page(s)
 * inaccessible to detect overflows when used.
 */
| void * |
| stack_alloc(size_t size) |
| { |
| void *p; |
| |
| /* we reserve and commit at once for now |
| * FIXME case 2330: commit-on-demand could allow larger max sizes w/o |
| * hurting us in the common case |
| */ |
| p = get_guarded_real_memory(size, size, MEMPROT_READ|MEMPROT_WRITE, true, true |
| _IF_DEBUG("stack_alloc")); |
| #ifdef DEBUG_MEMORY |
| memset(p, HEAP_ALLOCATED_BYTE, size); |
| #endif |
| |
| #ifdef STACK_GUARD_PAGE |
| /* mark the bottom page non-accessible to trap stack overflow */ |
| /* NOTE: the guard page should be included in the total memory requested */ |
| # ifdef WINDOWS |
| mark_page_as_guard((byte *)p + ((STACK_GUARD_PAGES - 1) * PAGE_SIZE)); |
| # else |
| /* FIXME: make no access, not just no write -- and update signal.c to |
| * look at reads and not just writes -- though unwritable is nearly as good |
| */ |
| # if defined(CLIENT_INTERFACE) || defined(STANDALONE_UNIT_TEST) |
| if (!standalone_library) |
| # endif |
| make_unwritable(p, STACK_GUARD_PAGES * PAGE_SIZE); |
| # endif |
| #endif |
| |
| STATS_ADD(stack_capacity, size); |
| STATS_MAX(peak_stack_capacity, stack_capacity); |
| /* stack grows from high to low */ |
| return (void *) ((ptr_uint_t)p + size); |
| } |
| |
| /* free stack storage */ |
| void |
| stack_free(void *p, size_t size) |
| { |
| if (size == 0) |
| size = DYNAMORIO_STACK_SIZE; |
| p = (void *) ((vm_addr_t)p - size); |
| release_guarded_real_memory((vm_addr_t)p, size, true/*update DR areas immediately*/, |
| true); |
| DOSTATS({ |
| if (!dynamo_exited_log_and_stats) |
| STATS_SUB(stack_capacity, size); |
| }); |
| } |
| |
| #ifdef STACK_GUARD_PAGE |
/* only checks initstack and the current dcontext's dstack;
 * does not check any dstacks on the callback stack (win32) */
| bool |
| is_stack_overflow(dcontext_t *dcontext, byte *sp) |
| { |
/* ASSUMPTION: size of stack is DYNAMORIO_STACK_SIZE = dynamo_options.stack_size.
 * Currently sideline violates that for a thread stack,
 * but all dstacks and initstack should be this size.
 */
| byte *bottom = dcontext->dstack - DYNAMORIO_STACK_SIZE; |
| /* see if in bottom guard page of dstack */ |
| if (sp >= bottom && sp < bottom + (STACK_GUARD_PAGES * PAGE_SIZE)) |
| return true; |
| /* now check the initstack */ |
| bottom = initstack - DYNAMORIO_STACK_SIZE; |
| if (sp >= bottom && sp < bottom + (STACK_GUARD_PAGES * PAGE_SIZE)) |
| return true; |
| return false; |
| } |
| #endif |
| |
| byte * |
| map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot, |
| map_flags_t map_flags) |
| { |
| byte *view; |
| /* memory alloc/dealloc and updating DR list must be atomic */ |
| dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ |
| view = os_map_file(f, size, offs, addr, prot, map_flags); |
| if (view != NULL) { |
| STATS_ADD_PEAK(file_map_capacity, *size); |
| account_for_memory((void *)view, *size, prot, true/*add now*/, true/*image*/ |
| _IF_DEBUG("map_file")); |
| } |
| dynamo_vm_areas_unlock(); |
| return view; |
| } |
| |
| bool |
| unmap_file(byte *map, size_t size) |
| { |
| bool success; |
| ASSERT(map != NULL && ALIGNED(map, PAGE_SIZE)); |
| size = ALIGN_FORWARD(size, PAGE_SIZE); |
| /* memory alloc/dealloc and updating DR list must be atomic */ |
| dynamo_vm_areas_lock(); /* if already hold lock this is a nop */ |
| success = os_unmap_file(map, size); |
| if (success) { |
/* Only update all_memory_areas on success.
 * It should still appear atomic to outside observers.
 */
| update_dynamo_areas_on_release(map, map+size, true/*remove now*/); |
| STATS_SUB(file_map_capacity, size); |
| } |
| dynamo_vm_areas_unlock(); |
| return success; |
| } |
| |
| |
| /* We cannot incrementally keep dynamo vm area list up to date due to |
| * circular dependencies bet vmareas and global heap (trust me, I've tried |
| * to support it with reentrant routines and recursive locks, the hard part |
| * is getting add_vm_area to be reentrant or to queue up adding areas, |
| * I think this solution is much more elegant, plus it avoids race conditions |
| * between DR memory allocation and the vmareas list by ensuring the list |
| * is up to date at the exact time of each query). |
| * Instead we on-demand walk the units. |
 * Freed units can usually be removed incrementally, except when we
 * hold the heap_unit_lock while we run out of memory -- then we set
 * a flag telling the caller of this routine to remove all heap areas
 * from the vm list prior to calling us to add the real ones back in.
 * Re-adding everything is the simplest policy, so we don't have to keep
 * track of what's been added.
| * The caller is assumed to hold the dynamo vm areas write lock. |
| */ |
| void |
| heap_vmareas_synch_units() |
| { |
| heap_unit_t *u, *next; |
| /* make sure to add guard page on each side, as well */ |
| uint offs = (dynamo_options.guard_pages) ? PAGE_SIZE : 0; |
| /* we again have circular dependence w/ vmareas if it happens to need a |
| * new unit in the course of adding these areas, so we use a recursive lock! |
| * furthermore, we need to own the global lock now, to avoid deadlock with |
| * another thread who does global_alloc and then needs a new unit! |
| * which means that the global_alloc lock must be recursive since vmareas |
| * may need to global_alloc... |
| */ |
/* since there's a chance we could own both locks, we must grab both now;
 * always grab global_alloc first, then we won't have deadlocks
 */
| acquire_recursive_lock(&global_alloc_lock); |
| acquire_recursive_lock(&heap_unit_lock); |
| if (dynamo_areas_pending_remove) { |
| dynamo_areas_pending_remove = false; |
| remove_dynamo_heap_areas(); |
| |
| /* When heap units are removed from the dynamo_area, they should be |
| * marked so. See case 4196. |
| */ |
| for (u = heapmgt->heap.units; u != NULL; u = u->next_global) |
| u->in_vmarea_list = false; |
| for (u = heapmgt->heap.dead; u != NULL; u = u->next_global) |
| u->in_vmarea_list = false; |
| } |
| for (u = heapmgt->heap.units; u != NULL; u = next) { |
| app_pc start = (app_pc)u - offs; |
| /* support un-aligned heap reservation end: PR 415269 (though as |
| * part of that PR we shouldn't have un-aligned anymore) |
| */ |
| app_pc end_align = (app_pc) ALIGN_FORWARD(UNIT_RESERVED_END(u), PAGE_SIZE); |
| app_pc end = end_align + offs; |
| /* u can be moved to dead list, so cache the next link; case 4196. */ |
| next = u->next_global; |
| /* case 3045: areas inside the vmheap reservation are not added to the list */ |
| if (!u->in_vmarea_list && !vmm_is_reserved_unit(&heapmgt->vmheap, |
| start, end - start)) { |
/* case 4196: if next is used by dynamo_vmareas then next
 * may become dead if the vector is resized; then u should be
 * alive and u->next_global should be reset AFTER the add */
| bool next_may_die = |
| is_dynamo_area_buffer(UNIT_GET_START_PC(next)); /* keep breaking abstractions */ |
| /* dynamo_areas.buf vector may get resized and u can either |
| * go to the dead unit list, or it can be released back to |
| * the OS. We'll mark it as being in vmarea list to avoid |
| * re-adding when going through dead one's, and we'll mark |
| * _before_ the potential free. If dynamo_areas.buf is |
| * freed back to the OS we'll have another iteration in |
| * update_dynamo_vm_areas() until we get fully |
| * synchronized, so we don't need to worry about the |
| * inconsistency. |
| */ |
| u->in_vmarea_list = true; |
| add_dynamo_heap_vm_area(start, end, true, false _IF_DEBUG("heap unit")); |
| /* NOTE: Since we could mark_dynamo_vm_areas_stale instead of adding to |
| * it, we may lose prot info about this unit. |
| * FIXME: Currently, this is done only at one place, which allocates unit |
| * as MEMPROT_READ | MEMPROT_WRITE. If other places are added, then this |
| * needs to change. |
| */ |
| update_all_memory_areas((app_pc)u, end_align, |
| MEMPROT_READ | MEMPROT_WRITE, |
| DR_MEMTYPE_DATA); /* unit */ |
| if (offs != 0) { |
| /* guard pages */ |
| update_all_memory_areas((app_pc)u - offs, (app_pc)u, MEMPROT_NONE, |
| DR_MEMTYPE_DATA); |
| update_all_memory_areas(end_align, end, MEMPROT_NONE, |
| DR_MEMTYPE_DATA); |
| } |
| if (next_may_die) { |
| STATS_INC(num_vmareas_resize_synch); |
| /* if next was potentially on dead row, then current |
| * should still be live and point to the next live |
| */ |
| next = u->next_global; |
| } |
| } |
| } |
| for (u = heapmgt->heap.dead; u != NULL; u = next) { |
| app_pc start = (app_pc)u - offs; |
| /* support un-aligned heap reservation end: PR 415269 (though as |
| * part of that PR we shouldn't have un-aligned anymore) |
| */ |
| app_pc end_align = (app_pc) ALIGN_FORWARD(UNIT_RESERVED_END(u), PAGE_SIZE); |
| app_pc end = end_align + offs; |
| /* u can be moved to live list, so cache the next link; case 4196. */ |
| next = u->next_global; |
| /* case 3045: areas inside the vmheap reservation are not added to the list */ |
| if (!u->in_vmarea_list && !vmm_is_reserved_unit(&heapmgt->vmheap, |
| start, end - start)) { |
| u->in_vmarea_list = true; |
| add_dynamo_heap_vm_area(start, end, true, false _IF_DEBUG("dead heap unit")); |
| update_all_memory_areas((app_pc)u, end_align, |
| MEMPROT_READ | MEMPROT_WRITE, |
| DR_MEMTYPE_DATA); /* unit */ |
| if (offs != 0) { |
| /* guard pages */ |
| update_all_memory_areas(start, (app_pc)u, MEMPROT_NONE, |
| DR_MEMTYPE_DATA); |
| update_all_memory_areas(end_align, end, MEMPROT_NONE, |
| DR_MEMTYPE_DATA); |
| } |
| /* case 4196 if next was put back on live list for |
| * dynamo_areas.buf vector, then next will no longer be a |
| * valid iterator over dead list |
| */ |
| if (is_dynamo_area_buffer(UNIT_GET_START_PC(next))) { /* keep breaking abstractions */ |
| STATS_INC(num_vmareas_resize_synch); |
| ASSERT_NOT_TESTED(); |
| next = u->next_global; |
| } |
| } |
| } |
| release_recursive_lock(&heap_unit_lock); |
| release_recursive_lock(&global_alloc_lock); |
| } |
| |
| /* shared between global and global_unprotected */ |
| static void * |
| common_global_heap_alloc(thread_units_t *tu, size_t size HEAPACCT(which_heap_t which)) |
| { |
| void *p; |
| acquire_recursive_lock(&global_alloc_lock); |
| p = common_heap_alloc(tu, size HEAPACCT(which)); |
| release_recursive_lock(&global_alloc_lock); |
| if (p == NULL) { |
| /* circular dependence solution: we need to hold DR lock before |
| * global alloc lock -- so we back out, grab it, and retry |
| */ |
| dynamo_vm_areas_lock(); |
| acquire_recursive_lock(&global_alloc_lock); |
| p = common_heap_alloc(tu, size HEAPACCT(which)); |
| release_recursive_lock(&global_alloc_lock); |
| dynamo_vm_areas_unlock(); |
| } |
| ASSERT(p != NULL); |
| return p; |
| } |
| |
| /* shared between global and global_unprotected */ |
| static void |
| common_global_heap_free(thread_units_t *tu, void *p, size_t size HEAPACCT(which_heap_t which)) |
| { |
| bool ok; |
| if (p == NULL) { |
| ASSERT(false && "attempt to free NULL"); |
| return; |
| } |
| |
| acquire_recursive_lock(&global_alloc_lock); |
| ok = common_heap_free(tu, p, size HEAPACCT(which)); |
| release_recursive_lock(&global_alloc_lock); |
| if (!ok) { |
| /* circular dependence solution: we need to hold DR lock before |
| * global alloc lock -- so we back out, grab it, and retry |
| */ |
| dynamo_vm_areas_lock(); |
| acquire_recursive_lock(&global_alloc_lock); |
| ok = common_heap_free(tu, p, size HEAPACCT(which)); |
| release_recursive_lock(&global_alloc_lock); |
| dynamo_vm_areas_unlock(); |
| } |
| ASSERT(ok); |
| } |
| |
| /* these functions use the global heap instead of a thread's heap: */ |
| void * |
| global_heap_alloc(size_t size HEAPACCT(which_heap_t which)) |
| { |
| void *p = common_global_heap_alloc(&heapmgt->global_units, size HEAPACCT(which)); |
| ASSERT(p != NULL); |
| LOG(GLOBAL, LOG_HEAP, 6, "\nglobal alloc: "PFX" (%d bytes)\n", p, size); |
| return p; |
| } |
| |
| void |
| global_heap_free(void *p, size_t size HEAPACCT(which_heap_t which)) |
| { |
| common_global_heap_free(&heapmgt->global_units, p, size HEAPACCT(which)); |
| LOG(GLOBAL, LOG_HEAP, 6, "\nglobal free: "PFX" (%d bytes)\n", p, size); |
| } |
| |
| |
/* reallocate area:
 * allocates new_num elements of element_size;
 * if ptr is NULL, acts like global_heap_alloc;
 * copies min(old_num, new_num) elements of the given size into the new area
 */
| /* FIXME: do a heap_realloc and a special_heap_realloc too */ |
| void * |
| global_heap_realloc(void *ptr, size_t old_num, size_t new_num, size_t element_size |
| HEAPACCT(which_heap_t which)) |
| { |
| void *new_area = global_heap_alloc(new_num * element_size HEAPACCT(which)); |
| if (ptr) { |
| memcpy(new_area, ptr, (old_num < new_num ? old_num : new_num) * element_size); |
| global_heap_free(ptr, old_num * element_size HEAPACCT(which)); |
| } |
| return new_area; |
| } |
| |
| /* size does not include guard pages (if any) and is reserved, but only |
| * DYNAMO_OPTION(heap_commit_increment) is committed up front |
| */ |
| static heap_unit_t * |
| heap_create_unit(thread_units_t *tu, size_t size, bool must_be_new) |
| { |
| heap_unit_t *u = NULL, *dead = NULL, *prev_dead = NULL; |
| bool new_unit = false; |
| |
| /* we do not restrict size to unit max as we have to make larger-than-max |
| * units for oversized requests |
| */ |
| |
| /* modifying heap list and DR areas must be atomic, and must grab |
| * DR area lock before heap_unit_lock |
| */ |
| ASSERT(safe_to_allocate_or_free_heap_units()); |
| dynamo_vm_areas_lock(); |
| /* take from dead list if possible */ |
| acquire_recursive_lock(&heap_unit_lock); |
| |
/* FIXME: need to unprotect units on which we're going to perform
 * {next,prev}_global assignments -- but need to know whether
 * to re-protect -- do all at once, or each as we need it? add a writable
 * flag to heap_unit_t?
 */
| |
| if (!must_be_new) { |
| for (dead = heapmgt->heap.dead; |
| dead != NULL && UNIT_RESERVED_SIZE(dead) < size; |
| prev_dead = dead, dead = dead->next_global) |
| ; |
| } |
| if (dead != NULL) { |
| if (prev_dead == NULL) |
| heapmgt->heap.dead = dead->next_global; |
| else |
| prev_dead->next_global = dead->next_global; |
| u = dead; |
| heapmgt->heap.num_dead--; |
| RSTATS_DEC(heap_num_free); |
| release_recursive_lock(&heap_unit_lock); |
| LOG(GLOBAL, LOG_HEAP, 2, |
| "Re-using dead heap unit: "PFX"-"PFX" %d KB (need %d KB)\n", |
| u, ((byte*)u)+size, UNIT_RESERVED_SIZE(u)/1024, size/1024); |
| } else { |
| size_t commit_size = DYNAMO_OPTION(heap_commit_increment); |
| release_recursive_lock(&heap_unit_lock); /* do not hold while asking for memory */ |
| /* create new unit */ |
| ASSERT(commit_size <= size); |
| u = (heap_unit_t *) |
| get_guarded_real_memory(size, commit_size, MEMPROT_READ|MEMPROT_WRITE, |
| false, true _IF_DEBUG("")); |
| new_unit = true; |
| /* FIXME: handle low memory conditions by freeing units, + fcache units? */ |
| ASSERT(u); |
| LOG(GLOBAL, LOG_HEAP, 2, "New heap unit: "PFX"-"PFX"\n", u, ((byte*)u)+size); |
| /* u is kept at top of unit itself, so displace start pc */ |
| u->start_pc = (heap_pc) (((ptr_uint_t)u) + sizeof(heap_unit_t)); |
| u->end_pc = ((heap_pc)u) + commit_size; |
| u->reserved_end_pc = ((heap_pc)u) + size; |
| u->in_vmarea_list = false; |
| STATS_ADD(heap_capacity, commit_size); |
| STATS_MAX(peak_heap_capacity, heap_capacity); |
| /* FIXME: heap sizes are not always page-aligned so stats will be off */ |
| STATS_ADD_PEAK(heap_reserved_only, (u->reserved_end_pc - u->end_pc)); |
| } |
| RSTATS_ADD_PEAK(heap_num_live, 1); |
| |
| u->cur_pc = u->start_pc; |
| u->next_local = NULL; |
| DODEBUG({ |
| u->id = tu->num_units; |
| tu->num_units++; |
| }); |
| |
| acquire_recursive_lock(&heap_unit_lock); |
| u->next_global = heapmgt->heap.units; |
| if (heapmgt->heap.units != NULL) |
| heapmgt->heap.units->prev_global = u; |
| u->prev_global = NULL; |
| heapmgt->heap.units = u; |
| release_recursive_lock(&heap_unit_lock); |
| dynamo_vm_areas_unlock(); |
| |
| #ifdef DEBUG_MEMORY |
| DOCHECK(CHKLVL_MEMFILL, |
| memset(u->start_pc, HEAP_UNALLOCATED_BYTE, u->end_pc - u->start_pc);); |
| #endif |
| return u; |
| } |
| |
/* dcontext is only used to determine whether this is a global unit or not */
| static void |
| heap_free_unit(heap_unit_t *unit, dcontext_t *dcontext) |
| { |
| heap_unit_t *u, *prev_u; |
| #ifdef DEBUG_MEMORY |
| /* Unit should already be set to all HEAP_UNALLOCATED by the individual |
| * frees and the free list cleanup, verify. */ |
| /* NOTE - this assert fires if any memory in the unit wasn't freed. This |
| * would include memory allocated ACCT_TOMBSTONE (which we don't currently |
| * use). Using ACCT_TOMBSTONE is dangerous since we will still free the |
| * unit here (say at proc or thread exit) even if there are ACCT_TOMBSTONE |
| * allocations in it. */ |
| /* Note, this memset check is done only on the special heap unit header, |
| * not on the unit itself - FIXME: case 10434. Maybe we should embed the |
| * special heap unit header in the first special heap unit itself. */ |
| /* The hotp_only leak relaxation below is for case 9588 & 9593. */ |
| DOCHECK(CHKLVL_MEMFILL, { |
| CLIENT_ASSERT(IF_HOTP(hotp_only_contains_leaked_trampoline |
| (unit->start_pc, unit->end_pc - unit->start_pc) ||) |
| /* i#157: private loader => system lib allocs come here => |
| * they don't always clean up. we have to relax here, but our |
| * threadunits_exit checks should find all leaks anyway. |
| */ |
| heapmgt->global_units.acct.cur_usage[ACCT_LIBDUP] > 0 || |
| is_region_memset_to_char(unit->start_pc, |
| unit->end_pc - unit->start_pc, |
| HEAP_UNALLOCATED_BYTE) |
| /* don't assert when client does premature exit as it's |
| * hard for Extension libs, etc. to clean up in such situations |
| */ |
| IF_CLIENT_INTERFACE(|| client_requested_exit), |
| "memory leak detected"); |
| }); |
| #endif |
| /* modifying heap list and DR areas must be atomic, and must grab |
| * DR area lock before heap_unit_lock |
| */ |
| ASSERT(safe_to_allocate_or_free_heap_units()); |
| dynamo_vm_areas_lock(); |
| acquire_recursive_lock(&heap_unit_lock); |
| |
/* FIXME: need to unprotect units on which we're going to perform
 * {next,prev}_global assignments -- but need to know whether
 * to re-protect -- do all at once, or each as we need it? add a writable
 * flag to heap_unit_t?
 */
| |
| /* remove from live list */ |
| if (unit->prev_global != NULL) { |
| unit->prev_global->next_global = unit->next_global; |
| } else |
| heapmgt->heap.units = unit->next_global; |
| if (unit->next_global != NULL) { |
| unit->next_global->prev_global = unit->prev_global; |
| } |
| /* prev_global is not used in the dead list */ |
| unit->prev_global = NULL; |
| RSTATS_DEC(heap_num_live); |
| |
| /* heuristic: don't keep around more dead units than max(5, 1/4 num threads) |
| * FIXME: share the policy with the fcache dead unit policy |
| * also, don't put special larger-than-max units on free list -- though |
| * we do now have support for doing so (after PR 415269) |
| */ |
| if (UNITALLOC(unit) <= HEAP_UNIT_MAX_SIZE && |
| (heapmgt->heap.num_dead < 5 || |
| heapmgt->heap.num_dead * 4U <= (uint) get_num_threads())) { |
/* Keep the dead list sorted small-to-large to avoid grabbing a large
 * unit when a small one would do, and then needing to allocate when
 * only small units are left.  Helps out with lots of small threads.
 */
| for (u = heapmgt->heap.dead, prev_u = NULL; |
| u != NULL && UNIT_RESERVED_SIZE(u) < UNIT_RESERVED_SIZE(unit); |
| prev_u = u, u = u->next_global) |
| ; |
| if (prev_u == NULL) { |
| unit->next_global = heapmgt->heap.dead; |
| heapmgt->heap.dead = unit; |
| } else { |
| unit->next_global = u; |
| prev_u->next_global = unit; |
| } |
| heapmgt->heap.num_dead++; |
| release_recursive_lock(&heap_unit_lock); |
| RSTATS_ADD_PEAK(heap_num_free, 1); |
| } else { |
| /* don't need to hold this while freeing since still hold DR areas lock */ |
| release_recursive_lock(&heap_unit_lock); |
| LOG(GLOBAL, LOG_HEAP, 1, "\tfreeing excess dead unit "PFX"-"PFX" [-"PFX"]\n", |
| unit, UNIT_COMMIT_END(unit), UNIT_RESERVED_END(unit)); |
| really_free_unit(unit); |
| } |
| /* FIXME: shrink lock-held path if we see contention */ |
| dynamo_vm_areas_unlock(); |
| } |
| |
| #ifdef DEBUG_MEMORY |
| static heap_unit_t * |
| find_heap_unit(thread_units_t *tu, heap_pc p, size_t size) |
| { |
| /* FIXME (case 6198): this is a perf hit in debug builds. But, we can't use |
| * a new vmvector b/c of circular dependences. Proposal: use custom data |
| * field of vm_area_t in dynamo_areas list for heap entries to store a pointer |
| * to the heap_unit_t struct, and add a backpointer to the owning thread_units_t |
| * in heap_unit_t. Then have to make sure it's ok lock-wise to query the |
| * dynamo_areas in the middle of an alloc or a free. It should be but for |
| * global alloc and free we will have to grab the dynamo_areas lock up front |
| * every time instead of the very rare times now when we need a new unit. |
| */ |
| heap_unit_t *unit; |
| ASSERT(!POINTER_OVERFLOW_ON_ADD(p, size)); /* should not overflow */ |
| for (unit = tu->top_unit; |
| unit != NULL && (p < unit->start_pc || p+size > unit->end_pc); |
| unit = unit->next_local); |
| return unit; |
| } |
| #endif |
| |
| static void |
| threadunits_init(dcontext_t *dcontext, thread_units_t *tu, size_t size) |
| { |
| int i; |
| DODEBUG({ |
| tu->num_units = 0; |
| }); |
| tu->top_unit = heap_create_unit(tu, size - GUARD_PAGE_ADJUSTMENT, |
| false/*can reuse*/); |
| tu->cur_unit = tu->top_unit; |
| tu->dcontext = dcontext; |
| tu->writable = true; |
| #ifdef HEAP_ACCOUNTING |
| memset(&tu->acct, 0, sizeof(tu->acct)); |
| #endif |
| for (i=0; i<BLOCK_TYPES; i++) |
| tu->free_list[i] = NULL; |
| } |
| |
| #ifdef HEAP_ACCOUNTING |
| #define MAX_5_DIGIT 99999 |
| static void |
| print_tu_heap_statistics(thread_units_t *tu, file_t logfile, const char *prefix) |
| { |
| int i; |
| size_t total = 0, cur = 0; |
| LOG(logfile, LOG_HEAP|LOG_STATS, 1, "%s heap breakdown:\n", prefix); |
| for (i = 0; i < ACCT_LAST; i++) { |
| /* print out cur since this is done periodically, not just at end */ |
| LOG(logfile, LOG_HEAP|LOG_STATS, 1, |
| "%12s: cur=%5"SZFC"K, max=%5"SZFC"K, #=%7d, 1=", |
| whichheap_name[i], tu->acct.cur_usage[i]/1024, |
| tu->acct.max_usage[i]/1024, tu->acct.num_alloc[i]); |
| if (tu->acct.max_single[i] <= MAX_5_DIGIT) |
| LOG(logfile, LOG_HEAP|LOG_STATS, 1, "%5"SZFC, tu->acct.max_single[i]); |
| else { |
| LOG(logfile, LOG_HEAP|LOG_STATS, 1, "%4"SZFC"K", |
| tu->acct.max_single[i]/1024); |
| } |
| LOG(logfile, LOG_HEAP|LOG_STATS, 1, |
| ", new=%5"SZFC"K, re=%5"SZFC"K\n", |
| tu->acct.alloc_new[i]/1024, tu->acct.alloc_reuse[i]/1024); |
| total += tu->acct.max_usage[i]; |
| cur += tu->acct.cur_usage[i]; |
| } |
| LOG(logfile, LOG_HEAP|LOG_STATS, 1, |
| "Total cur usage: %6"SZFC" KB\n", cur/1024); |
| LOG(logfile, LOG_HEAP|LOG_STATS, 1, |
| "Total max (not nec. all used simult.): %6"SZFC" KB\n", total/1024); |
| } |
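| |
| /* Illustrative sketch (excluded from the build): the fixed-width trick used |
| * for max_single above -- values that fit in 5 digits print as bytes, larger |
| * values print in KB with a 'K' suffix, so the column width stays constant. |
| * print_fixed_width_size is a hypothetical helper, not a DR function. |
| */ |
| #if 0 |
| #include <stdio.h> |
| |
| static void |
| print_fixed_width_size(FILE *f, size_t bytes) |
| { |
| if (bytes <= 99999) |
| fprintf(f, "%5zu", bytes); /* e.g. "  512" */ |
| else |
| fprintf(f, "%4zuK", bytes / 1024); /* e.g. " 128K" */ |
| } |
| #endif |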
| |
| void |
| print_heap_statistics(void) |
| { |
| /* just the current thread; don't try to walk all threads */ |
| dcontext_t *dcontext = get_thread_private_dcontext(); |
| DOSTATS({ |
| uint i; |
| LOG(GLOBAL, LOG_STATS, 1, "Heap bucket usage counts and wasted memory:\n"); |
| for (i=0; i<BLOCK_TYPES; i++) { |
| LOG(GLOBAL, LOG_STATS|LOG_HEAP, 1, |
| "%2d %3d count=%9u peak_count=%9u peak_wasted=%9u peak_align=%9u\n", |
| i, BLOCK_SIZES[i], block_total_count[i], block_peak_count[i], |
| block_peak_wasted[i], block_peak_align_pad[i]); |
| } |
| }); |
| if (dcontext != NULL) { |
| thread_heap_t *th = (thread_heap_t *) dcontext->heap_field; |
| if (th != NULL) { /* may not be initialized yet */ |
| print_tu_heap_statistics(th->local_heap, THREAD, "Thread"); |
| if (DYNAMO_OPTION(enable_reset)) { |
| ASSERT(th->nonpersistent_heap != NULL); |
| print_tu_heap_statistics(th->nonpersistent_heap, THREAD, |
| "Thread non-persistent"); |
| } |
| } |
| } |
| if (DYNAMO_OPTION(enable_reset)) { |
| print_tu_heap_statistics(&heapmgt->global_nonpersistent_units, GLOBAL, |
| "Non-persistent global units"); |
| } |
| print_tu_heap_statistics(&global_racy_units, GLOBAL, "Racy Up-to-date Process"); |
| print_tu_heap_statistics(&heapmgt->global_units, GLOBAL, |
| "Updated-at-end Process (max is total of maxes)"); |
| } |
| |
| static void |
| add_heapacct_to_global_stats(heap_acct_t *acct) |
| { |
| /* Add this thread's stats to the accurate (non-racy) global stats. |
| * FIXME: this gives a nice in-one-place total, but it loses the |
| * global-heap-only stats -- perhaps we should add a separate total_units |
| * stat to capture the total and leave the global stats alone here? |
| */ |
| uint i; |
| acquire_recursive_lock(&global_alloc_lock); |
| for (i = 0; i < ACCT_LAST; i++) { |
| heapmgt->global_units.acct.alloc_reuse[i] += acct->alloc_reuse[i]; |
| heapmgt->global_units.acct.alloc_new[i] += acct->alloc_new[i]; |
| heapmgt->global_units.acct.cur_usage[i] += acct->cur_usage[i]; |
| /* FIXME: these maxes become a sum-of-maxes rather than a true |
| * simultaneous max (see the sketch after this function) |
| */ |
| heapmgt->global_units.acct.max_usage[i] += acct->max_usage[i]; |
| heapmgt->global_units.acct.max_single[i] += acct->max_single[i]; |
| heapmgt->global_units.acct.num_alloc[i] += acct->num_alloc[i]; |
| } |
| release_recursive_lock(&global_alloc_lock); |
| } |
| #endif |
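| |
| /* Illustrative, self-contained sketch (excluded from the build): why the |
| * sum-of-maxes noted in the FIXME above can overstate the true peak. With |
| * the sample numbers below, two threads peak at different times, so the sum |
| * of their individual maxima (180) exceeds the largest combined usage seen |
| * at any single instant (120). All values are hypothetical. |
| */ |
| #if 0 |
| #include <assert.h> |
| |
| static void |
| sum_of_maxes_example(void) |
| { |
| /* heap usage of threads A and B sampled at two instants */ |
| int time0_a = 100, time0_b = 20; /* thread A peaks at time 0 */ |
| int time1_a = 30, time1_b = 80; /* thread B peaks at time 1 */ |
| int sum_of_maxes = 100 + 80; /* 180: what the merged stat reports */ |
| int peak0 = time0_a + time0_b; /* 120 */ |
| int peak1 = time1_a + time1_b; /* 110 */ |
| int true_peak = peak0 > peak1 ? peak0 : peak1; /* 120 */ |
| assert(sum_of_maxes >= true_peak); /* an overestimate, never an underestimate */ |
| } |
| #endif |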
| |
| /* dcontext only used for debugging */ |
| static void |
| threadunits_exit(thread_units_t *tu, dcontext_t *dcontext) |
| { |
| heap_unit_t *u, *next_u; |
| #ifdef DEBUG |
| size_t total_heap_used = 0; |
| # ifdef HEAP_ACCOUNTING |
| int j; |
| # endif |
| #endif |
| #ifdef DEBUG_MEMORY |
| /* verify and clear (for later asserts) the free list */ |
| uint i; |
| for (i = 0; i < BLOCK_TYPES; i++) { |
| heap_pc p, next_p; |
| for (p = tu->free_list[i]; p != NULL; p = next_p) { |
| next_p = *(heap_pc *)p; |
| /* clear the pointer to the next free for later asserts */ |
| *(heap_pc *)p = (heap_pc) HEAP_UNALLOCATED_PTR_UINT; |
| DOCHECK(CHKLVL_MEMFILL, { |
| if (i < BLOCK_TYPES-1) { |
| CLIENT_ASSERT(is_region_memset_to_char(p, BLOCK_SIZES[i], |
| HEAP_UNALLOCATED_BYTE), |
| "memory corruption detected"); |
| } else { |
| /* variable sized blocks */ |
| CLIENT_ASSERT(is_region_memset_to_char(p, VARIABLE_SIZE(p), |
| HEAP_UNALLOCATED_BYTE), |
| "memory corruption detected"); |
| /* clear the header for later asserts */ |
| MEMSET_HEADER(p, HEAP_UNALLOCATED); |
| } |
| }); |
| } |
| tu->free_list[i] = NULL; |
| } |
| #endif |
| u = tu->top_unit; |
| while (u != NULL) { |
| DOLOG(1, LOG_HEAP|LOG_STATS, { |
| size_t num_used = u->cur_pc - u->start_pc; |
| total_heap_used += num_used; |
| LOG(THREAD, |
| LOG_HEAP|LOG_STATS, 1, |
| "Heap unit %d @"PFX"-"PFX" [-"PFX"] ("SZFMT" [/"SZFMT"] KB): used " |
| SZFMT" KB\n", |
| u->id, u, UNIT_COMMIT_END(u), |
| UNIT_RESERVED_END(u), (UNIT_COMMIT_SIZE(u))/1024, |
| (UNIT_RESERVED_SIZE(u))/1024, num_used/1024); |
| }); |
| next_u = u->next_local; |
| heap_free_unit(u, dcontext); |
| u = next_u; |
| } |
| LOG(THREAD, LOG_HEAP|LOG_STATS, 1, |
| "\tTotal heap used: "SZFMT" KB\n", total_heap_used/1024); |
| #if defined(DEBUG) && defined(HEAP_ACCOUNTING) |
| /* FIXME: separate these scopes into smaller functions, one each for |
| * DEBUG_MEMORY and HEAP_ACCOUNTING |
| */ |
| for (j = 0; j < ACCT_LAST; j++) { |
| size_t usage = tu->acct.cur_usage[j]; |
| if (usage > 0) { |
| LOG(THREAD, LOG_HEAP|LOG_STATS, 1, |
| "WARNING: %s "SZFMT" bytes not freed!\n", |
| whichheap_name[j], tu->acct.cur_usage[j]); |
| |
| # ifdef HOT_PATCHING_INTERFACE /* known leaks for case 9593 */ |
| if (
|