/* **********************************************************
 * Copyright (c) 2010-2013 Google, Inc. All rights reserved.
 * Copyright (c) 2001-2010 VMware, Inc. All rights reserved.
 * **********************************************************/
/*
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * * Neither the name of VMware, Inc. nor the names of its contributors may be
 *   used to endorse or promote products derived from this software without
 *   specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2001 Hewlett-Packard Company */
/*
 * heap.h - heap manager
*/
#ifndef _HEAP_H_
#define _HEAP_H_ 1
#ifdef HEAP_ACCOUNTING
typedef enum {
    ACCT_FRAGMENT=0,
    ACCT_COARSE_LINK,
    ACCT_FRAG_FUTURE,
    ACCT_FRAG_TABLE,
    ACCT_IBLTABLE,
    ACCT_TRACE,
    ACCT_FCACHE_EMPTY,
    ACCT_VMAREA_MULTI,
    ACCT_IR,
    ACCT_AFTER_CALL,
    ACCT_VMAREAS,
    ACCT_SYMBOLS,
# ifdef SIDELINE
    ACCT_SIDELINE,
# endif
    ACCT_THCOUNTER,
    ACCT_TOMBSTONE, /* N.B.: leaks in this category are not reported;
                     * not currently used */
    ACCT_HOT_PATCHING,
    ACCT_THREAD_MGT,
    ACCT_MEM_MGT,
    ACCT_STATS,
    ACCT_SPECIAL,
# ifdef CLIENT_INTERFACE
    ACCT_CLIENT,
# endif
    ACCT_LIBDUP, /* private copies of system libs => may leak */
    ACCT_CLEANCALL,
    /* NOTE: Also update the whichheap_name in heap.c when adding here */
    ACCT_OTHER,
    ACCT_LAST
} which_heap_t;
# define HEAPACCT(x) , x
# define IF_HEAPACCT_ELSE(x, y) x
#else
# define HEAPACCT(x)
# define IF_HEAPACCT_ELSE(x, y) y
#endif
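/* Illustrative expansion (not part of this interface): HEAPACCT lets a
 * signature carry an accounting argument only in HEAP_ACCOUNTING builds.
 * A declaration such as
 *     void *global_heap_alloc(size_t size HEAPACCT(which_heap_t which));
 * becomes global_heap_alloc(size_t size, which_heap_t which) when
 * HEAP_ACCOUNTING is defined, and global_heap_alloc(size_t size) otherwise.
 * IF_HEAPACCT_ELSE(x, y) likewise selects x or y at preprocessing time.
 */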
typedef enum {
    MAP_FILE_COPY_ON_WRITE = 0x0001,
    MAP_FILE_IMAGE = 0x0002,     /* Windows-only */
    MAP_FILE_FIXED = 0x0004,     /* Linux-only */
    MAP_FILE_REACHABLE = 0x0008, /* map at a location reachable from vmcode */
} map_flags_t;
typedef byte * heap_pc;
#define HEAP_ALIGNMENT sizeof(heap_pc*)
extern vm_area_vector_t *landing_pad_areas;
#ifdef X64
/* Request that the supplied region be reachable from the DR heap via a 32-bit
 * offset.  Should be called before vmm_heap_init() so we can place the DR heap
 * to meet these constraints.  Can also be called after vmm_heap_init(), but at
 * that point it acts as an assert that the supplied region is reachable, since
 * the heap is already reserved.
 */
void
request_region_be_heap_reachable(byte *start, size_t size);
void
vmcode_get_reachable_region(byte **region_start OUT, byte **region_end OUT);
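/* Illustrative sketch (hypothetical base address and size): a caller that must
 * emit rel32 references into its own region can register that region before
 * vmm_heap_init() so heap placement honors the constraint:
 *     byte *region = (byte *)0x71000000UL;  // hypothetical preferred base
 *     request_region_be_heap_reachable(region, 64 * 1024);
 *     ...
 *     byte *lo, *hi;
 *     vmcode_get_reachable_region(&lo, &hi);
 */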
#endif
/* virtual heap manager */
void vmm_heap_init_constraints(void);
void vmm_heap_init(void);
void vmm_heap_exit(void);
void print_vmm_heap_data(file_t outf);
void get_vmm_heap_bounds(byte **heap_start/*OUT*/, byte **heap_end/*OUT*/);
byte *vmcode_get_start(void);
byte *vmcode_get_end(void);
byte *vmcode_unreachable_pc(void);
bool heap_check_option_compatibility(void);
bool is_vmm_reserved_address(byte *pc, size_t size);
bool rel32_reachable_from_vmcode(byte *target);
/* heap management */
void heap_init(void);
void heap_exit(void);
void heap_reset_init(void);
void heap_reset_free(void);
void heap_thread_init(dcontext_t *dcontext);
void heap_thread_exit(dcontext_t *dcontext);
/* re-initializes non-persistent memory */
void heap_thread_reset_init(dcontext_t *dcontext);
/* frees all non-persistent memory */
void heap_thread_reset_free(dcontext_t *dcontext);
/* these functions use the global heap instead of a thread's heap: */
void *global_heap_alloc(size_t size HEAPACCT(which_heap_t which));
void global_heap_free(void *p, size_t size HEAPACCT(which_heap_t which));
void *global_heap_realloc(void *ptr, size_t old_num, size_t new_num,
                          size_t element_size HEAPACCT(which_heap_t which));
bool lockwise_safe_to_allocate_memory(void);
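/* Illustrative sketch (size and category are arbitrary examples): global
 * allocations are freed with the same size and accounting category:
 *     void *p = global_heap_alloc(128 HEAPACCT(ACCT_OTHER));
 *     ...
 *     global_heap_free(p, 128 HEAPACCT(ACCT_OTHER));
 */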
/* use heap_mmap to allocate large chunks of executable memory */
void *heap_mmap(size_t size);
void heap_munmap(void *p, size_t size);
void *heap_mmap_reserve(size_t reserve_size, size_t commit_size);
void *heap_mmap_ex(size_t reserve_size, size_t commit_size, uint prot, bool guarded);
void heap_munmap_ex(void *p, size_t size, bool guarded);
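/* Illustrative sketch (size is hypothetical): heap_mmap() returns executable
 * memory, and heap_munmap() must be passed the same size:
 *     void *code = heap_mmap(64 * 1024);
 *     ... emit generated code into the region ...
 *     heap_munmap(code, 64 * 1024);
 */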
/* updates dynamo_areas and calls the os_ versions */
byte *
map_file(file_t f, size_t *size INOUT, uint64 offs, app_pc addr, uint prot,
         map_flags_t map_flags);
bool
unmap_file(byte *map, size_t size);
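/* Illustrative sketch (file handle f and map_size are hypothetical; assumes
 * the codebase's MEMPROT_READ/MEMPROT_WRITE constants): map a file
 * copy-on-write at an OS-chosen address, then unmap it:
 *     size_t sz = map_size;  // set by the caller
 *     byte *base = map_file(f, &sz, 0, NULL, MEMPROT_READ | MEMPROT_WRITE,
 *                           MAP_FILE_COPY_ON_WRITE);
 *     if (base != NULL)
 *         ... use the mapping, then unmap_file(base, sz) ...
 */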
/* Allocates executable memory in the same allocation region as this thread's
 * stack, to save address space (case 9474).
 */
void *
heap_mmap_reserve_post_stack(dcontext_t *dcontext,
                             size_t reserve_size, size_t commit_size);
void
heap_munmap_post_stack(dcontext_t *dcontext, void *p, size_t reserve_size);
/* It is up to the caller to ensure commit_size is a page size multiple,
 * and that it does not extend beyond the initial reservation.
 */
void
heap_mmap_extend_commitment(void *p, size_t commit_size);
void
heap_mmap_retract_commitment(void *retract_start, size_t decommit_size);
/* use stack_alloc to build a stack -- it returns TOS */
void *stack_alloc(size_t size);
void stack_free(void *p, size_t size);
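/* Illustrative sketch (size is hypothetical): since stack_alloc() returns the
 * top of stack and stacks grow downward, the usable region lies below the
 * returned pointer; stack_free() takes that same TOS pointer:
 *     void *tos = stack_alloc(32 * 1024);
 *     byte *base = (byte *)tos - 32 * 1024;  // lowest address of the stack
 *     ...
 *     stack_free(tos, 32 * 1024);
 */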
#ifdef STACK_GUARD_PAGE
/* checks if pc is in guard page on stack */
bool is_stack_overflow(dcontext_t *dcontext, byte *sp);
#endif
/* These are for thread-local allocs.
 * Passing dcontext == GLOBAL_DCONTEXT will end up calling global_heap_{alloc,free}.
 */
void *heap_alloc(dcontext_t *dcontext, size_t size HEAPACCT(which_heap_t which));
void heap_free(dcontext_t *dcontext, void *p, size_t size HEAPACCT(which_heap_t which));
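/* Illustrative sketch (foo_t and the category are hypothetical): per-thread
 * allocation tied to a dcontext; passing GLOBAL_DCONTEXT instead routes to
 * global_heap_{alloc,free}:
 *     foo_t *f = (foo_t *)
 *         heap_alloc(dcontext, sizeof(foo_t) HEAPACCT(ACCT_OTHER));
 *     ...
 *     heap_free(dcontext, f, sizeof(foo_t) HEAPACCT(ACCT_OTHER));
 */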
#ifdef HEAP_ACCOUNTING
void print_heap_statistics(void);
#endif
/* FIXME: persistence is yet another dimension here
 * let's clean all these up and have a single alloc routine?
 */
void *nonpersistent_heap_alloc(dcontext_t *dcontext, size_t size
                               HEAPACCT(which_heap_t which));
void nonpersistent_heap_free(dcontext_t *dcontext, void *p, size_t size
                             HEAPACCT(which_heap_t which));
bool local_heap_protected(dcontext_t *dcontext);
void protect_local_heap(dcontext_t *dcontext, bool writable);
void protect_global_heap(bool writable);
void *global_unprotected_heap_alloc(size_t size HEAPACCT(which_heap_t which));
void global_unprotected_heap_free(void *p, size_t size HEAPACCT(which_heap_t which));
#define UNPROTECTED_LOCAL_ALLOC(dc, ...) global_unprotected_heap_alloc(__VA_ARGS__)
#define UNPROTECTED_LOCAL_FREE(dc, ...) global_unprotected_heap_free(__VA_ARGS__)
#define UNPROTECTED_GLOBAL_ALLOC global_unprotected_heap_alloc
#define UNPROTECTED_GLOBAL_FREE global_unprotected_heap_free
/* use global heap for shared fragments and their related data structures */
#define FRAGMENT_ALLOC_DC(dc, flags) (TEST(FRAG_SHARED, (flags)) ? GLOBAL_DCONTEXT : (dc))
#define FRAGMENT_TABLE_ALLOC_DC(dc, flags) \
    (TEST(HASHTABLE_SHARED, (flags)) ? GLOBAL_DCONTEXT : (dc))
/* convenience for allocating a single type: does cast, sizeof, and HEAPACCT
 * for you, and takes a param for whether it is protected or not
 */
#define PROTECTED true
#define UNPROTECTED false
#define HEAP_ARRAY_ALLOC(dc, type, num, which, protected) \
    ((protected) ? \
     (type *) heap_alloc(dc, sizeof(type)*(num) HEAPACCT(which)) : \
     (type *) UNPROTECTED_LOCAL_ALLOC(dc, sizeof(type)*(num) HEAPACCT(which)))
#define HEAP_TYPE_ALLOC(dc, type, which, protected) \
    HEAP_ARRAY_ALLOC(dc, type, 1, which, protected)
#define HEAP_ARRAY_ALLOC_MEMSET(dc, type, num, which, protected, val) \
    memset(HEAP_ARRAY_ALLOC(dc, type, num, which, protected), (val), \
           sizeof(type)*(num))
#define HEAP_ARRAY_FREE(dc, p, type, num, which, protected) \
    ((protected) ? \
     heap_free(dc, (type*)p, sizeof(type)*(num) HEAPACCT(which)) : \
     UNPROTECTED_LOCAL_FREE(dc, (type*)p, sizeof(type)*(num) HEAPACCT(which)))
#define HEAP_TYPE_FREE(dc, p, type, which, protected) \
    HEAP_ARRAY_FREE(dc, p, type, 1, which, protected)
/* nonpersistent heap is assumed to be protected */
#define NONPERSISTENT_HEAP_ARRAY_ALLOC(dc, type, num, which) \
    (type *) nonpersistent_heap_alloc(dc, sizeof(type)*(num) HEAPACCT(which))
#define NONPERSISTENT_HEAP_TYPE_ALLOC(dc, type, which) \
    NONPERSISTENT_HEAP_ARRAY_ALLOC(dc, type, 1, which)
#define NONPERSISTENT_HEAP_ARRAY_FREE(dc, p, type, num, which) \
    nonpersistent_heap_free(dc, (type*)p, sizeof(type)*(num) HEAPACCT(which))
#define NONPERSISTENT_HEAP_TYPE_FREE(dc, p, type, which) \
    NONPERSISTENT_HEAP_ARRAY_FREE(dc, p, type, 1, which)
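/* Illustrative sketch (foo_t, the count, and the category are hypothetical):
 * alloc and free must agree on type, count, category, and protection:
 *     foo_t *arr = HEAP_ARRAY_ALLOC(dcontext, foo_t, 10, ACCT_OTHER,
 *                                   PROTECTED);
 *     ...
 *     HEAP_ARRAY_FREE(dcontext, arr, foo_t, 10, ACCT_OTHER, PROTECTED);
 * The NONPERSISTENT_* variants take no protection parameter:
 *     foo_t *np = NONPERSISTENT_HEAP_TYPE_ALLOC(dcontext, foo_t, ACCT_OTHER);
 *     ...
 *     NONPERSISTENT_HEAP_TYPE_FREE(dcontext, np, foo_t, ACCT_OTHER);
 */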
/* special heap of same-sized blocks that avoids global locks */
void *special_heap_init(uint block_size, bool use_lock, bool executable,
                        bool persistent);
void special_heap_exit(void *special);
void *special_heap_alloc(void *special);
void *special_heap_calloc(void *special, uint num);
void special_heap_free(void *special, void *p);
void special_heap_cfree(void *special, void *p, uint num);
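/* Illustrative sketch (block size is hypothetical): the opaque handle from
 * special_heap_init() is passed to every subsequent call:
 *     void *sh = special_heap_init(64, true, false, true);
 *     void *block = special_heap_alloc(sh);
 *     ...
 *     special_heap_free(sh, block);
 *     special_heap_exit(sh);
 */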
#if defined(WINDOWS_PC_SAMPLE) && !defined(DEBUG)
void special_heap_profile_exit(void);
#endif
/* iterator over units in a special heap */
typedef struct _special_heap_iterator {
    /* opaque fields, exposed only to support stack-allocated iterators */
    void *heap;
    void *next_unit;
} special_heap_iterator_t;
/* Special heap iterator.  Initialized with special_heap_iterator_start() and
 * destroyed with special_heap_iterator_stop(), which releases the lock.
 * If the special heap uses no lock for allocation, it is up to the caller
 * to synchronize and prevent races.
 * The accessor special_heap_iterator_next() should be called only when the
 * predicate special_heap_iterator_hasnext() returns true.
 * For heaps that use a lock for allocation, any mutation of the heap while
 * iterating will result in a deadlock.
 */
void
special_heap_iterator_start(void *heap, special_heap_iterator_t *shi);
bool
special_heap_iterator_hasnext(special_heap_iterator_t *shi);
void
special_heap_iterator_next(special_heap_iterator_t *shi /* IN/OUT */,
                           app_pc *heap_start /* OUT */, app_pc *heap_end /* OUT */);
void
special_heap_iterator_stop(special_heap_iterator_t *shi);
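/* Illustrative sketch (sh is a handle from special_heap_init()): walk the
 * units under the iterator's lock, performing no allocs on this heap inside
 * the loop, per the deadlock caveat above:
 *     special_heap_iterator_t shi;
 *     app_pc start, end;
 *     special_heap_iterator_start(sh, &shi);
 *     while (special_heap_iterator_hasnext(&shi)) {
 *         special_heap_iterator_next(&shi, &start, &end);
 *         ... examine [start, end) ...
 *     }
 *     special_heap_iterator_stop(&shi);
 */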
/* Special heap w/ a vector for lookups */
void *
special_heap_pclookup_init(uint block_size, bool use_lock, bool executable,
                           bool persistent, vm_area_vector_t *vector,
                           void *vector_data, byte *heap_region,
                           size_t heap_size, bool unit_full);
void
special_heap_set_vector_data(void *special, void *vector_data);
/* Returns false if the special heap has more than one unit or has a
 * non-externally-allocated unit.
 * Sets the cur pc for the only unit to end_pc.
 */
bool
special_heap_set_unit_end(void *special, byte *end_pc);
void heap_vmareas_synch_units(void);
/* Utility routine to delete special heap locks; needed for case 9593. */
void special_heap_delete_lock(void *special);
#ifdef DEBUG_MEMORY
# define HEAP_TO_BYTE_EX(hex) 0x##hex
# define HEAP_TO_BYTE(hex) HEAP_TO_BYTE_EX(hex)
# define HEAP_TO_UINT_EX(hex) 0x##hex##hex##hex##hex
# define HEAP_TO_UINT(hex) HEAP_TO_UINT_EX(hex)
# ifdef X64
# define HEAP_TO_PTR_UINT_EX(hex) 0x##hex##hex##hex##hex##hex##hex##hex##hex
# else
# define HEAP_TO_PTR_UINT_EX(hex) 0x##hex##hex##hex##hex
# endif
# define HEAP_TO_PTR_UINT(hex) HEAP_TO_PTR_UINT_EX(hex)
# define HEAP_UNALLOCATED cd
# define HEAP_UNALLOCATED_BYTE HEAP_TO_BYTE(HEAP_UNALLOCATED)
# define HEAP_UNALLOCATED_UINT HEAP_TO_UINT(HEAP_UNALLOCATED)
# define HEAP_UNALLOCATED_PTR_UINT HEAP_TO_PTR_UINT(HEAP_UNALLOCATED)
# define HEAP_ALLOCATED ab
# define HEAP_ALLOCATED_BYTE HEAP_TO_BYTE(HEAP_ALLOCATED)
# define HEAP_ALLOCATED_UINT HEAP_TO_UINT(HEAP_ALLOCATED)
# define HEAP_ALLOCATED_PTR_UINT HEAP_TO_PTR_UINT(HEAP_ALLOCATED)
# define HEAP_PAD bc
# define HEAP_PAD_BYTE HEAP_TO_BYTE(HEAP_PAD)
# define HEAP_PAD_UINT HEAP_TO_UINT(HEAP_PAD)
# define HEAP_PAD_PTR_UINT HEAP_TO_PTR_UINT(HEAP_PAD)
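/* Illustrative expansion: the token-pasting helpers above replicate a
 * one-byte hex pattern to the width of the type, e.g. HEAP_UNALLOCATED_BYTE
 * is 0xcd, HEAP_UNALLOCATED_UINT is 0xcdcdcdcd, and HEAP_UNALLOCATED_PTR_UINT
 * is 0xcdcdcdcdcdcdcdcd on X64 (0xcdcdcdcd otherwise).
 */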
#endif
#endif /* _HEAP_H_ */