/* **********************************************************
* Copyright (c) 2010-2014 Google, Inc. All rights reserved.
* Copyright (c) 2002-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2002-2003 Massachusetts Institute of Technology */
/*
* vmareas.c - virtual memory executable areas
*/
#include "globals.h"
/* all of this for selfmod handling */
#include "fragment.h"
#include "instr.h"
#include "decode.h"
#include "decode_fast.h"
#include "link.h"
#include "disassemble.h"
#include "fcache.h"
#include "hotpatch.h"
#include "moduledb.h"
#include "module_shared.h"
#include "perscache.h"
#include "translate.h"
#ifdef WINDOWS
# include "events.h" /* event log messages - not supported yet on Linux */
#endif
#ifdef CLIENT_INTERFACE
# include "instrument.h"
#endif
#ifdef DEBUG
# include "synch.h" /* all_threads_synch_lock */
#endif
#include <string.h>
enum {
/* VM_ flags to distinguish region types
* We also use some FRAG_ flags (but in a separate field so no value space overlap)
* Adjacent regions w/ different flags are never merged.
*/
VM_WRITABLE = 0x0001, /* app memory writable? */
/* UNMOD_IMAGE means the region was mmapped in and has been read-only since then
* this excludes even loader modifications (IAT update, relocate, etc.) on win32!
*/
VM_UNMOD_IMAGE = 0x0002,
VM_DELETE_ME = 0x0004, /* on delete queue -- for thread-local only */
/* NOTE : if a new area is added that overlaps an existing area with a
* different VM_WAS_FUTURE flag, the areas will be merged with the flag
* taken from the new area, see FIXME in add_vm_area */
VM_WAS_FUTURE = 0x0008, /* moved from future list to exec list */
VM_DR_HEAP = 0x0010, /* DR heap area */
VM_ONCE_ONLY = 0x0020, /* on future list but should be removed on
* first exec */
/* FIXME case 7877, 3744: need to properly merge pageprot regions with
* existing selfmod regions before we can truly separate this. For now we
* continue to treat selfmod as pageprot.
*/
VM_MADE_READONLY = VM_WRITABLE/* FIXME: should be 0x0040 -- see above */,
/* DR has marked this region read
* only for consistency, should only be used
* in conjunction with VM_WRITABLE */
VM_DELAY_READONLY = 0x0080, /* dr has not yet marked this region read
* only for consistency, should only be used
* in conjunction with VM_WRITABLE */
#ifdef PROGRAM_SHEPHERDING
/* re-verify this region for code origins policies every time it is
* encountered. only used with selfmod regions that are only allowed if
* they match patterns, to prevent other threads from writing non-pattern
* code though and executing after the region has been approved.
* xref case 4020. can remove once we split code origins list from
* cache consistency list (case 3744).
*/
VM_PATTERN_REVERIFY = 0x0100,
#endif
VM_DRIVER_ADDRESS = 0x0200,
/* a driver hooker area, needed for case 9022. Note we can
* normally read properties only of user mode addresses, so we
* have to probe addresses in this area. Also note that we're
* still executing all of this code in user mode e.g. there is no
* mode switch, no conforming segments, etc.
*/
/* Does this region contain a persisted cache?
* Must also be FRAG_COARSE_GRAIN of course.
* This is a shortcut to reading custom.client->persisted.
* This is not guaranteed to be set on shared_data: only on executable_areas.
*/
VM_PERSISTED_CACHE = 0x0400,
/* Case 10584: avoid flush synch when no code has been executed */
VM_EXECUTED_FROM = 0x0800,
/* A workaround for lock rank issues: we delay adding loaded persisted
* units to shared_data until first asked about.
* This flags is NOT propagated on vmarea splits.
*/
VM_ADD_TO_SHARED_DATA = 0x1000,
};
/* simple way to disable sandboxing */
#define SANDBOX_FLAG() (INTERNAL_OPTION(cache_consistency) ? FRAG_SELFMOD_SANDBOXED : 0)
/* Fields only used for written_areas */
typedef struct _ro_vs_sandbox_data_t {
/* written_count only used for written_areas vector.
* if > 0, areas will NOT be merged, so we can keep separate
* counts by page (hopefully not making the list too long).
*/
uint written_count;
/* Used only for -sandbox2ro_threshold. It's only in the
* written_areas vector b/c executable_areas has its regions removed
* on a flush while threads could still be accessing counters in
* selfmod fragments in the cache. We lose some granularity here but
* it's not a big deal.
* We could make these both ushorts, but it'd be more of a pain
* to increment this counter from the cache then, worrying about overflows.
*/
uint selfmod_execs;
#ifdef DEBUG
uint ro2s_xfers;
uint s2ro_xfers;
#endif
} ro_vs_sandbox_data_t;
/* Our executable area list has three types of areas. Each type can be merged
* with adjacent areas of the same type but not with any of the other types!
* 1) originally RO code == we leave alone
* 2) originally RW code == we mark RO
* 3) originally RW code, written to from within itself == we leave RW and sandbox
* We keep all three types in the same list b/c any particular address interval
* can only be of one type at any one time, and all three are executable, meaning
* code cache code was copied from there.
*/
typedef struct vm_area_t {
app_pc start;
app_pc end; /* open end interval */
/* We have two different flags fields to allow easy use of the FRAG_ flags.
* The two combined are used to distinguish different regions.
* Adjacent regions w/ different flags are never merged.
*/
/* Flags that start with VM_ */
uint vm_flags;
/* Flags that start with FRAG_
* In use now are FRAG_SELFMOD_SANDBOXED and FRAG_DYNGEN.
*/
uint frag_flags;
#ifdef DEBUG
char *comment;
#endif
/********************
* custom fields not used in all vectors
* FIXME: separate into separately-allocated piece? or have a struct
* extension (poor man's subclass, like trace_t, etc.) and make our vector
* iterators handle it?
* once we have a generic interval data structure (case 6208) this
* hardcoding of individual uses will go away.
*/
union {
/* Used in per-thread and shared vectors, not in master area lists.
* We identify vectors using this via VECTOR_FRAGMENT_LIST, needed
* b/c {add,remove}_vm_area have special behavior for frags.
*/
fragment_t *frags;
/* for clients' custom use via vmvector interfaces */
void *client;
} custom;
} vm_area_t;
/* for each thread we record all executable areas, to make it faster
* to decide whether we need to flush any fragments on an munmap
*/
typedef struct thread_data_t {
vm_area_vector_t areas;
/* cached pointer to last area encountered by thread */
vm_area_t *last_area;
/* FIXME: for locality would be nice to have per-thread last_shared_area
* (cannot put shared in private last_area, that would void its usefulness
* since couldn't tell if area really in shared list or not)
* but then have to update all other threads whenever change shared
* vmarea vector, so for now we use a global last_area
*/
/* cached pointer of a PC in the last page decoded by thread -- set only
* in thread-private structures, not in shared structures like shared_data */
app_pc last_decode_area_page_pc;
bool last_decode_area_valid; /* since no sentinel exists */
#ifdef PROGRAM_SHEPHERDING
uint thrown_exceptions; /* number of responses to execution violations */
#endif
} thread_data_t;
#define SHOULD_LOCK_VECTOR(v) \
(TEST(VECTOR_SHARED, (v)->flags) && \
!TEST(VECTOR_NO_LOCK, (v)->flags) && \
!self_owns_write_lock(&(v)->lock))
#define LOCK_VECTOR(v, release_lock, RW) do { \
if (SHOULD_LOCK_VECTOR(v)) { \
(release_lock) = true; \
RW##_lock(&(v)->lock); \
} \
else \
(release_lock) = false; \
} while (0);
#define UNLOCK_VECTOR(v, release_lock, RW) do { \
if ((release_lock)) { \
ASSERT(TEST(VECTOR_SHARED, (v)->flags)); \
ASSERT(!TEST(VECTOR_NO_LOCK, (v)->flags)); \
ASSERT_OWN_READWRITE_LOCK(true, &(v)->lock); \
RW##_unlock(&v->lock); \
} \
} while (0);
/* these two global vectors store all executable areas and all dynamo
* areas (executable or otherwise).
* executable_areas' custom field is used to store coarse unit info.
* for a FRAG_COARSE_GRAIN region, an info struct is always present, even
* if not yet executed from (initially, or after a flush).
*/
static vm_area_vector_t *executable_areas;
static vm_area_vector_t *dynamo_areas;
/* Protected by executable_areas lock; used only to delete coarse_info_t
* while holding executable_areas lock during execute-less flushes
* (case 10995). Extra layer of indirection to get on heap and avoid .data
* unprotection.
*/
static coarse_info_t **coarse_to_delete;
/* used for DYNAMO_OPTION(handle_DR_modify),
* DYNAMO_OPTION(handle_ntdll_modify) == DR_MODIFY_NOP or
* DYNAMO_OPTION(patch_proof_list)
*/
static vm_area_vector_t *pretend_writable_areas;
/* used for DYNAMO_OPTION(patch_proof_list) areas to watch */
vm_area_vector_t *patch_proof_areas;
/* used for DYNAMO_OPTION(emulate_IAT_writes), though in future may be
* expanded, so not just ifdef WINDOWS or ifdef PROGRAM_SHEPHERDING
*/
vm_area_vector_t *emulate_write_areas;
/* used for DYNAMO_OPTION(IAT_convert)
* IAT or GOT areas of all mapped DLLs - note the exact regions are added here.
* While the IATs for modules in native_exec_areas are not added here -
* note that any module's IAT may still be importing native modules.
*/
vm_area_vector_t *IAT_areas;
/* Keeps persistent written-to and execution counts for switching back and
* forth from page prot to sandboxing.
*/
static vm_area_vector_t *written_areas;
static void free_written_area(void *data);
#ifdef PROGRAM_SHEPHERDING
/* for executable_if_flush and executable_if_alloc, we need a future list, so their regions
* are considered executable until de-allocated -- even if written to!
*/
static vm_area_vector_t *futureexec_areas;
# ifdef WINDOWS
/* FIXME: for -xdata_rct we only need start pc called on, so htable would do,
* once we have reusable htable for storing single pc
*/
static vm_area_vector_t *app_flushed_areas;
# endif
#endif
/* tamper resistant region see tamper_resistant_region_add() for current use.
* If needed this should be turned into a vm_area_vector_t as well.
*/
static app_pc tamper_resistant_region_start, tamper_resistant_region_end;
/* shared_data is synchronized via either single_thread_in_DR or
* the vector lock (cannot use bb_building_lock b/c both trace building
* and pc translation need read access and neither can/should grab
* the bb building lock, plus it's cleaner to not depend on it, and now
* with -shared_traces it's not sufficient).
* N.B.: the vector lock is used to protect not just the vector, but also
* the whole thread_data_t struct (including last_area) and sequences
* of vector operations.
* Kept on the heap for selfprot (case 7957).
*/
static thread_data_t *shared_data; /* set in vm_areas_reset_init() */
typedef struct _pending_delete_t {
#ifdef DEBUG
/* record bounds of original deleted region, for debugging only */
app_pc start;
app_pc end;
#endif
/* list of unlinked fragments that are waiting to be deleted */
fragment_t *frags;
/* ref count and timestamp to determine when it's safe to delete them */
uint ref_count;
uint flushtime_deleted;
/* we use a simple linked list of entries */
struct _pending_delete_t *next;
} pending_delete_t;
/* We keep these list pointers on the heap for selfprot (case 8074). */
typedef struct _deletion_lists_t {
/* Unlike private vm lists, we cannot simply mark shared_data vm areas as
* deleted since new fragments come in concurrently, so we have to have a
* separate list of flushed-but-not-yet-deleted areas. We can't use a
* vm_area_vector_t b/c newly flushed fragments spoil our ref count by resetting
* it, so we keep a linked list of fragment lists.
*/
pending_delete_t *shared_delete;
/* We maintain the tail solely for fcache_free_pending_units() */
pending_delete_t *shared_delete_tail;
/* count used for reset threshold */
uint shared_delete_count;
/* shared lazy deletion: a list of fragment_t chained via next_vmarea that
* are pending deletion, but are only freed when a shared deletion event
* shows that it is safe to do so.
*/
fragment_t *lazy_delete_list;
/* stores the end of the list, for appending */
fragment_t *lazy_delete_tail;
/* stores the length of the lazy list */
uint lazy_delete_count;
/* ensure only one thread tries to move to pending deletion list */
bool move_pending;
} deletion_lists_t;
static deletion_lists_t *todelete;
typedef struct _last_deallocated_t {
/* case 9330 - we want to detect races during DLL unloads, and to
* silence a reported violation during unload. At least DLLs are
* expected to be already serialized by the loader so keeping only
* one is sufficient (note Win2K3 doesn't hold lock only during
* process initialization). We'll also keep references to the
* last DLL that was unloaded for diagnostics. Although, that is
* not reliable enough when multiple DLLs are involved - case 6061
* should be used for better tracking after unload.
*/
/* Yet loss of integrity is tolerable, as long as detected. Since
* we currently mark all mappings they are not necessarily
* serialized (and potentially other apps can directly map, so
* can't really count on the loader lock for integrity). We
* should make sure that we do not set unload_in_progress unless
* [last_unload_base, last_unload_size) is really still the
* current module.
*/
bool unload_in_progress;
app_pc last_unload_base;
size_t last_unload_size;
/* FIXME: we may want to overload the above or add different
* fields for non image (MEM_MAPPED) unmaps, and DGC (MEM_PRIVATE)
* frees. Note that we avoid keeping lists of active unloads, or
* even to deal with case 9371 we would need intersection of
* overlapping app syscalls. If we serialize app syscalls as
* proposed case 545 a single one will be sufficient.
*/
} last_deallocated_t;
static last_deallocated_t *last_deallocated;
/* synchronization currently used only for the contents of
* last_deallocated: last_unload_base and last_unload_size
*/
DECLARE_CXTSWPROT_VAR(static mutex_t last_deallocated_lock,
INIT_LOCK_FREE(last_deallocated_lock));
/* synchronization for shared_delete, not a rw lock since readers usually write */
DECLARE_CXTSWPROT_VAR(mutex_t shared_delete_lock, INIT_LOCK_FREE(shared_delete_lock));
/* synchronization for the lazy deletion list */
DECLARE_CXTSWPROT_VAR(static mutex_t lazy_delete_lock, INIT_LOCK_FREE(lazy_delete_lock));
/* multi_entry_t allocation is either global or local heap */
#define MULTI_ALLOC_DC(dc, flags) FRAGMENT_ALLOC_DC(dc, flags)
#define GET_DATA(dc, flags) \
(((dc) == GLOBAL_DCONTEXT || TEST(FRAG_SHARED, (flags))) ? shared_data : \
(thread_data_t *) (dc)->vm_areas_field)
#define GET_VECTOR(dc, flags) \
(((dc) == GLOBAL_DCONTEXT || TEST(FRAG_SHARED, (flags))) ? \
(TEST(FRAG_WAS_DELETED, (flags)) ? NULL : &shared_data->areas) : \
(&((thread_data_t *)(dc)->vm_areas_field)->areas))
#define SHARED_VECTOR_RWLOCK(v, rw, op) do { \
if (TEST(VECTOR_SHARED, (v)->flags)) { \
ASSERT(SHARED_FRAGMENTS_ENABLED()); \
rw##_##op(&(v)->lock); \
} \
} while (0)
#define ASSERT_VMAREA_DATA_PROTECTED(data, RW) \
ASSERT_OWN_##RW##_LOCK((data == shared_data && \
!INTERNAL_OPTION(single_thread_in_DR)), \
&shared_data->areas.lock)
/* FIXME: find a way to assert that an area by itself is synchronized if
* it points into a vector for the routines that take in only areas
*/
#ifdef DEBUG
# define ASSERT_VMAREA_VECTOR_PROTECTED(v, RW) do { \
ASSERT_OWN_##RW##_LOCK(SHOULD_LOCK_VECTOR(v) && \
!dynamo_exited, &(v)->lock); \
if ((v) == dynamo_areas) { \
ASSERT(dynamo_areas_uptodate || dynamo_areas_synching); \
} \
} while (0);
#else
# define ASSERT_VMAREA_VECTOR_PROTECTED(v, RW) /* nothing */
#endif
/* size of security violation string - must be at least 16 */
#define MAXIMUM_VIOLATION_NAME_LENGTH 16
#define VMVECTOR_INITIALIZE_VECTOR(v, flags, lockname) do { \
vmvector_init_vector((v), (flags)); \
ASSIGN_INIT_READWRITE_LOCK_FREE((v)->lock, lockname); \
} while (0);
/* forward declarations */
static void
vmvector_free_vector(dcontext_t *dcontext, vm_area_vector_t *v);
static void
vm_area_clean_fraglist(dcontext_t *dcontext, vm_area_t *area);
static bool
lookup_addr(vm_area_vector_t *v, app_pc addr, vm_area_t **area);
#if defined(DEBUG) && defined(INTERNAL)
static void
print_fraglist(dcontext_t *dcontext, vm_area_t *area, const char *prefix);
static void
print_written_areas(file_t outf);
#endif
#ifdef DEBUG
static void
exec_area_bounds_match(dcontext_t *dcontext, thread_data_t *data);
#endif
static void
update_dynamo_vm_areas(bool have_writelock);
static void
dynamo_vm_areas_start_reading(void);
static void
dynamo_vm_areas_done_reading(void);
#ifdef PROGRAM_SHEPHERDING
static bool
remove_futureexec_vm_area(app_pc start, app_pc end);
DECLARE_CXTSWPROT_VAR(static mutex_t threads_killed_lock,
INIT_LOCK_FREE(threads_killed_lock));
void
mark_unload_future_added(app_pc module_base, size_t size);
#endif
static void
vm_area_coarse_region_freeze(dcontext_t *dcontext, coarse_info_t *info,
vm_area_t *area, bool in_place);
#ifdef SIMULATE_ATTACK
/* synch simulate_at string parsing */
DECLARE_CXTSWPROT_VAR(static mutex_t simulate_lock, INIT_LOCK_FREE(simulate_lock));
#endif
/* used to determine when we need to do another heap walk to keep
* dynamo vm areas up to date (can't do it incrementally b/c of
* circular dependencies).
* protected for both read and write by dynamo_areas->lock
*/
/* Case 3045: areas inside the vmheap reservation are not added to the list,
* so the vector is considered uptodate until we run out of reservation
*/
DECLARE_FREQPROT_VAR(static bool dynamo_areas_uptodate, true);
#ifdef DEBUG
/* used for debugging to tell when uptodate can be false.
* protected for both read and write by dynamo_areas->lock
*/
DECLARE_FREQPROT_VAR(static bool dynamo_areas_synching, false);
#endif
/* HACK to make dynamo_areas->lock recursive
* protected for both read and write by dynamo_areas->lock
* FIXME: provide general rwlock w/ write portion recursive
*/
DECLARE_CXTSWPROT_VAR(uint dynamo_areas_recursion, 0);
/* used for DR area debugging */
bool vm_areas_exited = false;
/***************************************************
* flushing by walking entire hashtable is too slow, so we keep a list of
* all fragments in each region.
* to save memory, we use the fragment_t struct as the linked list entry
* for these lists. However, some fragments are on multiple lists due to
* crossing boundaries (usually traces). For those, the other entries are
* pointed to by an "also" field, and the entries themselves use this struct,
* which plays games (similar to fcache's empty_slot_t) to be able to be used
* like a fragment_t struct in the lists.
*
* this is better than the old fragment_t->app_{min,max}_pc performance wise,
* and granularity-wise for blocks that bounce over regions, but worse
* granularity-wise since if want to flush singe page in text
* section, will end up flushing entire region. especially scary in face of
* merges of adjacent regions, but merges are rare for images since
* they usually have more than just text, so texts aren't adjacent.
*
* FIXME: better way, now that fcache supports multiple units, is to have
* a separate unit for each source vmarea. common case will be a flush to
* an un-merged or clipped area, so just toss whole unit.
*/
typedef struct _multi_entry_t {
fragment_t *f; /* backpointer */
/* flags MUST be at same location as fragment_t->flags
* we set flags==FRAG_IS_EXTRA_VMAREA to indicate a multi_entry_t
* we also use FRAG_SHARED to indicate that a multi_entry_t is on global heap
*/
uint flags;
/* officially all list entries are fragment_t *, really some are multi_entry_t */
fragment_t *next_vmarea;
fragment_t *prev_vmarea;
fragment_t *also_vmarea; /* if in multiple areas */
/* need to be able to look up vmarea: area not stored since vmareas
* shift and merge, so we store original pc */
app_pc pc;
} multi_entry_t;
/* macros to make dealing with both fragment_t and multi_entry_t easier */
#define FRAG_MULTI(f) (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags))
#define FRAG_MULTI_INIT(f) (TESTALL((FRAG_IS_EXTRA_VMAREA|FRAG_IS_EXTRA_VMAREA_INIT), (f)->flags))
#define FRAG_NEXT(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \
((multi_entry_t *)(f))->next_vmarea : (f)->next_vmarea)
#define FRAG_NEXT_ASSIGN(f, val) do { \
if (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) \
((multi_entry_t *)(f))->next_vmarea = (val); \
else \
(f)->next_vmarea = (val); \
} while (0)
#define FRAG_PREV(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \
((multi_entry_t *)(f))->prev_vmarea : (f)->prev_vmarea)
#define FRAG_PREV_ASSIGN(f, val) do { \
if (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) \
((multi_entry_t *)(f))->prev_vmarea = (val); \
else \
(f)->prev_vmarea = (val); \
} while (0)
/* Case 8419: also_vmarea is invalid once we 1st-stage-delete a fragment */
#define FRAG_ALSO(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \
((multi_entry_t *)(f))->also_vmarea : \
(ASSERT(!TEST(FRAG_WAS_DELETED, (f)->flags)), (f)->also.also_vmarea))
/* Only call this one to avoid the assert when you know it's safe */
#define FRAG_ALSO_DEL_OK(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \
((multi_entry_t *)(f))->also_vmarea : (f)->also.also_vmarea)
#define FRAG_ALSO_ASSIGN(f, val) do { \
if (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) \
((multi_entry_t *)(f))->also_vmarea = (val); \
else { \
ASSERT(!TEST(FRAG_WAS_DELETED, (f)->flags)); \
(f)->also.also_vmarea = (val); \
} \
} while (0)
/* assumption: if multiple units, fragment_t is on list of region owning tag */
#define FRAG_PC(f) ((TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) ? \
((multi_entry_t *)(f))->pc : (f)->tag)
#define FRAG_PC_ASSIGN(f, val) do { \
if (TEST(FRAG_IS_EXTRA_VMAREA, (f)->flags)) \
((multi_entry_t *)(f))->pc = (val); \
else \
ASSERT_NOT_REACHED(); \
} while (0)
#define FRAG_FRAG(fr) ((TEST(FRAG_IS_EXTRA_VMAREA, (fr)->flags)) ? \
((multi_entry_t *)(fr))->f : (fr))
#define FRAG_FRAG_ASSIGN(fr, val) do { \
if (TEST(FRAG_IS_EXTRA_VMAREA, (fr)->flags)) \
((multi_entry_t *)(fr))->f = (val); \
else \
ASSERT_NOT_REACHED(); \
} while (0)
#define FRAG_ID(fr) ((TEST(FRAG_IS_EXTRA_VMAREA, (fr)->flags)) ? \
((multi_entry_t *)(fr))->f->id : (fr)->id)
/***************************************************/
/* FIXME : is problematic to page align subpage regions */
/* Makes the pages spanning [pc, pc+size) writable.
 * Page-aligns the request, so subpage regions are affected too (see FIXME above).
 */
static void
vm_make_writable(byte *pc, size_t size)
{
    byte *page_start = (byte *) ALIGN_BACKWARD(pc, PAGE_SIZE);
    size_t full_size = ALIGN_FORWARD(size + (pc - page_start), PAGE_SIZE);
    DEBUG_DECLARE(bool res = )
        make_writable(page_start, full_size);
    ASSERT(res);
    ASSERT(INTERNAL_OPTION(cache_consistency));
}
/* Makes the pages spanning [pc, pc+size) read-only.
 * Same page-alignment caveat as vm_make_writable.
 */
static void
vm_make_unwritable(byte *pc, size_t size)
{
    byte *page_start = (byte *) ALIGN_BACKWARD(pc, PAGE_SIZE);
    size_t full_size = ALIGN_FORWARD(size + (pc - page_start), PAGE_SIZE);
    ASSERT(INTERNAL_OPTION(cache_consistency));
    make_unwritable(page_start, full_size);
    /* case 8308: We should never call vm_make_unwritable if
     * -sandbox_writable is on, or if -sandbox_non_text is on and this
     * is a non-text region.
     */
    ASSERT(!DYNAMO_OPTION(sandbox_writable));
    DOCHECK(1, {
        if (DYNAMO_OPTION(sandbox_non_text)) {
            app_pc base = get_module_base(pc);
            ASSERT(base != NULL &&
                   is_range_in_code_section(base, pc, pc + size, NULL, NULL));
        }
    });
}
/* since dynamorio changes some readwrite memory regions to read only,
* this changes all regions memory permissions back to what they should be,
* since dynamorio uses this mechanism to ensure code cache coherency,
* once this method is called stale code could be executed out of the
* code cache */
void
revert_memory_regions()
{
int i;
/* executable_areas doesn't exist in thin_client mode. */
ASSERT(!DYNAMO_OPTION(thin_client));
read_lock(&executable_areas->lock);
for (i = 0; i < executable_areas->length; i++) {
if (TEST(VM_MADE_READONLY, executable_areas->buf[i].vm_flags)) {
/* this is a region that dynamorio has marked read only, fix */
LOG(GLOBAL, LOG_VMAREAS, 1,
" fixing permissions for RW executable area "PFX"-"PFX" %s\n",
executable_areas->buf[i].start, executable_areas->buf[i].end,
executable_areas->buf[i].comment);
vm_make_writable(executable_areas->buf[i].start,
executable_areas->buf[i].end -
executable_areas->buf[i].start);
}
}
read_unlock(&executable_areas->lock);
}
/* Prints a compact one-letter-per-flag summary of an area's vm/frag flags */
static void
print_vm_flags(uint vm_flags, uint frag_flags, file_t outf)
{
    print_file(outf, " %s%s%s%s",
               TEST(VM_WRITABLE, vm_flags) ? "W" : "-",
               TEST(VM_WAS_FUTURE, vm_flags) ? "F" : "-",
               TEST(FRAG_SELFMOD_SANDBOXED, frag_flags) ? "S" : "-",
               TEST(FRAG_COARSE_GRAIN, frag_flags) ? "C" : "-");
#ifdef PROGRAM_SHEPHERDING
    print_file(outf, "%s%s",
               TEST(VM_PATTERN_REVERIFY, vm_flags) ? "P" : "-",
               TEST(FRAG_DYNGEN, frag_flags) ? "D" : "-");
#endif
}
/* Prints one area, including the custom field for vectors known to use it.
 * ok to pass NULL for v, only used to identify use of custom field.
 */
static void
print_vm_area(vm_area_vector_t *v, vm_area_t *area, file_t outf, const char *prefix)
{
    print_file(outf, "%s"PFX"-"PFX, prefix, area->start, area->end);
    print_vm_flags(area->vm_flags, area->frag_flags, outf);
    if (v == executable_areas && TEST(FRAG_COARSE_GRAIN, area->frag_flags)) {
        /* executable_areas stores coarse unit info in the custom field */
        coarse_info_t *info = (coarse_info_t *) area->custom.client;
        if (info != NULL) {
            if (info->persisted)
                print_file(outf, "R");
            else if (info->frozen)
                print_file(outf, "Z");
            else
                print_file(outf, "-");
        }
    }
#ifdef DEBUG
    print_file(outf, " %s", area->comment);
    DOLOG(1, LOG_VMAREAS, {
        IF_NO_MEMQUERY(extern vm_area_vector_t *all_memory_areas;)
        app_pc modbase =
            /* avoid rank order violation */
            IF_NO_MEMQUERY(v == all_memory_areas ? NULL :)
            get_module_base(area->start);
        if (modbase != NULL &&
            /* avoid rank order violations */
            v != dynamo_areas &&
            v != written_areas &&
            /* we free module list before vmareas */
            !dynamo_exited_and_cleaned &&
            is_mapped_as_image(modbase)/*avoid asserts in getting name */) {
            const char *name;
            os_get_module_info_lock();
            os_get_module_name(modbase, &name);
            print_file(outf, " %s", name == NULL ? "" : name);
            os_get_module_info_unlock();
        }
    });
#endif
    if (v == written_areas) {
        ro_vs_sandbox_data_t *ro2s = (ro_vs_sandbox_data_t *) area->custom.client;
        /* ro2s can be NULL if in the middle of adding the area; guard in
         * both build modes (previously only the DEBUG path checked, so a
         * release build could dereference NULL here).
         */
        if (ro2s != NULL) {
#ifdef DEBUG
            uint tot_w = ro2s->ro2s_xfers * DYNAMO_OPTION(ro2sandbox_threshold);
            uint tot_s = ro2s->s2ro_xfers * DYNAMO_OPTION(sandbox2ro_threshold);
            print_file(outf, " w %3d, %3d tot; x %3d, %5d tot; ro2s %d, s2ro %d",
                       ro2s->written_count, tot_w, ro2s->selfmod_execs, tot_s,
                       ro2s->ro2s_xfers, ro2s->s2ro_xfers);
#else
            print_file(outf, " written %3d, exec %5d",
                       ro2s->written_count, ro2s->selfmod_execs);
#endif
        }
    }
    print_file(outf, "\n");
}
/* Prints every area in v; assumes caller holds v->lock for coherency */
static void
print_vm_areas(vm_area_vector_t *v, file_t outf)
{
    int idx;
    ASSERT_VMAREA_VECTOR_PROTECTED(v, READWRITE);
    for (idx = 0; idx < v->length; idx++)
        print_vm_area(v, &v->buf[idx], outf, " ");
}
#if defined(DEBUG) && defined(INTERNAL)
/* Prints each area in v starting at the one containing start, walking forward
 * until an area covering end has been printed or a gap (failed lookup) stops
 * the walk.
 * NOTE(review): advances with "pc = new_area->end + 1" even though area ends
 * are open intervals (an adjacent area begins exactly at end), so a
 * hypothetical one-byte area beginning at new_area->end would be skipped --
 * presumably harmless at the granularities used here, but verify.
 */
static void
print_contig_vm_areas(vm_area_vector_t *v, app_pc start, app_pc end, file_t outf,
                      const char *prefix)
{
    vm_area_t *new_area;
    app_pc pc = start;
    do {
        lookup_addr(v, pc, &new_area);
        if (new_area == NULL)
            break; /* gap: stop before the loop test dereferences NULL */
        print_vm_area(v, new_area, outf, prefix);
        pc = new_area->end + 1;
    } while (new_area->end < end);
}
#endif
#if defined(DEBUG) && defined(INTERNAL)
/* Dumps the shared pending-deletion list; caller must hold shared_delete_lock */
static void
print_pending_list(file_t outf)
{
    pending_delete_t *entry;
    int idx = 0;
    ASSERT_OWN_MUTEX(true, &shared_delete_lock);
    for (entry = todelete->shared_delete; entry != NULL;
         entry = entry->next, idx++) {
        print_file(outf, "%d: "PFX"-"PFX" ref=%d, stamp=%d\n",
                   idx, entry->start, entry->end, entry->ref_count,
                   entry->flushtime_deleted);
    }
}
#endif
/* If v requires a lock and the calling thread does not hold that lock,
 * this routine acquires the lock and returns true; else it returns false.
 */
static bool
writelock_if_not_already(vm_area_vector_t *v)
{
    bool need_lock =
        TEST(VECTOR_SHARED, v->flags) && !self_owns_write_lock(&v->lock);
    if (need_lock)
        SHARED_VECTOR_RWLOCK(v, write, lock);
    return need_lock;
}
/* Ensures v->buf has room for at least one more entry, (re)allocating when
 * the vector is at capacity.  Only called by add_vm_area, which asserts
 * that the vector is protected.
 */
static void
vm_area_vector_check_size(vm_area_vector_t *v)
{
    if (v->size != v->length)
        return; /* room remains */
    if (v->length == 0) {
        /* first allocation */
        v->size = INTERNAL_OPTION(vmarea_initial_size);
        v->buf = (vm_area_t *)
            global_heap_alloc(v->size * sizeof(struct vm_area_t)
                              HEAPACCT(ACCT_VMAREAS));
    } else {
        /* FIXME: case 4471 we should be doubling size here */
        int new_size = INTERNAL_OPTION(vmarea_increment_size) + v->length;
        STATS_INC(num_vmareas_resized);
        v->buf = global_heap_realloc(v->buf, v->size, new_size,
                                     sizeof(struct vm_area_t)
                                     HEAPACCT(ACCT_VMAREAS));
        v->size = new_size;
    }
    ASSERT(v->buf != NULL);
}
/* Appends src's fragment list to the end of dst's (src's own head pointer is
 * left untouched).  Entries may be fragment_t or multi_entry_t; the
 * FRAG_NEXT/FRAG_PREV accessors hide the difference.
 * NOTE(review): the splice below only makes sense if next is linear and prev
 * wraps (head's prev stores the tail for O(1) append) -- verify against the
 * list-build code elsewhere in this file.
 * caller must hold write lock for vector of course: FIXME: assert that here
 */
static void
vm_area_merge_fraglists(vm_area_t *dst, vm_area_t *src)
{
    LOG(THREAD_GET, LOG_VMAREAS, 2,
        "\tmerging frag lists for "PFX"-"PFX" and "PFX"-"PFX"\n",
        src->start, src->end, dst->start, dst->end);
    if (dst->custom.frags == NULL)
        dst->custom.frags = src->custom.frags;
    else if (src->custom.frags == NULL)
        return;
    else {
        /* put src's frags at end of dst's frags */
        fragment_t *top1 = dst->custom.frags;
        fragment_t *top2 = src->custom.frags;
        fragment_t *tmp = FRAG_PREV(top1); /* dst's current tail */
        /* old dst tail now points at src's head */
        FRAG_NEXT_ASSIGN(tmp, top2);
        /* dst head's prev becomes src's tail (new overall tail) */
        FRAG_PREV_ASSIGN(top1, FRAG_PREV(top2));
        /* src head's prev becomes the old dst tail */
        FRAG_PREV_ASSIGN(top2, tmp);
        DOLOG(4, LOG_VMAREAS, {
            print_fraglist(get_thread_private_dcontext(),
                           dst, "after merging fraglists:");
        });
    }
}
/* Assumes caller holds v->lock, if necessary.
* Does not return the area added since it may be merged or split depending
* on existing areas->
* If a last_area points into this vector, the caller must make sure to
* clear or update the last_area pointer.
* FIXME: make it easier to keep them in synch -- too easy to add_vm_area
* somewhere to a thread vector and forget to clear last_area.
* Adds a new area to v, merging it with adjacent areas of the same type.
* A new area is only allowed to overlap an old area of a different type if it
* meets certain criteria (see asserts below). For VM_WAS_FUTURE and
* VM_ONCE_ONLY we may clear the flag from an existing region if the new
* region doesn't have the flag and overlaps the existing region. Otherwise
* the new area is split such that the overlapping portion remains part of
* the old area. This tries to keep entire new area from becoming selfmod
* for instance. FIXME : for VM_WAS_FUTURE and VM_ONCE_ONLY may want to split
* region if only paritally overlapping
*
* FIXME: change add_vm_area to return NULL when merged, and otherwise
* return the new complete area, so callers don't have to do a separate lookup
* to access the added area.
*/
static void
add_vm_area(vm_area_vector_t *v, app_pc start, app_pc end,
            uint vm_flags, uint frag_flags, void *data _IF_DEBUG(const char *comment))
{
    int i, j, diff;
    /* if we have overlap, we extend an existing area -- else we add a new area */
    int overlap_start = -1, overlap_end = -1;
    DEBUG_DECLARE(uint flagignore;)
    ASSERT(start < end);
    ASSERT_VMAREA_VECTOR_PROTECTED(v, WRITE);
    LOG(GLOBAL, LOG_VMAREAS, 4, "in add_vm_area "PFX" "PFX" %s\n", start, end, comment);
    /* N.B.: new area could span multiple existing areas! */
    for (i = 0; i < v->length; i++) {
        /* look for overlap, or adjacency of same type (including all flags, and never
         * merge adjacent if keeping write counts)
         */
        if ((start < v->buf[i].end && end > v->buf[i].start) ||
            (start <= v->buf[i].end && end >= v->buf[i].start &&
             vm_flags == v->buf[i].vm_flags &&
             frag_flags == v->buf[i].frag_flags &&
             /* never merge coarse-grain */
             !TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags) &&
             !TEST(VECTOR_NEVER_MERGE_ADJACENT, v->flags) &&
             (v->should_merge_func == NULL ||
              v->should_merge_func(true/*adjacent*/, data, v->buf[i].custom.client)))) {
            /* true overlap (as opposed to mere adjacency) is only allowed
             * if the vector permits it */
            ASSERT(!(start < v->buf[i].end && end > v->buf[i].start) ||
                   !TEST(VECTOR_NEVER_OVERLAP, v->flags));
            if (overlap_start == -1) {
                /* assume we'll simply expand an existing area rather than
                 * add a new one -- we'll reset this if we hit merge conflicts */
                overlap_start = i;
            }
            /* overlapping regions of different properties are often
             * problematic so we add a lot of debugging output
             */
            DOLOG(4, LOG_VMAREAS, {
                LOG(GLOBAL, LOG_VMAREAS, 1,
                    "==================================================\n"
                    "add_vm_area "PFX"-"PFX" %s %x-%x overlaps "PFX"-"PFX" %s %x-%x\n",
                    start, end, comment, vm_flags, frag_flags,
                    v->buf[i].start, v->buf[i].end,
                    v->buf[i].comment, v->buf[i].vm_flags, v->buf[i].frag_flags);
                print_vm_areas(v, GLOBAL);
                /* rank order problem if holding heap_unit_lock, so only print
                 * if not holding a lock for v right now, though ok to print
                 * for shared vm areas since its lock is higher than the lock
                 * for executable/written areas
                 */
                if (v != dynamo_areas &&
                    (!TEST(VECTOR_SHARED, v->flags) || v == &shared_data->areas)) {
                    LOG(GLOBAL, LOG_VMAREAS, 1, "\nexecutable areas:\n");
                    print_executable_areas(GLOBAL);
                    LOG(GLOBAL, LOG_VMAREAS, 1, "\nwritten areas:\n");
                    print_written_areas(GLOBAL);
                }
                LOG(GLOBAL, LOG_VMAREAS, 1,
                    "==================================================\n\n");
            });
            /* we have some restrictions on overlapping regions with
             * different flags */
            /* no restrictions on WAS_FUTURE flag, but if new region is
             * not was future and old region is then should drop from old
             * region FIXME : partial overlap? we don't really care about
             * this flag anyways */
            if (TEST(VM_WAS_FUTURE, v->buf[i].vm_flags) &&
                !TEST(VM_WAS_FUTURE, vm_flags)) {
                v->buf[i].vm_flags &= ~VM_WAS_FUTURE;
                LOG(GLOBAL, LOG_VMAREAS, 1,
                    "Warning : removing was_future flag from area "PFX
                    "-"PFX" %s that overlaps new area "PFX"-"PFX" %s\n",
                    v->buf[i].start, v->buf[i].end, v->buf[i].comment,
                    start, end, comment);
            }
            /* no restrictions on ONCE_ONLY flag, but if new region is not
             * should drop from existing region FIXME : partial overlap? is
             * not much of an additional security risk */
            if (TEST(VM_ONCE_ONLY, v->buf[i].vm_flags) &&
                !TEST(VM_ONCE_ONLY, vm_flags)) {
                v->buf[i].vm_flags &= ~VM_ONCE_ONLY;
                LOG(GLOBAL, LOG_VMAREAS, 1,
                    "Warning : removing once_only flag from area "PFX
                    "-"PFX" %s that overlaps new area "PFX"-"PFX" %s\n",
                    v->buf[i].start, v->buf[i].end, v->buf[i].comment,
                    start, end, comment);
            }
            /* shouldn't be adding unmod image over existing not unmod image,
             * reverse could happen with os region merging though */
            ASSERT(TEST(VM_UNMOD_IMAGE, v->buf[i].vm_flags) ||
                   !TEST(VM_UNMOD_IMAGE, vm_flags));
            /* for VM_WRITABLE only allow new region to not be writable and
             * existing region to be writable to handle cases of os region
             * merging due to our consistency protection changes */
            ASSERT(TEST(VM_WRITABLE, v->buf[i].vm_flags) ||
                   !TEST(VM_WRITABLE, vm_flags));
            /* FIXME: case 7877: if new is VM_MADE_READONLY and old is not, we
             * must mark old overlapping portion as VM_MADE_READONLY. Things only
             * worked now b/c VM_MADE_READONLY==VM_WRITABLE, so we can add
             * pageprot regions that overlap w/ selfmod.
             */
#ifdef PROGRAM_SHEPHERDING
            /* !VM_PATTERN_REVERIFY trumps having the flag on, so for new having
             * the flag and old not, we're fine, but when old has it we'd like
             * to remove it from the overlap portion: FIXME: need better merging
             * control, also see all the partial overlap fixmes above.
             * for this flag not a big deal, just a possible perf hit as we
             * re-check every time.
             */
#endif
            /* disallow any other vm_flag differences */
            DODEBUG({ flagignore = VM_UNMOD_IMAGE | VM_WAS_FUTURE |
                          VM_ONCE_ONLY | VM_WRITABLE; });
#ifdef PROGRAM_SHEPHERDING
            DODEBUG({ flagignore = flagignore | VM_PATTERN_REVERIFY; });
#endif
            ASSERT((v->buf[i].vm_flags & ~flagignore) == (vm_flags & ~flagignore));
            /* new region must be more innocent with respect to selfmod */
            ASSERT(TEST(FRAG_SELFMOD_SANDBOXED, v->buf[i].frag_flags) ||
                   !TEST(FRAG_SELFMOD_SANDBOXED, frag_flags));
            /* disallow other frag_flag differences */
#ifndef PROGRAM_SHEPHERDING
            ASSERT((v->buf[i].frag_flags & ~FRAG_SELFMOD_SANDBOXED) ==
                   (frag_flags & ~FRAG_SELFMOD_SANDBOXED));
#else
# ifdef DGC_DIAGNOSTICS
            /* FIXME : no restrictions on differing FRAG_DYNGEN_RESTRICTED
             * flags? */
            ASSERT((v->buf[i].frag_flags &
                    ~(FRAG_SELFMOD_SANDBOXED|FRAG_DYNGEN|FRAG_DYNGEN_RESTRICTED)) ==
                   (frag_flags &
                    ~(FRAG_SELFMOD_SANDBOXED|FRAG_DYNGEN|FRAG_DYNGEN_RESTRICTED)));
# else
            ASSERT((v->buf[i].frag_flags &
                    ~(FRAG_SELFMOD_SANDBOXED|FRAG_DYNGEN)) ==
                   (frag_flags &
                    ~(FRAG_SELFMOD_SANDBOXED|FRAG_DYNGEN)));
# endif
            /* shouldn't add non-dyngen overlapping existing dyngen, FIXME
             * is the reverse possible? right now we allow it */
            ASSERT(TEST(FRAG_DYNGEN, frag_flags) ||
                   !TEST(FRAG_DYNGEN, v->buf[i].frag_flags));
#endif
            /* Never split FRAG_COARSE_GRAIN */
            ASSERT(TEST(FRAG_COARSE_GRAIN, frag_flags) ||
                   !TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags));
            /* for overlapping region: must overlap same type -- else split */
            if ((vm_flags != v->buf[i].vm_flags || frag_flags != v->buf[i].frag_flags) &&
                (v->should_merge_func == NULL ||
                 !v->should_merge_func(false/*not adjacent*/,
                                       data, v->buf[i].custom.client))) {
                LOG(GLOBAL, LOG_VMAREAS, 1,
                    "add_vm_area "PFX"-"PFX" %s vm_flags=0x%08x "
                    "frag_flags=0x%08x\n overlaps diff type "PFX"-"PFX" %s"
                    "vm_flags=0x%08x frag_flags=0x%08x\n in vect at "PFX"\n",
                    start, end, comment, vm_flags, frag_flags,
                    v->buf[i].start, v->buf[i].end, v->buf[i].comment,
                    v->buf[i].vm_flags, v->buf[i].frag_flags, v);
                LOG(GLOBAL, LOG_VMAREAS, 3,
                    "before splitting b/c adding "PFX"-"PFX":\n",
                    start, end);
                DOLOG(3, LOG_VMAREAS, { print_vm_areas(v, GLOBAL); });
                /* split off the overlapping part from the new region
                 * reasoning: old regions get marked selfmod, then see new code,
                 * its region overlaps old selfmod -- don't make new all selfmod,
                 * split off the part that hasn't been proved selfmod yet.
                 * since we never split the old region, we don't need to worry
                 * about splitting its frags list.
                 */
                if (start < v->buf[i].start) {
                    if (end > v->buf[i].end) {
                        void *add_data = data;
                        /* need two areas, one for either side */
                        LOG(GLOBAL, LOG_VMAREAS, 3,
                            "=> will add "PFX"-"PFX" after i\n", v->buf[i].end, end);
                        /* safe to recurse here, new area will be after the area
                         * we are currently looking at in the vector */
                        if (v->split_payload_func != NULL)
                            add_data = v->split_payload_func(data);
                        add_vm_area(v, v->buf[i].end, end, vm_flags, frag_flags,
                                    add_data _IF_DEBUG(comment));
                    }
                    /* if had been merging, let this routine finish that off -- else,
                     * need to add a new area
                     */
                    end = v->buf[i].start;
                    if (overlap_start == i) {
                        /* no merging */
                        overlap_start = -1;
                    }
                    LOG(GLOBAL, LOG_VMAREAS, 3,
                        "=> will add/merge "PFX"-"PFX" before i\n", start, end);
                    overlap_end = i;
                    break;
                } else if (end > v->buf[i].end) {
                    /* shift area of consideration to end of i, and keep going,
                     * can't act now since don't know areas overlapping beyond i
                     */
                    LOG(GLOBAL, LOG_VMAREAS, 3,
                        "=> ignoring "PFX"-"PFX", only adding "PFX"-"PFX"\n",
                        start, v->buf[i].end, v->buf[i].end, end);
                    start = v->buf[i].end;
                    /* reset overlap vars */
                    ASSERT(overlap_start <= i);
                    overlap_start = -1;
                } else {
                    /* completely inside -- ok, we'll leave it that way and won't split */
                    LOG(GLOBAL, LOG_VMAREAS, 3,
                        "=> ignoring "PFX"-"PFX", forcing to be part of "PFX"-"PFX"\n",
                        start, end, v->buf[i].start, v->buf[i].end);
                }
                ASSERT(end > start);
            }
        } else if (overlap_start > -1) {
            /* was overlapping/merging, and this area no longer matches:
             * the merge range ends just before i */
            overlap_end = i; /* not inclusive */
            break;
        } else if (end <= v->buf[i].start)
            break; /* vector is sorted: no later area can overlap */
    }
    if (overlap_start == -1) {
        /* brand-new area, goes before v->buf[i] */
        struct vm_area_t new_area = {start, end, vm_flags, frag_flags, /* rest 0 */};
#ifdef DEBUG
        /* get comment */
        size_t len = strlen(comment);
        ASSERT(len < 1024);
        new_area.comment = (char *) global_heap_alloc(len+1 HEAPACCT(ACCT_VMAREAS));
        strncpy(new_area.comment, comment, len);
        new_area.comment[len] = '\0'; /* if max no null */
#endif
        new_area.custom.client = data;
        LOG(GLOBAL, LOG_VMAREAS, 3, "=> adding "PFX"-"PFX"\n", start, end);
        vm_area_vector_check_size(v);
        /* shift subsequent entries */
        for (j = v->length; j > i; j--)
            v->buf[j] = v->buf[j-1];
        v->buf[i] = new_area;
        /* assumption: no overlaps between areas in list! */
#ifdef DEBUG
        if (!((i == 0 || v->buf[i-1].end <= v->buf[i].start) &&
              (i == v->length || v->buf[i].end <= v->buf[i+1].start))) {
            LOG(GLOBAL, LOG_VMAREAS, 1,
                "ERROR: add_vm_area illegal overlap "PFX" "PFX" %s\n", start, end, comment);
            print_vm_areas(v, GLOBAL);
        }
#endif
        ASSERT((i == 0 || v->buf[i-1].end <= v->buf[i].start) &&
               (i == v->length || v->buf[i].end <= v->buf[i+1].start));
        v->length++;
        STATS_TRACK_MAX(max_vmareas_length, v->length);
        DOSTATS({
            if (v == dynamo_areas)
                STATS_TRACK_MAX(max_DRareas_length, v->length);
            else if (v == executable_areas)
                STATS_TRACK_MAX(max_execareas_length, v->length);
        });
#ifdef WINDOWS
        DOSTATS({
            extern vm_area_vector_t *loaded_module_areas;
            if (v == loaded_module_areas)
                STATS_TRACK_MAX(max_modareas_length, v->length);
        });
#endif
    } else {
        /* overlaps one or more areas, modify first to equal entire range,
         * delete rest
         */
        if (overlap_end == -1)
            overlap_end = v->length;
        LOG(GLOBAL, LOG_VMAREAS, 3, "=> changing "PFX"-"PFX,
            v->buf[overlap_start].start, v->buf[overlap_start].end);
        if (start < v->buf[overlap_start].start)
            v->buf[overlap_start].start = start;
        if (end > v->buf[overlap_end-1].end)
            v->buf[overlap_start].end = end;
        else
            v->buf[overlap_start].end = v->buf[overlap_end-1].end;
        if (v->merge_payload_func != NULL) {
            v->buf[overlap_start].custom.client =
                v->merge_payload_func(data, v->buf[overlap_start].custom.client);
        } else if (v->free_payload_func != NULL) {
            /* if a merge exists we assume it will free if necessary */
            v->free_payload_func(v->buf[overlap_start].custom.client);
        }
        LOG(GLOBAL, LOG_VMAREAS, 3, " to "PFX"-"PFX"\n",
            v->buf[overlap_start].start, v->buf[overlap_start].end);
        /* when merge, use which comment? could combine them all
         * FIXME
         */
        /* now delete */
        for (i = overlap_start+1; i < overlap_end; i++) {
            LOG(GLOBAL, LOG_VMAREAS, 3, "=> completely removing "PFX"-"PFX" %s\n",
                v->buf[i].start, v->buf[i].end, v->buf[i].comment);
#ifdef DEBUG
            global_heap_free(v->buf[i].comment, strlen(v->buf[i].comment)+1
                             HEAPACCT(ACCT_VMAREAS));
#endif
            if (v->merge_payload_func != NULL) {
                v->buf[overlap_start].custom.client =
                    v->merge_payload_func(v->buf[overlap_start].custom.client,
                                          v->buf[i].custom.client);
            } else if (v->free_payload_func != NULL) {
                /* if a merge exists we assume it will free if necessary */
                v->free_payload_func(v->buf[i].custom.client);
            }
            /* merge frags lists */
            /* FIXME: switch this to a merge_payload_func. It won't be able
             * to print out the bounds, and it will have to do the work of
             * vm_area_clean_fraglist() on each merge, but we could then get
             * rid of VECTOR_FRAGMENT_LIST.
             */
            if (TEST(VECTOR_FRAGMENT_LIST, v->flags) && v->buf[i].custom.frags != NULL)
                vm_area_merge_fraglists(&v->buf[overlap_start], &v->buf[i]);
        }
        /* compact the vector: close the gap left by the merged-away areas */
        diff = overlap_end - (overlap_start+1);
        for (i = overlap_start+1; i < v->length-diff; i++)
            v->buf[i] = v->buf[i+diff];
        v->length -= diff;
        i = overlap_start; /* for return value */
        if (TEST(VECTOR_FRAGMENT_LIST, v->flags) && v->buf[i].custom.frags != NULL) {
            dcontext_t *dcontext = get_thread_private_dcontext();
            ASSERT(dcontext != NULL);
            /* have to remove all alsos that are now in same area as frag */
            vm_area_clean_fraglist(dcontext, &v->buf[i]);
        }
    }
    DOLOG(5, LOG_VMAREAS, { print_vm_areas(v, GLOBAL); });
}
/* Shrinks the bounds of the coarse unit(s) attached to area so they do not
 * extend beyond the (possibly just-trimmed) area bounds. Walks the primary
 * unit and, if present, its secondary (non_frozen) unit.
 * If if_invalid is false, units marked PERSCACHE_CODE_INVALID are left
 * untouched (their bounds are expected to be restored post-rebind).
 */
static void
adjust_coarse_unit_bounds(vm_area_t *area, bool if_invalid)
{
    coarse_info_t *unit = (coarse_info_t *) area->custom.client;
    ASSERT(TEST(FRAG_COARSE_GRAIN, area->frag_flags));
    ASSERT(!RUNNING_WITHOUT_CODE_CACHE());
    ASSERT(unit != NULL);
    if (unit == NULL) /* be paranoid */
        return;
    /* FIXME: we'd like to grab info->lock but we have a rank order w/
     * exec_areas lock -- so instead we rely on all-thread-synch flushing
     * being the only reason to get here; an empty flush won't have synchall,
     * but we won't be able to get_executable_area_coarse_info w/o the
     * exec areas write lock so we're ok there.
     */
    ASSERT(dynamo_all_threads_synched ||
           (!TEST(VM_EXECUTED_FROM, area->vm_flags) &&
            READWRITE_LOCK_HELD(&executable_areas->lock)));
    if (!if_invalid && TEST(PERSCACHE_CODE_INVALID, unit->flags)) {
        /* Don't change bounds of primary or secondary; we expect vm_area_t to
         * be merged back to this size post-rebind; if not, we'll throw out this
         * pcache at validation time due to not matching the vm_area_t.
         */
        return;
    }
    for (; unit != NULL; ) { /* loop over primary and secondary unit */
        coarse_info_t *next;
        /* We should have reset this coarse info when flushing */
        ASSERT(unit->cache == NULL);
        ASSERT(!unit->frozen && !unit->persisted);
        /* Clamp the unit so it no longer covers the removed region */
        if (unit->base_pc < area->start)
            unit->base_pc = area->start;
        if (unit->end_pc > area->end)
            unit->end_pc = area->end;
        ASSERT(unit->frozen || unit->non_frozen == NULL);
        next = unit->non_frozen;
        ASSERT(next == NULL || !next->frozen);
        unit = next;
    }
}
/* Assumes caller holds v->lock, if necessary
* Returns false if no area contains start..end
* Ignores type of area -- removes all within start..end
* Caller should probably clear last_area as well
*/
static bool
remove_vm_area(vm_area_vector_t *v, app_pc start, app_pc end, bool restore_prot)
{
    int i, diff;
    /* [overlap_start, overlap_end) = index range of areas touching [start, end) */
    int overlap_start = -1, overlap_end = -1;
    bool add_new_area = false;
    vm_area_t new_area = {0}; /* used only when add_new_area, wimpy compiler */
    /* FIXME: cleaner test? shared_data copies flags, but uses
     * custom.frags and not custom.client
     */
    bool official_coarse_vector = (v == executable_areas);
    ASSERT_VMAREA_VECTOR_PROTECTED(v, WRITE);
    LOG(GLOBAL, LOG_VMAREAS, 4, "in remove_vm_area "PFX" "PFX"\n", start, end);
    /* N.B.: removed area could span multiple areas! */
    for (i = 0; i < v->length; i++) {
        /* look for overlap */
        if (start < v->buf[i].end && end > v->buf[i].start) {
            if (overlap_start == -1)
                overlap_start = i;
        } else if (overlap_start > -1) {
            overlap_end = i; /* not inclusive */
            break;
        } else if (end <= v->buf[i].start)
            break; /* sorted vector: no later area can overlap */
    }
    if (overlap_start == -1)
        return false;
    if (overlap_end == -1)
        overlap_end = v->length;
    /* since it's sorted and there are no overlaps, we do not have to re-sort.
     * we just delete entire intervals affected, and shorten non-entire
     */
    if (start > v->buf[overlap_start].start) {
        /* need to split? i.e., removal range is strictly inside one area */
        if (overlap_start == overlap_end-1 && end < v->buf[overlap_start].end) {
            /* don't call add_vm_area now, that will mess up our vector */
            new_area = v->buf[overlap_start]; /* make a copy */
            new_area.start = end;
            /* rest of fields are correct */
            add_new_area = true;
        }
        /* move ending bound backward */
        LOG(GLOBAL, LOG_VMAREAS, 3, "\tchanging "PFX"-"PFX" to "PFX"-"PFX"\n",
            v->buf[overlap_start].start, v->buf[overlap_start].end,
            v->buf[overlap_start].start, start);
        if (restore_prot && TEST(VM_MADE_READONLY, v->buf[overlap_start].vm_flags)) {
            vm_make_writable(start, end - start);
        }
        v->buf[overlap_start].end = start;
        /* FIXME: add a vmvector callback function for changing bounds? */
        if (TEST(FRAG_COARSE_GRAIN, v->buf[overlap_start].frag_flags) &&
            official_coarse_vector) {
            adjust_coarse_unit_bounds(&v->buf[overlap_start], false/*leave invalid*/);
        }
        overlap_start++; /* don't delete me */
    }
    if (end < v->buf[overlap_end-1].end) {
        /* move starting bound forward */
        LOG(GLOBAL, LOG_VMAREAS, 3, "\tchanging "PFX"-"PFX" to "PFX"-"PFX"\n",
            v->buf[overlap_end-1].start, v->buf[overlap_end-1].end,
            end, v->buf[overlap_end-1].end);
        if (restore_prot && TEST(VM_MADE_READONLY, v->buf[overlap_end-1].vm_flags)) {
            vm_make_writable(v->buf[overlap_end-1].start, end - v->buf[overlap_end-1].start);
        }
        v->buf[overlap_end-1].start = end;
        /* FIXME: add a vmvector callback function for changing bounds? */
        if (TEST(FRAG_COARSE_GRAIN, v->buf[overlap_end-1].frag_flags) &&
            official_coarse_vector) {
            adjust_coarse_unit_bounds(&v->buf[overlap_end-1], false/*leave invalid*/);
        }
        overlap_end--; /* don't delete me */
    }
    /* now delete any areas that lie entirely within [start, end) */
    if (overlap_start < overlap_end) {
        for (i = overlap_start; i < overlap_end; i++) {
            LOG(GLOBAL, LOG_VMAREAS, 3, "\tcompletely removing "PFX"-"PFX" %s\n",
                v->buf[i].start, v->buf[i].end, v->buf[i].comment);
            if (restore_prot && TEST(VM_MADE_READONLY, v->buf[i].vm_flags)) {
                vm_make_writable(v->buf[i].start, v->buf[i].end - v->buf[i].start);
            }
            /* FIXME: use a free_payload_func instead of this custom
             * code. But then we couldn't assert on the bounds and on
             * VM_EXECUTED_FROM. Could add bounds to callback params, but
             * vm_flags are not exposed to vmvector interface...
             */
            if (TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags) &&
                official_coarse_vector) {
                coarse_info_t *info = (coarse_info_t *) v->buf[i].custom.client;
                coarse_info_t *next_info;
                ASSERT(info != NULL);
                ASSERT(!RUNNING_WITHOUT_CODE_CACHE());
                while (info != NULL) { /* loop over primary and secondary unit */
                    ASSERT(info->base_pc >= v->buf[i].start &&
                           info->end_pc <= v->buf[i].end);
                    ASSERT(info->frozen || info->non_frozen == NULL);
                    /* Should have already freed fields, unless we flushed a region
                     * that has not been executed from (case 10995): in which case
                     * we must delay as we cannot grab change_linking_lock or
                     * special_heap_lock or info->lock while holding exec_areas lock.
                     */
                    if (info->cache != NULL) {
                        ASSERT(info->persisted);
                        ASSERT(!TEST(VM_EXECUTED_FROM, v->buf[i].vm_flags));
                        ASSERT(info->non_frozen != NULL);
                        ASSERT(coarse_to_delete != NULL);
                        /* Both primary and secondary must be un-executed */
                        info->non_frozen->non_frozen = *coarse_to_delete;
                        *coarse_to_delete = info;
                        info = NULL;
                    } else {
                        ASSERT(info->cache == NULL && info->stubs == NULL);
                        next_info = info->non_frozen;
                        coarse_unit_free(GLOBAL_DCONTEXT, info);
                        info = next_info;
                        ASSERT(info == NULL || !info->frozen);
                    }
                }
                v->buf[i].custom.client = NULL;
            }
            if (v->free_payload_func != NULL) {
                v->free_payload_func(v->buf[i].custom.client);
            }
#ifdef DEBUG
            global_heap_free(v->buf[i].comment, strlen(v->buf[i].comment)+1
                             HEAPACCT(ACCT_VMAREAS));
#endif
            /* frags list should always be null here (flush should have happened,
             * etc.) */
            ASSERT(!TEST(VECTOR_FRAGMENT_LIST, v->flags) || v->buf[i].custom.frags == NULL);
        }
        /* compact the vector over the deleted entries */
        diff = overlap_end - overlap_start;
        for (i = overlap_start; i < v->length-diff; i++)
            v->buf[i] = v->buf[i+diff];
#ifdef DEBUG
        memset(v->buf + v->length - diff, 0, diff * sizeof(vm_area_t));
#endif
        v->length -= diff;
    }
    if (add_new_area) {
        /* Case 8640: Do not propagate coarse-grain-ness to split-off region,
         * for now only for simplicity. FIXME: come up with better policy. We
         * do keep it on original part of split region. FIXME: assert that
         * there the unit is fully flushed. Better to remove in
         * vm_area_allsynch_flush_fragments() and then re-add if warranted?
         */
        new_area.frag_flags &= ~FRAG_COARSE_GRAIN;
        /* With flush of partial module region w/o remove (e.g., from
         * -unsafe_ignore_IAT_writes) we can have VM_ADD_TO_SHARED_DATA set
         */
        new_area.vm_flags &= ~VM_ADD_TO_SHARED_DATA;
        LOG(GLOBAL, LOG_VMAREAS, 3, "\tadding "PFX"-"PFX"\n", new_area.start, new_area.end);
        /* we copied v->buf[overlap_start] above and so already have a copy
         * of the client field
         */
        if (v->split_payload_func != NULL) {
            new_area.custom.client = v->split_payload_func(new_area.custom.client);
        } /* else, just keep the copy */
        add_vm_area(v, new_area.start, new_area.end, new_area.vm_flags,
                    new_area.frag_flags, new_area.custom.client
                    _IF_DEBUG(new_area.comment));
    }
    DOLOG(5, LOG_VMAREAS, { print_vm_areas(v, GLOBAL); });
    return true;
}
/* Returns true if start..end overlaps any area in v.
* If end==NULL, assumes that end is very top of address space (wraparound).
* If area!=NULL, sets *area to an overlapping area in v
* If index!=NULL, sets *index to the vector index of area; if no match
* is found, sets *index to the index before [start,end) (may be -1).
* If first, makes sure *area is the 1st overlapping area
* Assumes caller holds v->lock, if necessary
* N.B.: the pointer returned by this routine is volatile! Only use it while
* you have exclusive control over the vector v, either by holding its lock
* or by being its owning thread if it has no lock.
*/
/* Binary search over v (which add/remove keep sorted) for any area
 * overlapping [start, end). On a hit, optionally reports the area and/or
 * its index; with first set, walks back to the lowest-indexed overlapping
 * area. On a miss with index != NULL, *index is set to the entry just
 * before [start, end) (may be -1). end == NULL means top-of-address-space
 * wraparound. Caller must protect v; the returned pointer is volatile.
 */
static bool
binary_search(vm_area_vector_t *v, app_pc start, app_pc end, vm_area_t **area/*OUT*/,
              int *index/*OUT*/, bool first)
{
    int lo = 0;
    int hi = v->length - 1;
    ASSERT(start < end || end == NULL /* wraparound */);
    ASSERT_VMAREA_VECTOR_PROTECTED(v, READWRITE);
    LOG(GLOBAL, LOG_VMAREAS, 7, "Binary search for "PFX"-"PFX" on this vector:\n",
        start, end);
    DOLOG(7, LOG_VMAREAS, { print_vm_areas(v, GLOBAL); });
    while (lo <= hi) {
        int mid = (lo + hi) / 2;
        if (end != NULL && end <= v->buf[mid].start) {
            /* target lies entirely below buf[mid] */
            hi = mid - 1;
        } else if (start >= v->buf[mid].end) {
            /* target lies entirely above buf[mid] */
            lo = mid + 1;
        } else {
            /* overlap found at mid */
            if (area != NULL || index != NULL) {
                if (first) {
                    /* caller wants 1st matching area: scan back over
                     * earlier areas that still overlap start */
                    while (mid >= 1 && v->buf[mid-1].end > start)
                        mid--;
                }
                /* returning pointer to volatile array dangerous -- see comment above */
                if (area != NULL)
                    *area = &(v->buf[mid]);
                if (index != NULL)
                    *index = mid;
            }
            LOG(GLOBAL, LOG_VMAREAS, 7, "\tfound "PFX"-"PFX" in area "PFX"-"PFX"\n",
                start, end, v->buf[mid].start, v->buf[mid].end);
            return true;
        }
    }
    /* search exhausted: hi < lo */
    LOG(GLOBAL, LOG_VMAREAS, 7, "\tdid not find "PFX"-"PFX"!\n", start, end);
    if (index != NULL) {
        ASSERT((hi < 0 || v->buf[hi].end <= start) &&
               (lo > v->length - 1 || v->buf[lo].start >= end));
        *index = hi;
    }
    return false;
}
/* lookup an addr in the current area
* RETURN true if address area is found, false otherwise
* if area is non NULL it is set to the area found
* Assumes caller holds v->lock, if necessary
* N.B.: the pointer returned by this routine is volatile! Only use it while
* you have exclusive control over the vector v, either by holding its lock
* or by being its owning thread if it has no lock.
*/
/* FIXME: change lookup_addr to two routines, one for readers which
* returns a copy, and the other for writers who must hold a lock
* across all uses of the pointer
*/
static bool
lookup_addr(vm_area_vector_t *v, app_pc addr, vm_area_t **area)
{
    /* query the single-byte range [addr, addr+1); binary_search asserts
     * that v is properly protected */
    bool found = binary_search(v, addr, addr+1/*open end*/, area, NULL, false);
    return found;
}
/* returns true if the passed in area overlaps any known executable areas
* Assumes caller holds v->lock, if necessary
*/
static bool
vm_area_overlap(vm_area_vector_t *v, app_pc start, app_pc end)
{
    /* caller does not need the matching area or index, just a yes/no;
     * binary_search asserts v is protected */
    bool overlaps = binary_search(v, start, end, NULL, NULL, false);
    return overlaps;
}
/*********************** EXPORTED ROUTINES **********************/
/* thread-shared initialization that should be repeated after a reset */
void
vm_areas_reset_init(void)
{
    /* re-zero the shared per-"thread" data and re-create its fragment-list
     * vector; called at init and again after each reset */
    memset(shared_data, 0, sizeof(*shared_data));
    VMVECTOR_INITIALIZE_VECTOR(&shared_data->areas,
                               VECTOR_SHARED | VECTOR_FRAGMENT_LIST, shared_vm_areas);
}
/* Allocates the vector tracking DR's own memory regions.
 * N.B.: add_dynamo_vm_area can be called before this init routine!
 */
void
dynamo_vm_areas_init(void) /* (void): empty parens declare unspecified params in C */
{
    VMVECTOR_ALLOC_VECTOR(dynamo_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          dynamo_areas);
}
/* calls find_executable_vm_areas to get per-process map
* N.B.: add_dynamo_vm_area can be called before this init routine!
* N.B.: this is called after vm_areas_thread_init()
*/
/* Allocates all the shared vm-area vectors and populates the initial
 * DR-library and executable-area lists.
 * Returns the number of executable areas found.
 * N.B.: add_dynamo_vm_area can be called before this init routine!
 * N.B.: this is called after vm_areas_thread_init()
 */
int
vm_areas_init(void) /* (void): matches the file's prototype convention */
{
    int areas;
    /* Case 7957: we allocate all vm vectors on the heap for self-prot reasons.
     * We're already paying the indirection cost by passing their addresses
     * to generic routines, after all.
     */
    VMVECTOR_ALLOC_VECTOR(executable_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          executable_areas);
    VMVECTOR_ALLOC_VECTOR(pretend_writable_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          pretend_writable_areas);
    VMVECTOR_ALLOC_VECTOR(patch_proof_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          patch_proof_areas);
    VMVECTOR_ALLOC_VECTOR(emulate_write_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          emulate_write_areas);
    VMVECTOR_ALLOC_VECTOR(IAT_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          IAT_areas);
    VMVECTOR_ALLOC_VECTOR(written_areas, GLOBAL_DCONTEXT,
                          VECTOR_SHARED | VECTOR_NEVER_MERGE,
                          written_areas);
    vmvector_set_callbacks(written_areas, free_written_area, NULL, NULL, NULL);
#ifdef PROGRAM_SHEPHERDING
    VMVECTOR_ALLOC_VECTOR(futureexec_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          futureexec_areas);
# ifdef WINDOWS
    VMVECTOR_ALLOC_VECTOR(app_flushed_areas, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          app_flushed_areas);
# endif
#endif
    shared_data = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, thread_data_t, ACCT_VMAREAS, PROTECTED);
    todelete = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, deletion_lists_t, ACCT_VMAREAS, PROTECTED);
    memset(todelete, 0, sizeof(*todelete));
    coarse_to_delete = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, coarse_info_t *,
                                       ACCT_VMAREAS, PROTECTED);
    *coarse_to_delete = NULL;
    if (DYNAMO_OPTION(unloaded_target_exception)) {
        last_deallocated = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, last_deallocated_t,
                                           ACCT_VMAREAS, PROTECTED);
        memset(last_deallocated, 0, sizeof(*last_deallocated));
    } else
        ASSERT(last_deallocated == NULL);
    vm_areas_reset_init();
    /* initialize dynamo list first */
    LOG(GLOBAL, LOG_VMAREAS, 2,
        "\n--------------------------------------------------------------------------\n");
    dynamo_vm_areas_lock();
    areas = find_dynamo_library_vm_areas();
    dynamo_vm_areas_unlock();
    /* initialize executable list
     * this routine calls app_memory_allocation() w/ dcontext==NULL and so we
     * won't go adding rwx regions, like the linux stack, to our list, even w/
     * -executable_if_alloc
     */
    areas = find_executable_vm_areas();
    DOLOG(1, LOG_VMAREAS, {
        if (areas > 0) {
            LOG(GLOBAL, LOG_VMAREAS, 1, "\nExecution is allowed in %d areas\n", areas);
            print_executable_areas(GLOBAL);
        }
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "--------------------------------------------------------------------------\n");
    });
    return areas;
}
/* Debug-build exit-time reporting: code-origin lookup hit ratio (under
 * PROGRAM_SHEPHERDING) and the final executable-area list.
 */
static void
vm_areas_statistics(void) /* (void): empty parens declare unspecified params in C */
{
#ifdef PROGRAM_SHEPHERDING
    DOLOG(1, LOG_VMAREAS|LOG_STATS, {
        uint top; uint bottom;
        divide_uint64_print(GLOBAL_STAT(looked_up_in_last_area),
                            GLOBAL_STAT(checked_addresses), true, 2, &top, &bottom);
        LOG(GLOBAL, LOG_VMAREAS|LOG_STATS, 1,
            "Code Origin: %d address lookups, %d in last area, hit ratio %u.%.2u\n",
            GLOBAL_STAT(checked_addresses), GLOBAL_STAT(looked_up_in_last_area),
            top, bottom);
    });
#endif /* PROGRAM_SHEPHERDING */
    DOLOG(1, LOG_VMAREAS, {
        LOG(GLOBAL, LOG_VMAREAS, 1, "\nexecutable_areas at exit:\n");
        print_executable_areas(GLOBAL);
    });
}
/* Free all thread-shared state not critical to forward progress;
* vm_areas_reset_init() will be called before continuing.
*/
void
vm_areas_reset_free(void)
{
    if (SHARED_FRAGMENTS_ENABLED()) {
        /* all deletion entries should be removed in fragment_exit(),
         * else we'd have to free the frags lists and entries here
         */
        ASSERT(todelete->shared_delete == NULL);
        ASSERT(todelete->shared_delete_tail == NULL);
        /* FIXME: don't free lock so init has less work */
        vmvector_free_vector(GLOBAL_DCONTEXT, &shared_data->areas);
    }
    /* vm_area_coarse_units_reset_free() is called in fragment_reset_free() */
}
/* Tears down all vm-area state at process exit: prints statistics, then
 * frees every vector and auxiliary structure allocated in vm_areas_init().
 * For -thin_client only dynamo_areas exists, so we free it and return early.
 * Always returns 0.
 */
int
vm_areas_exit(void) /* (void): matches the file's prototype convention */
{
    vm_areas_exited = true;
    vm_areas_statistics();
    if (DYNAMO_OPTION(thin_client)) {
        vmvector_delete_vector(GLOBAL_DCONTEXT, dynamo_areas);
        dynamo_areas = NULL;
        /* For thin_client none of the following areas should have been
         * initialized because they aren't used.
         * FIXME: wonder if I can do something like this for -client and see
         * what I am using unnecessarily.
         */
        ASSERT(shared_data == NULL);
        ASSERT(todelete == NULL);
        ASSERT(executable_areas == NULL);
        ASSERT(pretend_writable_areas == NULL);
        ASSERT(patch_proof_areas == NULL);
        ASSERT(emulate_write_areas == NULL);
        ASSERT(written_areas == NULL);
#ifdef PROGRAM_SHEPHERDING
        ASSERT(futureexec_areas == NULL);
        IF_WINDOWS(ASSERT(app_flushed_areas == NULL);)
#endif
        ASSERT(IAT_areas == NULL);
        return 0;
    }
    vm_areas_reset_free();
    DELETE_LOCK(shared_delete_lock);
    DELETE_LOCK(lazy_delete_lock);
    ASSERT(todelete->lazy_delete_count == 0);
    ASSERT(!todelete->move_pending);
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, shared_data, thread_data_t, ACCT_VMAREAS, PROTECTED);
    shared_data = NULL;
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, todelete, deletion_lists_t, ACCT_VMAREAS, PROTECTED);
    todelete = NULL;
    ASSERT(coarse_to_delete != NULL);
    /* should be freed immediately after each use, during a no-exec flush */
    ASSERT(*coarse_to_delete == NULL);
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, coarse_to_delete, coarse_info_t *,
                   ACCT_VMAREAS, PROTECTED);
    if (DYNAMO_OPTION(unloaded_target_exception)) {
        HEAP_TYPE_FREE(GLOBAL_DCONTEXT, last_deallocated,
                       last_deallocated_t, ACCT_VMAREAS, PROTECTED);
        last_deallocated = NULL;
    } else
        ASSERT(last_deallocated == NULL);
    DELETE_LOCK(last_deallocated_lock);
    vmvector_delete_vector(GLOBAL_DCONTEXT, executable_areas);
    executable_areas = NULL;
    DOLOG(1, LOG_VMAREAS, {
        if (dynamo_areas->buf != NULL) {
            LOG(GLOBAL, LOG_VMAREAS, 1, "DR regions at exit are:\n");
            print_dynamo_areas(GLOBAL);
            LOG(GLOBAL, LOG_VMAREAS, 1, "\n");
        }
    });
    vmvector_delete_vector(GLOBAL_DCONTEXT, dynamo_areas);
    dynamo_areas = NULL;
    DOLOG(1, LOG_VMAREAS, {
        if (written_areas->buf != NULL) {
            LOG(GLOBAL, LOG_VMAREAS, 1, "Code write and selfmod exec counts:\n");
            print_written_areas(GLOBAL);
            LOG(GLOBAL, LOG_VMAREAS, 1, "\n");
        }
    });
    vmvector_delete_vector(GLOBAL_DCONTEXT, pretend_writable_areas);
    pretend_writable_areas = NULL;
    vmvector_delete_vector(GLOBAL_DCONTEXT, patch_proof_areas);
    patch_proof_areas = NULL;
    vmvector_delete_vector(GLOBAL_DCONTEXT, emulate_write_areas);
    emulate_write_areas = NULL;
    vmvector_delete_vector(GLOBAL_DCONTEXT, written_areas);
    written_areas = NULL;
#ifdef PROGRAM_SHEPHERDING
    DOLOG(1, LOG_VMAREAS, {
        if (futureexec_areas->buf != NULL) {
            LOG(GLOBAL, LOG_VMAREAS, 1, "futureexec %d regions at exit are:\n",
                futureexec_areas->length);
        }
        /* print runs unconditionally, as before -- braces above only fix the
         * misleading indentation of the original unbraced if */
        print_futureexec_areas(GLOBAL);
    });
    vmvector_delete_vector(GLOBAL_DCONTEXT, futureexec_areas);
    futureexec_areas = NULL;
    DELETE_LOCK(threads_killed_lock);
# ifdef WINDOWS
    ASSERT(DYNAMO_OPTION(xdata_rct) || vmvector_empty(app_flushed_areas));
    vmvector_delete_vector(GLOBAL_DCONTEXT, app_flushed_areas);
    app_flushed_areas = NULL;
# endif
#endif
#ifdef SIMULATE_ATTACK
    DELETE_LOCK(simulate_lock);
#endif
    vmvector_delete_vector(GLOBAL_DCONTEXT, IAT_areas);
    IAT_areas = NULL;
    return 0;
}
void
vm_areas_thread_reset_init(dcontext_t *dcontext)
{
    /* zero the per-thread data and re-create its fragment-list vector;
     * called at thread init and again after each reset */
    thread_data_t *data = (thread_data_t *) dcontext->vm_areas_field;
    memset(dcontext->vm_areas_field, 0, sizeof(thread_data_t));
    VMVECTOR_INITIALIZE_VECTOR(&data->areas, VECTOR_FRAGMENT_LIST, thread_vm_areas);
    /* data->areas.lock is never used, but we may want to grab it one day,
     * e.g. to print other thread areas */
}
/* N.B.: this is called before vm_areas_init() */
void
vm_areas_thread_init(dcontext_t *dcontext)
{
    /* allocate the per-thread vm-area data and delegate field setup to the
     * reset-init routine so init and post-reset share one code path */
    thread_data_t *data = HEAP_TYPE_ALLOC(dcontext, thread_data_t, ACCT_OTHER, PROTECTED);
    dcontext->vm_areas_field = data;
    vm_areas_thread_reset_init(dcontext);
}
void
vm_areas_thread_reset_free(dcontext_t *dcontext)
{
    /* we free the local areas vector so it will match fragments post-reset
     * FIXME: put it in nonpersistent heap
     */
    thread_data_t *data = (thread_data_t *) dcontext->vm_areas_field;
    /* yes, we end up using global heap for the thread-local area
     * vector...not a big deal, but FIXME sometime
     */
    vmvector_free_vector(GLOBAL_DCONTEXT, &data->areas);
}
void
vm_areas_thread_exit(dcontext_t *dcontext)
{
    /* free the vector contents (shared with the reset path)... */
    vm_areas_thread_reset_free(dcontext);
#ifdef DEBUG
    /* for non-debug we do fast exit path and don't free local heap */
    HEAP_TYPE_FREE(dcontext, dcontext->vm_areas_field, thread_data_t, ACCT_OTHER, PROTECTED);
#endif
}
/****************************************************************************
 * external interface to vm_area_vector_t
 *
 * FIXME: add user data field to vector and to add routine
 * FIXME: have init and destroy routines so don't have to expose
 * vm_area_vector_t struct or declare vector in this file
 */
/* Installs the payload-management callbacks for v: free_func frees an
 * area's custom payload, split_func produces a payload for the new piece
 * when an area is split, and should_merge_func/merge_func decide on and
 * perform payload merging when adjacent areas are coalesced.
 */
void
vmvector_set_callbacks(vm_area_vector_t *v,
                       void (*free_func)(void*),
                       void *(*split_func)(void*),
                       bool (*should_merge_func)(bool, void*, void*),
                       void *(*merge_func)(void*, void*))
{
    bool release_lock; /* 'true' means this routine needs to unlock */
    ASSERT(v != NULL);
    /* This routine mutates the vector's fields, so take the write lock
     * (the previous code took the read lock): a concurrent reader must
     * not observe a partially-updated callback set.
     */
    LOCK_VECTOR(v, release_lock, write);
    v->free_payload_func = free_func;
    v->split_payload_func = split_func;
    v->should_merge_func = should_merge_func;
    v->merge_payload_func = merge_func;
    UNLOCK_VECTOR(v, release_lock, write);
}
/* Prints all areas of v to outf, under the vector's read lock. */
void
vmvector_print(vm_area_vector_t *v, file_t outf)
{
    bool release_lock; /* 'true' means this routine needs to unlock */
    LOCK_VECTOR(v, release_lock, read);
    print_vm_areas(v, outf);
    UNLOCK_VECTOR(v, release_lock, read);
}
/* Adds the region [start, end) to v with custom payload data, under the
 * vector's write lock.  No vm_flags/frag_flags are set.
 */
void
vmvector_add(vm_area_vector_t *v, app_pc start, app_pc end, void *data)
{
    bool release_lock; /* 'true' means this routine needs to unlock */
    LOCK_VECTOR(v, release_lock, write);
    ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock);
    add_vm_area(v, start, end, 0, 0, data _IF_DEBUG(""));
    UNLOCK_VECTOR(v, release_lock, write);
}
/* If an area with exactly the bounds [start, end) already exists in v,
 * swaps in the new payload and returns the previous one; otherwise adds a
 * fresh area carrying data and returns NULL.
 */
void *
vmvector_add_replace(vm_area_vector_t *v, app_pc start, app_pc end, void *data)
{
    void *prior = NULL;
    vm_area_t *existing = NULL;
    bool release_lock; /* 'true' means this routine needs to unlock */
    LOCK_VECTOR(v, release_lock, write);
    ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock);
    if (lookup_addr(v, start, &existing) &&
        existing->start == start && existing->end == end) {
        prior = existing->custom.client;
        existing->custom.client = data;
    } else {
        add_vm_area(v, start, end, 0, 0, data _IF_DEBUG(""));
    }
    UNLOCK_VECTOR(v, release_lock, write);
    return prior;
}
/* Removes the region [start, end) from v under the write lock.
 * Returns whether anything was removed.
 */
bool
vmvector_remove(vm_area_vector_t *v, app_pc start, app_pc end)
{
    bool removed;
    bool release_lock; /* 'true' means this routine needs to unlock */
    LOCK_VECTOR(v, release_lock, write);
    ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock);
    removed = remove_vm_area(v, start, end, false/*leave writability*/);
    UNLOCK_VECTOR(v, release_lock, write);
    return removed;
}
/* Looks up the area encapsulating target pc and removes it.
 * Returns true if found and removed, filling in the optional area bounds;
 * returns false if no containing area exists.
 */
bool
vmvector_remove_containing_area(vm_area_vector_t *v, app_pc pc,
                                app_pc *area_start /* OUT optional */,
                                app_pc *area_end /* OUT optional */)
{
    vm_area_t *containing;
    bool found;
    bool release_lock; /* 'true' means this routine needs to unlock */
    /* The common path is to find an area, and removing it requires the
     * write lock, so grab it up front. */
    LOCK_VECTOR(v, release_lock, write);
    ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock);
    found = lookup_addr(v, pc, &containing);
    if (found) {
        if (area_start != NULL)
            *area_start = containing->start;
        if (area_end != NULL)
            *area_end = containing->end;
        remove_vm_area(v, containing->start, containing->end, false);
    }
    UNLOCK_VECTOR(v, release_lock, write);
    return found;
}
/* Returns whether any area in v intersects [start, end). */
bool
vmvector_overlap(vm_area_vector_t *v, app_pc start, app_pc end)
{
    bool found;
    bool release_lock; /* 'true' means this routine needs to unlock */
    /* Cheap unlocked early-out: an empty vector overlaps nothing. */
    if (vmvector_empty(v))
        return false;
    LOCK_VECTOR(v, release_lock, read);
    ASSERT_OWN_READWRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock);
    found = vm_area_overlap(v, start, end);
    UNLOCK_VECTOR(v, release_lock, read);
    return found;
}
/* Returns the custom data field of the area containing pc, or NULL if no
 * area contains it.  NOTE: access to custom data needs explicit
 * synchronization in addition to vm_area_vector_t's locks!
 */
void *
vmvector_lookup(vm_area_vector_t *v, app_pc pc)
{
    void *payload = NULL;
    vmvector_lookup_data(v, pc, NULL, NULL, &payload);
    return payload;
}
/* Looks up whether pc lies inside some area of v, optionally returning the
 * area's bounds and custom data via the OUT params.  NOTE: access to custom
 * data needs explicit synchronization in addition to vm_area_vector_t's
 * locks!
 */
bool
vmvector_lookup_data(vm_area_vector_t *v, app_pc pc,
                     app_pc *start /* OUT */, app_pc *end /* OUT */,
                     void **data /* OUT */)
{
    vm_area_t *hit = NULL;
    bool found;
    bool release_lock; /* 'true' means this routine needs to unlock */
    LOCK_VECTOR(v, release_lock, read);
    ASSERT_OWN_READWRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock);
    found = lookup_addr(v, pc, &hit);
    if (found) {
        /* Fill in only the OUT params the caller asked for. */
        if (start != NULL)
            *start = hit->start;
        if (end != NULL)
            *end = hit->end;
        if (data != NULL)
            *data = hit->custom.client;
    }
    UNLOCK_VECTOR(v, release_lock, read);
    return found;
}
/* Returns false if pc is inside a vmarea in v.
 * Otherwise returns true and sets *prev to the start pc of the vmarea
 * before pc (NULL if none) and *next to the start pc of the vmarea after
 * pc (POINTER_MAX if none).
 * FIXME: most callers will call this and vmvector_lookup_data():
 * should this routine do both to avoid an extra binary search?
 */
bool
vmvector_lookup_prev_next(vm_area_vector_t *v, app_pc pc,
                          OUT app_pc *prev, OUT app_pc *next)
{
    int idx;
    bool in_gap;
    bool release_lock; /* 'true' means this routine needs to unlock */
    LOCK_VECTOR(v, release_lock, read);
    ASSERT_OWN_READWRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock);
    /* binary_search succeeds on a hit; we succeed when pc is in a gap. */
    in_gap = !binary_search(v, pc, pc + 1, NULL, &idx, false);
    if (in_gap) {
        if (prev != NULL)
            *prev = (idx == -1) ? NULL : v->buf[idx].start;
        if (next != NULL) {
            *next = (idx >= v->length - 1) ? (app_pc) POINTER_MAX
                                           : v->buf[idx + 1].start;
        }
    }
    UNLOCK_VECTOR(v, release_lock, read);
    return in_gap;
}
/* Replaces the custom data field of the area whose bounds are exactly
 * [start, end).  Returns whether an area containing start was found;
 * note the payload is replaced only on an exact bounds match.  NOTE:
 * access to custom data needs explicit synchronization in addition to
 * vm_area_vector_t's locks!
 */
bool
vmvector_modify_data(vm_area_vector_t *v, app_pc start, app_pc end, void *data)
{
    vm_area_t *hit = NULL;
    bool found;
    bool release_lock; /* 'true' means this routine needs to unlock */
    LOCK_VECTOR(v, release_lock, write);
    ASSERT_OWN_WRITE_LOCK(SHOULD_LOCK_VECTOR(v), &v->lock);
    found = lookup_addr(v, start, &hit);
    if (found && hit->start == start && hit->end == end)
        hit->custom.client = data;
    UNLOCK_VECTOR(v, release_lock, write);
    return found;
}
/* Zeroes all of v's fields and installs flags.
 * This routine does NOT initialize the rw lock! use VMVECTOR_INITIALIZE_VECTOR
 */
void
vmvector_init_vector(vm_area_vector_t *v, uint flags)
{
    memset(v, 0, sizeof(*v));
    v->flags = flags;
}
/* Allocates a new vector on dcontext's heap and initializes its fields.
 * This routine does NOT initialize the rw lock! use VMVECTOR_ALLOC_VECTOR instead
 */
vm_area_vector_t *
vmvector_create_vector(dcontext_t *dcontext, uint flags)
{
    vm_area_vector_t *v =
        HEAP_TYPE_ALLOC(dcontext, vm_area_vector_t, ACCT_VMAREAS, PROTECTED);
    vmvector_init_vector(v, flags);
    return v;
}
/* Frees the fields of vm_area_vector_t v (not v itself): the area buffer,
 * plus, in debug builds only, each area's coarse-unit info (for
 * executable_areas) and its comment string.
 */
void
vmvector_reset_vector(dcontext_t *dcontext, vm_area_vector_t *v)
{
    DODEBUG({
        int i;
        /* walk areas and delete coarse info and comments */
        for (i = 0; i < v->length; i++) {
            /* FIXME: this code is duplicated in remove_vm_area() */
            if (TEST(FRAG_COARSE_GRAIN, v->buf[i].frag_flags) &&
                /* FIXME: cleaner test? shared_data copies flags, but uses
                 * custom.frags and not custom.client
                 */
                v == executable_areas) {
                coarse_info_t *info = (coarse_info_t *) v->buf[i].custom.client;
                coarse_info_t *next_info;
                ASSERT(!RUNNING_WITHOUT_CODE_CACHE());
                ASSERT(info != NULL);
                while (info != NULL) { /* loop over primary and secondary unit */
                    next_info = info->non_frozen;
                    ASSERT(info->frozen || info->non_frozen == NULL);
                    coarse_unit_free(GLOBAL_DCONTEXT, info);
                    info = next_info;
                    ASSERT(info == NULL || !info->frozen);
                }
                v->buf[i].custom.client = NULL;
            }
            global_heap_free(v->buf[i].comment, strlen(v->buf[i].comment)+1
                             HEAPACCT(ACCT_VMAREAS));
        }
    });
    /* with thread shared cache it is in fact possible to have no thread local vmareas */
    if (v->buf != NULL) {
        /* FIXME: walk through and make sure frags lists are all freed */
        global_heap_free(v->buf, v->size*sizeof(struct vm_area_t) HEAPACCT(ACCT_VMAREAS));
        v->size = 0;
        v->length = 0;
        v->buf = NULL;
    } else
        ASSERT(v->size == 0 && v->length == 0);
}
/* Frees v's contents and deletes its rw lock; v itself is not freed
 * (see vmvector_delete_vector for that).
 */
static void
vmvector_free_vector(dcontext_t *dcontext, vm_area_vector_t *v)
{
    vmvector_reset_vector(dcontext, v);
    DELETE_READWRITE_LOCK(v->lock);
}
/* Frees the vm_area_vector_t v and its associated memory, first invoking
 * the free-payload callback (if one was set) on each area's custom data.
 */
void
vmvector_delete_vector(dcontext_t *dcontext, vm_area_vector_t *v)
{
    if (v->free_payload_func != NULL) {
        int idx;
        for (idx = 0; idx < v->length; idx++)
            v->free_payload_func(v->buf[idx].custom.client);
    }
    vmvector_free_vector(dcontext, v);
    HEAP_TYPE_FREE(dcontext, v, vm_area_vector_t, ACCT_VMAREAS, PROTECTED);
}
/* vmvector iterator */
/* Initializes an iterator over v; must be paired with a call to
 * vmvector_iterator_stop.  The iterator does not support mutation: shared
 * vectors should in fact detect a deadlock if vmvector_add() or
 * vmvector_remove() is erroneously called during iteration.
 */
void
vmvector_iterator_start(vm_area_vector_t *v, vmvector_iterator_t *vmvi)
{
    ASSERT(v != NULL);
    ASSERT(vmvi != NULL);
    /* Hold the read lock for the whole iteration. */
    if (SHOULD_LOCK_VECTOR(v))
        read_lock(&v->lock);
    vmvi->vector = v;
    vmvi->index = -1; /* positioned just before the first area */
}
/* Returns whether the iterator has another area to visit. */
bool
vmvector_iterator_hasnext(vmvector_iterator_t *vmvi)
{
    ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE);
    return (vmvi->index + 1) < vmvi->vector->length;
}
/* Rewinds the iterator to just before the first area, keeping the lock. */
void
vmvector_iterator_startover(vmvector_iterator_t *vmvi)
{
    ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE);
    vmvi->index = -1;
}
/* Iterator accessor.
 * Has to be initialized with vmvector_iterator_start, and should be
 * called only when vmvector_iterator_hasnext() is true.
 *
 * Returns the next area's custom data and sets its boundaries in
 * area_start and area_end; does NOT advance the iterator.
 */
void*
vmvector_iterator_peek(vmvector_iterator_t *vmvi, /* IN/OUT */
                       app_pc *area_start /* OUT */, app_pc *area_end /* OUT */)
{
    vm_area_t *upcoming;
    int next_idx = vmvi->index + 1;
    ASSERT(vmvector_iterator_hasnext(vmvi));
    ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE);
    ASSERT(next_idx < vmvi->vector->length);
    upcoming = &vmvi->vector->buf[next_idx];
    if (area_start != NULL)
        *area_start = upcoming->start;
    if (area_end != NULL)
        *area_end = upcoming->end;
    return upcoming->custom.client;
}
/* Iterator accessor.
 * Has to be initialized with vmvector_iterator_start, and should be
 * called only when vmvector_iterator_hasnext() is true.
 *
 * Returns the next area's custom data, sets its boundaries in area_start
 * and area_end, and advances the iterator.
 */
void*
vmvector_iterator_next(vmvector_iterator_t *vmvi, /* IN/OUT */
                       app_pc *area_start /* OUT */, app_pc *area_end /* OUT */)
{
    void *payload = vmvector_iterator_peek(vmvi, area_start, area_end);
    ++vmvi->index;
    return payload;
}
/* Releases the iteration lock taken by vmvector_iterator_start; in debug
 * builds also poisons the iterator so incorrect reuse crashes loudly.
 */
void
vmvector_iterator_stop(vmvector_iterator_t *vmvi)
{
    ASSERT_VMAREA_VECTOR_PROTECTED(vmvi->vector, READWRITE);
    if (SHOULD_LOCK_VECTOR(vmvi->vector))
        read_unlock(&vmvi->vector->lock);
    DODEBUG({
        vmvi->vector = NULL; /* crash incorrect reuse */
        vmvi->index = -1;
    });
}
/****************************************************************************
 * routines specific to our own vectors
 */
/* Prints the global executable_areas vector to outf. */
void
print_executable_areas(file_t outf)
{
    vmvector_print(executable_areas, outf);
}
/* Prints the global dynamo_areas vector to outf, bracketed by the
 * dynamo-areas reader synchronization routines.
 */
void
print_dynamo_areas(file_t outf)
{
    dynamo_vm_areas_start_reading();
    print_vm_areas(dynamo_areas, outf);
    dynamo_vm_areas_done_reading();
}
#ifdef PROGRAM_SHEPHERDING
/* Prints the global futureexec_areas vector to outf. */
void
print_futureexec_areas(file_t outf)
{
    vmvector_print(futureexec_areas, outf);
}
#endif
#if defined(DEBUG) && defined(INTERNAL)
/* Prints the global written_areas vector to outf (debug-internal only). */
static void
print_written_areas(file_t outf)
{
    vmvector_print(written_areas, outf);
}
#endif
/* Payload-free callback for written_areas: releases the unprotected
 * ro_vs_sandbox_data_t attached to an area.
 */
static void
free_written_area(void *data)
{
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, (ro_vs_sandbox_data_t *) data,
                   ro_vs_sandbox_data_t, ACCT_VMAREAS, UNPROTECTED);
}
/* Functions as a lookup routine if an entry is already present.
 * Returns true if an entry was already present, false if not, in which
 * case an entry containing tag with suggested bounds of [start, end)
 * (actual bounds may be smaller to avoid overlap) is added.
 * Caller must hold written_areas' write lock.
 */
static bool
add_written_area(vm_area_vector_t *v, app_pc tag, app_pc start,
                 app_pc end, vm_area_t **area)
{
    vm_area_t *a = NULL;
    bool already;
    DEBUG_DECLARE(bool ok;)
    /* currently only one vector */
    ASSERT(v == written_areas);
    ASSERT_OWN_WRITE_LOCK(true, &v->lock);
    ASSERT(tag >= start && tag < end);
    /* re-adding fails for written_areas since no merging, so lookup first */
    already = lookup_addr(v, tag, &a);
    if (!already) {
        app_pc prev_start, next_start;
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "new written executable vm area: "PFX"-"PFX"\n",
            start, end);
        /* case 9179: With no flags, any overlap (in non-tag portion of [start,
         * end)) will result in a merge: so we'll inherit and share counts from
         * any adjacent region(s): maybe better to split?  Rare in any case and
         * not critical.  In case of simultaneous overlap, we take counter from
         * first region, since that's how add_vm_area does the merge.
         */
        /* we can't merge b/c we have hardcoded counter pointers in code
         * in the cache, so we make sure to only add the non-overlap
         */
        DEBUG_DECLARE(ok = ) vmvector_lookup_prev_next(v, tag, &prev_start, &next_start);
        ASSERT(ok); /* else already should be true */
        /* Trim [start, end) so it does not overlap the neighboring areas. */
        if (prev_start != NULL) {
            vm_area_t *prev_area = NULL;
            DEBUG_DECLARE(ok = ) lookup_addr(v, prev_start, &prev_area);
            ASSERT(ok); /* we hold the lock after all */
            if (prev_area->end > start)
                start = prev_area->end;
        }
        if (next_start < (app_pc) POINTER_MAX && end > next_start)
            end = next_start;
        add_vm_area(v, start, end, /* no flags */ 0, 0, NULL _IF_DEBUG(""));
        DEBUG_DECLARE(ok = ) lookup_addr(v, tag, &a);
        ASSERT(ok && a != NULL);
        /* If we merged, we already have an ro2s struct */
        /* FIXME: now that we have merge callback support, should just pass
         * a struct into add_vm_area and avoid this post-lookup
         */
        if (a->custom.client == NULL) {
            /* Since selfmod_execs is written from the cache this must be
             * unprotected.  Attacker changing selfmod_execs or written_count
             * shouldn't be able to cause problems.
             */
            ro_vs_sandbox_data_t *ro2s =
                HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, ro_vs_sandbox_data_t,
                                ACCT_VMAREAS, UNPROTECTED);
            /* selfmod_execs is inc-ed from the cache, and if it crosses a cache
             * line we could have a problem with large thresholds.  We assert on
             * 32-bit alignment here, which our heap alloc currently provides, to
             * ensure no cache line is crossed.
             */
            ASSERT(ALIGNED(ro2s, sizeof(uint)));
            memset(ro2s, 0, sizeof(*ro2s));
            a->custom.client = (void *) ro2s;
        }
    } else {
        LOG(GLOBAL, LOG_VMAREAS, 3,
            "request for written area "PFX"-"PFX" vs existing "PFX"-"PFX"\n",
            start, end, a->start, a->end);
    }
    ASSERT(a != NULL);
    if (area != NULL)
        *area = a;
    return already;
}
#ifdef WINDOWS
/* Adjusts a new executable area with respect to the IAT (Import Address
 * Table).  May advance *start past the IAT, return a pre-IAT sub-region via
 * *delay_start/*delay_end for the caller to add later, merge the region with
 * an adjacent invalidated coarse region (case 8640, setting *existing_area
 * and possibly *info_out or *tofree), and decide coarseness.
 * Returns whether the area should remain coarse or not.
 */
static bool
add_executable_vm_area_check_IAT(app_pc *start /*IN/OUT*/, app_pc *end /*IN/OUT*/,
                                 uint vm_flags,
                                 vm_area_t **existing_area /*OUT*/,
                                 coarse_info_t **info_out /*OUT*/,
                                 coarse_info_t **tofree /*OUT*/,
                                 app_pc *delay_start /*OUT*/,
                                 app_pc *delay_end /*OUT*/)
{
    bool keep_coarse = false;
    app_pc IAT_start = NULL, IAT_end = NULL;
    app_pc orig_start = *start, orig_end = *end;
    ASSERT(existing_area != NULL && info_out != NULL && tofree != NULL);
    ASSERT(delay_start != NULL && delay_end != NULL);
    if (DYNAMO_OPTION(coarse_merge_iat) &&
        get_module_base(*start) != NULL &&
        get_IAT_section_bounds(get_module_base(*start), &IAT_start, &IAT_end) &&
        /* case 1094{5,7}: to match the assumptions of case 10600 we adjust
         * to post-IAT even if the IAT is in the middle, if it's toward the front
         */
        (*start >= IAT_start || (IAT_start - *start < *end - IAT_end)) &&
        *start < IAT_end &&
        /* be paranoid: multi-page IAT where hooker fooled our loader matching
         * could add just 1st page of IAT? */
        *end > IAT_end /* for == avoid an empty region */) {
        /* If a pre-IAT region exists, split if off separately (case 10945).
         * We want to keep as coarse, but we need the post-IAT region to be the
         * primary coarse and the one we try to load a pcache for: so we delay
         * the add.
         * FIXME: should we do a general split around the IAT and make both sides
         * coarse with larger the primary instead of assuming pre-IAT is smaller?
         */
        if (orig_start < IAT_start) {
            LOG(GLOBAL, LOG_VMAREAS, 2,
                "splitting pre-IAT "PFX"-"PFX" off from exec area "PFX"-"PFX"\n",
                orig_start, IAT_start, orig_start, orig_end);
            *delay_start = orig_start;
            *delay_end = IAT_start;
            DOCHECK(1, {
                /* When IAT is in the middle of +rx region we expect .orpc */
                app_pc orpc_start = NULL;
                app_pc orpc_end = NULL;
                get_named_section_bounds(get_module_base(orig_start), ".orpc",
                                         &orpc_start, &orpc_end);
                ASSERT_CURIOSITY(orpc_start == orig_start && orpc_end == IAT_start);
            });
        }
        /* Just abandon [*start, IAT_end) */
        *start = IAT_end;
        ASSERT(*end > *start);
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "adjusting exec area "PFX"-"PFX" to post-IAT "PFX"-"PFX"\n",
            orig_start, *end, *start, *end);
    } else {
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "NOT adjusting exec area "PFX"-"PFX" vs IAT "PFX"-"PFX"\n",
            orig_start, *end, IAT_start, IAT_end);
    }
    if (TEST(VM_UNMOD_IMAGE, vm_flags))
        keep_coarse = true;
    else {
        /* Keep the coarse-grain flag for modified pages only if IAT pages.
         * We want to avoid repeated coarse flushes, so we are
         * very conservative about marking if_rx_text regions coarse: we count on
         * our IAT loader check to make this a do-once.
         * FIXME: Should extend this to also merge on left with .orpc:
         * .orpc at page 1, IAT on page 2, and .text continuing on
         */
        ASSERT(ALIGNED(*end, PAGE_SIZE));
        if (DYNAMO_OPTION(coarse_merge_iat) &&
            vm_flags == 0 /* no other flags */ &&
            /* FIXME: used our stored bounds */
            is_IAT(orig_start, orig_end, true/*page-align*/, NULL, NULL) &&
            is_module_patch_region(GLOBAL_DCONTEXT, orig_start, orig_end,
                                   true/*be conservative*/) &&
            /* We stored the IAT code at +rw time */
            os_module_cmp_IAT_code(orig_start)) {
            vm_area_t *area = NULL;
            bool all_new = !executable_vm_area_overlap(orig_start, orig_end-1,
                                                       true/*wlock*/);
            ASSERT(IAT_start != NULL); /* should have found bounds above */
            if (all_new && /* elseif assumes next call happened */
                lookup_addr(executable_areas, *end, &area) &&
                TEST(FRAG_COARSE_GRAIN, area->frag_flags) &&
                /* Only merge if no execution has yet occurred: else this
                 * must not be normal rebinding */
                !TEST(VM_EXECUTED_FROM, area->vm_flags) &&
                /* Should be marked invalid; else no loader +rw => not rebinding */
                area->custom.client != NULL &&
                TEST(PERSCACHE_CODE_INVALID,
                     ((coarse_info_t *)area->custom.client)->flags)) {
                /* Case 8640: merge IAT page back in to coarse area.
                 * Easier to merge here than in add_vm_area.
                 */
                coarse_info_t *info = (coarse_info_t *) area->custom.client;
                keep_coarse = true;
                LOG(GLOBAL, LOG_VMAREAS, 2,
                    "merging post-IAT ("PFX"-"PFX") with "PFX"-"PFX"\n",
                    IAT_end, orig_end, area->start, area->end);
                ASSERT(area != NULL);
                ASSERT(area->start == *end);
                ASSERT(IAT_end > orig_start && IAT_end < area->start);
                ASSERT(*start == IAT_end); /* set up above */
                /* Extend the following area leftward to cover the new region. */
                *end = area->end;
                area->start = *start;
                *existing_area = area;
                STATS_INC(coarse_merge_IAT);
                /* If info was loaded prior to rebinding just use it.
                 * Else, we need a fresh coarse_info_t if persisted, so rather than
                 * adjust_coarse_unit_bounds on info we must free it.
                 * Due to lock constraints we can't do that while holding
                 * exec areas lock.
                 */
                /* Bounds should match exactly, since we did not adjust them
                 * on the flush; if they don't, don't use the pcache. */
                if (info->base_pc == area->start && info->end_pc == area->end) {
                    info->flags &= ~PERSCACHE_CODE_INVALID;
                    *info_out = info;
                    STATS_INC(coarse_marked_valid);
                    LOG(GLOBAL, LOG_VMAREAS, 2,
                        "\tkeeping now-valid info %s "PFX"-"PFX"\n",
                        info->module, info->base_pc, info->end_pc);
                } else {
                    /* Go ahead and merge, but don't use this pcache */
                    ASSERT_CURIOSITY(false && "post-rebind pcache bounds mismatch");
                    *tofree = info;
                    area->custom.client = NULL;
                    /* FIXME: we'll try to load again: prevent that?  We
                     * know the image hasn't been modified so no real danger. */
                    STATS_INC(perscache_rebind_load);
                }
            } else if (all_new && area == NULL /*nothing following*/) {
                /* Code section is a single page, so was completely flushed
                 * We'll try to re-load the pcache.
                 * FIXME: we already merged the persisted rct tables into
                 * the live tables when we flushed the pcache: so now
                 * we'll have redundancy, and if we flush again we'll waste
                 * time tryingn to re-add (we do check for dups).
                 */
                ASSERT(!lookup_addr(executable_areas, *start, NULL));
                LOG(GLOBAL, LOG_VMAREAS, 2,
                    "marking IAT/code region ("PFX"-"PFX" vs "PFX"-"PFX") as coarse\n",
                    IAT_start, IAT_end, orig_start, orig_end);
                keep_coarse = true;
                STATS_INC(coarse_merge_IAT); /* we use same stat */
            } else {
                LOG(GLOBAL, LOG_VMAREAS, 2,
                    "NOT merging IAT-containing "PFX"-"PFX": abuts non-inv-coarse\n",
                    orig_start, orig_end);
                DOCHECK(1, {
                    if (all_new && area != NULL &&
                        TEST(FRAG_COARSE_GRAIN, area->frag_flags) &&
                        TEST(VM_EXECUTED_FROM, area->vm_flags)) {
                        coarse_info_t *info = (coarse_info_t *) area->custom.client;
                        ASSERT(!info->persisted);
                        ASSERT(!TEST(PERSCACHE_CODE_INVALID, info->flags));
                    }
                });
            }
        } else {
            LOG(GLOBAL, LOG_VMAREAS, 2,
                "NOT merging .text "PFX"-"PFX" vs IAT "PFX"-"PFX" %d %d %d %d %d\n",
                orig_start, orig_end, IAT_start, IAT_end,
                DYNAMO_OPTION(coarse_merge_iat), vm_flags == 0,
                is_IAT(orig_start, *end, true/*page-align*/, NULL, NULL),
                is_module_patch_region(GLOBAL_DCONTEXT, orig_start, orig_end,
                                       true/*be conservative*/),
                os_module_cmp_IAT_code(orig_start));
        }
    }
    return keep_coarse;
}
#endif
/* Adds [start, end) to the global executable_areas list and performs the
 * associated bookkeeping: logs a warning for naturally-writable areas,
 * removes stale futureexec entries for selfmod regions, and attaches a
 * coarse_info_t payload to coarse-grain areas (creating a non-frozen unit
 * when needed, per case 9521).  Caller must hold the executable_areas
 * write lock.
 */
static void
add_executable_vm_area_helper(app_pc start, app_pc end, uint vm_flags, uint frag_flags,
                              coarse_info_t *info _IF_DEBUG(const char *comment))
{
    ASSERT_OWN_WRITE_LOCK(true, &executable_areas->lock);
    add_vm_area(executable_areas, start, end,
                vm_flags, frag_flags, NULL _IF_DEBUG(comment));
    if (TEST(VM_WRITABLE, vm_flags)) {
        /* N.B.: the writable flag indicates the natural state of the memory,
         * not what we have made it be -- we make it read-only before adding
         * to the executable list!
         * FIXME: win32 callback's intercept_call code appears in fragments
         * and is writable...would like to fix that, and coalesce that memory
         * with the generated routines or something
         */
        LOG(GLOBAL, LOG_VMAREAS, 2,
            "WARNING: new executable vm area is writable: "PFX"-"PFX" %s\n",
            start, end, comment);
#if 0
        /* this syslog causes services.exe to hang (ref case 666) once case 666
         * is fixed re-enable if desired FIXME */
        SYSLOG_INTERNAL_WARNING_ONCE("new executable vm area is writable.");
#endif
    }
#ifdef PROGRAM_SHEPHERDING
    if (!DYNAMO_OPTION(selfmod_futureexec) &&
        TEST(FRAG_SELFMOD_SANDBOXED, frag_flags)) {
        /* We do not need future entries for selfmod regions.  We mark
         * the futures as once-only when they are selfmod at future add time, and
         * here we catch those who weren't selfmod then but are now.
         */
        remove_futureexec_vm_area(start, end);
    }
#endif
    if (TEST(FRAG_COARSE_GRAIN, frag_flags)) {
        vm_area_t *area = NULL;
        DEBUG_DECLARE(bool found = )
            lookup_addr(executable_areas, start, &area);
        ASSERT(found && area != NULL);
        /* case 9521: always have one non-frozen coarse unit per coarse region */
        if (info == NULL || info->frozen) {
            coarse_info_t *new_info = coarse_unit_create(start, end,
                                                         (info == NULL) ? NULL :
                                                         &info->module_md5,
                                                         true/*for execution*/);
            LOG(GLOBAL, LOG_VMAREAS, 1, "new %scoarse unit %s "PFX"-"PFX"\n",
                info == NULL ? "" : "secondary ", new_info->module, start, end);
            if (info == NULL)
                info = new_info;
            else
                info->non_frozen = new_info;
        }
        area->custom.client = (void *) info;
    }
    DOLOG(2, LOG_VMAREAS, {
        /* new area could have been split into multiple */
        print_contig_vm_areas(executable_areas, start, end, GLOBAL, "new executable vm area: ");
    });
}
static coarse_info_t *
vm_area_load_coarse_unit(app_pc start, app_pc end, uint vm_flags, uint frag_flags,
bool delayed _IF_DEBUG(const char *comment))
{
coarse_info_t *info;
/* We load persisted cache files at mmap time primarily for RCT
* tables; but to avoid duplicated code, and for simplicity, we do
* so if -use_persisted even if not -use_persisted_rct.
*/
dcontext_t *dcontext = get_thread_private_dcontext();
ASSERT_OWN_WRITE_LOCK(true, &executable_areas->lock);
/* FIXME: we're called before 1st thread is set up. Only a problem
* right now for rac_entries_resurrect() w/ private after-call
* which won't happen w/ -coarse_units that requires shared bbs.
*/
info = coarse_unit_load(dcontext == NULL ? GLOBAL_DCONTEXT : dcontext,
start, end, true/*for execution*/);
if (info != NULL) {
ASSERT(info->base_pc >= start && info->end_pc <= end);
LOG(GLOBAL, LOG_VMAREAS, 1,
"using persisted coarse unit %s "PFX"-"PFX" for "PFX"-"PFX"\n",
info->module, info->base_pc, info->end_pc, start, end);
/* Case 8640/9653/8639: adjust region bounds so that a
* cache consistency event outside the persisted region
* does not invalidate it (mainly targeting loader rebinding).
* We count on FRAG_COARSE_GRAIN preventing any merging of regions.
* We could delay this until code validation, as RCT tables don't care,
* and then we could avoid splitting the region in case validation
* fails: but our plan for lazy per-page validation (case 10601)
* means we can fail post-split even that way. So we go ahead and split
* up front here. For 4.4 we should move this to 1st exec.
*/
if (delayed && (info->base_pc > start || info->end_pc < end)) {
/* we already added a region for the whole range earlier */
remove_vm_area(executable_areas, start, end, false/*leave writability*/);
add_executable_vm_area_helper(info->base_pc, info->end_pc,
vm_flags, frag_flags, info
_IF_DEBUG(comment));
}
if (info->base_pc > start) {
add_executable_vm_area_helper(start, info->base_pc,
vm_flags, frag_flags, NULL
_IF_DEBUG(comment));
start = info->base_pc;
}
if (info->end_pc < end) {
add_executable_vm_area_helper(info->end_pc, end,
vm_flags, frag_flags, NULL
_IF_DEBUG(comment));
end = info->end_pc;
}
/* if !delayed we'll add the region for the unit in caller */
ASSERT(info->frozen && info->persisted);
vm_flags |= VM_PERSISTED_CACHE;
/* For 4.4 we would mark as PERSCACHE_CODE_INVALID here and
* mark valid only at 1st execution when we do md5 checks;
* for 4.3 we're valid until a rebind action.