/* ******************************************************************************
* Copyright (c) 2010-2014 Google, Inc. All rights reserved.
* Copyright (c) 2010-2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2002-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2002-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2002 Hewlett-Packard Company */
/*
* instrument.c - interface for instrumentation
*/
#include "../globals.h" /* just to disable warning C4206 about an empty file */
#include "instrument.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "decode.h"
#include "disassemble.h"
#include "../fragment.h"
#include "../emit.h"
#include "../link.h"
#include "../monitor.h" /* for mark_trace_head */
#include <string.h> /* for strstr */
#include <stdarg.h> /* for varargs */
#include "../nudge.h" /* for nudge_internal() */
#include "../synch.h"
#include "../annotations.h"
#include "../translate.h"
#ifdef UNIX
# include <sys/time.h> /* ITIMER_* */
# include "../unix/module.h" /* redirect_* functions */
#endif
#ifdef CLIENT_INTERFACE
/* in utils.c, not exported to everyone */
extern ssize_t do_file_write(file_t f, const char *fmt, va_list ap);
#ifdef DEBUG
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
#endif
/* PR 200065: User passes us the shared library, we look up "dr_init",
* and call it. From there, the client can register which events it
* wishes to receive.
*/
#define INSTRUMENT_INIT_NAME "dr_init"
/* PR 250952: version check
* If changing this, don't forget to update:
* - lib/dr_defines.h _USES_DR_VERSION_
* - api/docs/footer.html
*/
#define USES_DR_VERSION_NAME "_USES_DR_VERSION_"
/* Should we expose this for use in samples/tracedump.c?
* Also, if we change this, need to change the symlink generation
* in core/CMakeLists.txt: at that point should share single define.
*/
/* OLDEST_COMPATIBLE_VERSION now comes from configure.h */
/* The 3rd version number, the bugfix/patch number, should not affect
 * compatibility, so our version check number simply uses:
 *   major*100 + minor
 * which gives us room for 100 minor versions per major version.
 */
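/* For example, version 4.1 yields a version check number of 4*100 + 1 = 401. */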
#define NEWEST_COMPATIBLE_VERSION CURRENT_API_VERSION
/* Store the unique not-part-of-version build number (the version
* BUILD_NUMBER is limited to 64K and is not guaranteed to be unique)
* somewhere accessible at a customer site. We could alternatively
* pull it out of our DYNAMORIO_DEFINES string.
*/
DR_API const char *unique_build_number = STRINGIFY(UNIQUE_BUILD_NUMBER);
/* Acquired when registering or unregistering event callbacks.
 * Also held when invoking events, which happens much more often
 * than registration changes; hence we use a read-write lock.
 */
DECLARE_CXTSWPROT_VAR(static read_write_lock_t callback_registration_lock,
INIT_READWRITE_LOCK(callback_registration_lock));
/* Structures for maintaining lists of event callbacks */
typedef void (*callback_t)(void);
typedef struct _callback_list_t {
callback_t *callbacks; /* array of callback functions */
size_t num; /* number of callbacks registered */
size_t size; /* allocated space (may be larger than num) */
} callback_list_t;
/* This is a little convoluted. The following is a macro to iterate
* over a list of callbacks and call each function. We use a macro
* instead of a function so we can pass the function type and perform
* a typecast. We need to copy the callback list before iterating to
* support the possibility of one callback unregistering another and
* messing up the list while we're iterating. We'll optimize the case
* for 5 or fewer registered callbacks and stack-allocate the temp
* list. Otherwise, we'll heap-allocate the temp.
*
* We allow the args to use the var "idx" to access the client index.
*
* We consider the first registered callback to have the highest
* priority and call it last. If we gave the last registered callback
* the highest priority, a client could re-register a routine to
* increase its priority. That seems a little weird.
*/
#define FAST_COPY_SIZE 5
#define call_all_ret(ret, retop, postop, vec, type, ...) \
do { \
size_t idx, num; \
/* we will be called even if no callbacks (i.e., (vec).num == 0) */ \
/* we guarantee we're in DR state at all callbacks and clean calls */ \
/* XXX: add CLIENT_ASSERT here */ \
read_lock(&callback_registration_lock); \
num = (vec).num; \
if (num == 0) { \
read_unlock(&callback_registration_lock); \
} \
else if (num <= FAST_COPY_SIZE) { \
callback_t tmp[FAST_COPY_SIZE]; \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
read_unlock(&callback_registration_lock); \
for (idx=0; idx<num; idx++) { \
ret retop (((type)tmp[num-idx-1])(__VA_ARGS__)) postop; \
} \
} \
else { \
callback_t *tmp = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, callback_t, \
num, ACCT_OTHER, UNPROTECTED); \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
read_unlock(&callback_registration_lock); \
for (idx=0; idx<num; idx++) { \
ret retop (((type)tmp[num-idx-1])(__VA_ARGS__)) postop; \
} \
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, tmp, callback_t, num, \
ACCT_OTHER, UNPROTECTED); \
} \
} while (0)
/* It's less error-prone if we just have one call_all macro. We'll
* reuse call_all_ret above for callbacks that don't have a return
* value by assigning to a dummy var. Note that this means we'll
* have to pass an int-returning type to call_all()
*/
#define call_all(vec, type, ...) \
do { \
int dummy; \
call_all_ret(dummy, =, , vec, type, __VA_ARGS__); \
} while (0)
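/* Example usage, mirroring call sites later in this file:
 *   call_all(thread_init_callbacks, int (*)(void *), (void *)dcontext);
 * expands (per callback cb, iterated in reverse registration order) to:
 *   dummy = (((int (*)(void *))cb)((void *)dcontext));
 * and the accumulating variant:
 *   call_all_ret(ret, |=, , bb_callbacks,
 *                int (*)(void *, void *, instrlist_t *, bool, bool),
 *                (void *)dcontext, (void *)tag, bb, for_trace, translating);
 * expands to:
 *   ret |= (((int (*)(void *, void *, instrlist_t *, bool, bool))cb)
 *           ((void *)dcontext, (void *)tag, bb, for_trace, translating));
 */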
/* Lists of callbacks for each event type. Note that init and nudge
* callback lists are kept in the client_lib_t data structure below.
* We could store all lists on a per-client basis, but we can iterate
* over these lists slightly more efficiently if we store all
* callbacks for a specific event in a single list.
*/
static callback_list_t exit_callbacks = {0,};
static callback_list_t thread_init_callbacks = {0,};
static callback_list_t thread_exit_callbacks = {0,};
#ifdef UNIX
static callback_list_t fork_init_callbacks = {0,};
#endif
static callback_list_t bb_callbacks = {0,};
static callback_list_t trace_callbacks = {0,};
#ifdef CUSTOM_TRACES
static callback_list_t end_trace_callbacks = {0,};
#endif
static callback_list_t fragdel_callbacks = {0,};
static callback_list_t restore_state_callbacks = {0,};
static callback_list_t restore_state_ex_callbacks = {0,};
static callback_list_t module_load_callbacks = {0,};
static callback_list_t module_unload_callbacks = {0,};
static callback_list_t filter_syscall_callbacks = {0,};
static callback_list_t pre_syscall_callbacks = {0,};
static callback_list_t post_syscall_callbacks = {0,};
#ifdef WINDOWS
static callback_list_t exception_callbacks = {0,};
#else
static callback_list_t signal_callbacks = {0,};
#endif
#ifdef PROGRAM_SHEPHERDING
static callback_list_t security_violation_callbacks = {0,};
#endif
static callback_list_t persist_ro_size_callbacks = {0,};
static callback_list_t persist_ro_callbacks = {0,};
static callback_list_t resurrect_ro_callbacks = {0,};
static callback_list_t persist_rx_size_callbacks = {0,};
static callback_list_t persist_rx_callbacks = {0,};
static callback_list_t resurrect_rx_callbacks = {0,};
static callback_list_t persist_rw_size_callbacks = {0,};
static callback_list_t persist_rw_callbacks = {0,};
static callback_list_t resurrect_rw_callbacks = {0,};
static callback_list_t persist_patch_callbacks = {0,};
/* An array of client libraries. We use a static array instead of a
* heap-allocated list so we can load the client libs before
* initializing DR's heap.
*/
typedef struct _client_lib_t {
client_id_t id;
char path[MAXIMUM_PATH];
/* PR 366195: dlopen() handle truly is opaque: != start */
shlib_handle_t lib;
app_pc start;
app_pc end;
char options[MAX_OPTION_LENGTH];
/* We need to associate nudge events with a specific client so we
* store that list here in the client_lib_t instead of using a
* single global list.
*/
callback_list_t nudge_callbacks;
} client_lib_t;
/* these should only be modified prior to instrument_init(), since no
* readers of the client_libs array (event handlers, etc.) use synch
*/
static client_lib_t client_libs[MAX_CLIENT_LIBS] = {{0,}};
static size_t num_client_libs = 0;
static void *persist_user_data[MAX_CLIENT_LIBS];
#ifdef WINDOWS
/* private kernel32 lib, used to print to console */
static bool print_to_console;
static shlib_handle_t priv_kernel32;
typedef BOOL (WINAPI *kernel32_WriteFile_t)
(HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED);
static kernel32_WriteFile_t kernel32_WriteFile;
static ssize_t dr_write_to_console_varg(bool to_stdout, const char *fmt, ...);
#endif
bool client_requested_exit;
#ifdef WINDOWS
/* used for nudge support */
static bool block_client_nudge_threads = false;
DECLARE_CXTSWPROT_VAR(static int num_client_nudge_threads, 0);
#endif
#ifdef CLIENT_SIDELINE
/* # of sideline threads */
DECLARE_CXTSWPROT_VAR(static int num_client_sideline_threads, 0);
#endif
#if defined(WINDOWS) || defined(CLIENT_SIDELINE)
/* protects block_client_nudge_threads and incrementing num_client_nudge_threads */
DECLARE_CXTSWPROT_VAR(static mutex_t client_thread_count_lock,
INIT_LOCK_FREE(client_thread_count_lock));
#endif
static vm_area_vector_t *client_aux_libs;
#ifdef WINDOWS
DECLARE_CXTSWPROT_VAR(static mutex_t client_aux_lib64_lock,
INIT_LOCK_FREE(client_aux_lib64_lock));
#endif
/****************************************************************************/
/* INTERNAL ROUTINES */
static void
add_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
if (func == NULL) {
CLIENT_ASSERT(false, "trying to register a NULL callback");
return;
}
if (standalone_library) {
CLIENT_ASSERT(false, "events not supported in standalone library mode");
return;
}
write_lock(&callback_registration_lock);
/* Although we're receiving a pointer to a callback_list_t, we're
* usually modifying a static var.
*/
if (unprotect) {
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
}
/* We may already have an open slot since we allocate in twos and
* because we don't bother to free the storage when we remove the
* callback. Check and only allocate if necessary.
*/
if (vec->num == vec->size) {
callback_t *tmp = HEAP_ARRAY_ALLOC
(GLOBAL_DCONTEXT, callback_t, vec->size + 2, /* grow by 2 at a time */
ACCT_OTHER, UNPROTECTED);
if (tmp == NULL) {
CLIENT_ASSERT(false, "out of memory: can't register callback");
write_unlock(&callback_registration_lock);
return;
}
if (vec->callbacks != NULL) {
memcpy(tmp, vec->callbacks, vec->num * sizeof(callback_t));
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size,
ACCT_OTHER, UNPROTECTED);
}
vec->callbacks = tmp;
vec->size += 2;
}
vec->callbacks[vec->num] = func;
vec->num++;
if (unprotect) {
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
write_unlock(&callback_registration_lock);
}
static bool
remove_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
size_t i;
bool found = false;
if (func == NULL) {
CLIENT_ASSERT(false, "trying to unregister a NULL callback");
return false;
}
write_lock(&callback_registration_lock);
/* Although we're receiving a pointer to a callback_list_t, we're
* usually modifying a static var.
*/
if (unprotect) {
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
}
for (i=0; i<vec->num; i++) {
if (vec->callbacks[i] == func) {
size_t j;
/* shift down the entries on the tail */
for (j=i; j<vec->num-1; j++) {
vec->callbacks[j] = vec->callbacks[j+1];
}
vec->num -= 1;
found = true;
break;
}
}
if (unprotect) {
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
}
write_unlock(&callback_registration_lock);
return found;
}
/* This should only be called prior to instrument_init(),
* since no readers of the client_libs array use synch
* and since this routine assumes .data is writable.
*/
static void
add_client_lib(char *path, char *id_str, char *options)
{
client_id_t id;
shlib_handle_t client_lib;
DEBUG_DECLARE(size_t i);
ASSERT(!dynamo_initialized);
/* if ID not specified, we'll default to 0 */
id = (id_str == NULL) ? 0 : strtoul(id_str, NULL, 16);
#ifdef DEBUG
/* Check for conflicting IDs */
for (i=0; i<num_client_libs; i++) {
CLIENT_ASSERT(client_libs[i].id != id, "Clients have the same ID");
}
#endif
if (num_client_libs == MAX_CLIENT_LIBS) {
CLIENT_ASSERT(false, "Max number of clients reached");
return;
}
LOG(GLOBAL, LOG_INTERP, 4, "about to load client library %s\n", path);
client_lib = load_shared_library(path, true/*reachable*/);
if (client_lib == NULL) {
char msg[MAXIMUM_PATH*4];
char err[MAXIMUM_PATH*2];
shared_library_error(err, BUFFER_SIZE_ELEMENTS(err));
snprintf(msg, BUFFER_SIZE_ELEMENTS(msg),
".\n\tError opening instrumentation library %s:\n\t%s",
path, err);
NULL_TERMINATE_BUFFER(msg);
/* PR 232490 - malformed library names or incorrect
* permissions shouldn't blow up an app in release builds as
* they may happen at customer sites with a third party
* client.
*/
#ifdef UNIX
/* PR 408318: 32-vs-64 errors should NOT be fatal to continue
* in debug build across execve chains. Xref i#147.
* XXX: w/ -private_loader, err always equals "error in private loader"
* and so we never match here!
*/
if (strstr(err, "wrong ELF class") == NULL)
#endif
CLIENT_ASSERT(false, msg);
SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4,
get_application_name(), get_application_pid(), path, msg);
}
else {
/* PR 250952: version check */
int *uses_dr_version = (int *)
lookup_library_routine(client_lib, USES_DR_VERSION_NAME);
if (uses_dr_version == NULL ||
*uses_dr_version < OLDEST_COMPATIBLE_VERSION ||
*uses_dr_version > NEWEST_COMPATIBLE_VERSION) {
/* not a fatal usage error since we want release build to continue */
CLIENT_ASSERT(false,
"client library is incompatible with this version of DR");
SYSLOG(SYSLOG_ERROR, CLIENT_VERSION_INCOMPATIBLE, 2,
get_application_name(), get_application_pid());
}
else {
size_t idx = num_client_libs++;
DEBUG_DECLARE(bool ok;)
client_libs[idx].id = id;
client_libs[idx].lib = client_lib;
DEBUG_DECLARE(ok =)
shared_library_bounds(client_lib, (byte *) uses_dr_version, NULL,
&client_libs[idx].start, &client_libs[idx].end);
ASSERT(ok);
LOG(GLOBAL, LOG_INTERP, 1, "loaded %s at "PFX"-"PFX"\n",
path, client_libs[idx].start, client_libs[idx].end);
#ifdef X64
/* Now that we map the client within the constraints, this request
* should always succeed.
*/
request_region_be_heap_reachable(client_libs[idx].start,
client_libs[idx].end -
client_libs[idx].start);
#endif
strncpy(client_libs[idx].path, path,
BUFFER_SIZE_ELEMENTS(client_libs[idx].path));
NULL_TERMINATE_BUFFER(client_libs[idx].path);
if (options != NULL) {
strncpy(client_libs[idx].options, options,
BUFFER_SIZE_ELEMENTS(client_libs[idx].options));
NULL_TERMINATE_BUFFER(client_libs[idx].options);
}
/* We'll look up dr_init and call it in instrument_init */
}
}
}
void
instrument_load_client_libs(void)
{
if (!IS_INTERNAL_STRING_OPTION_EMPTY(client_lib)) {
char buf[MAX_LIST_OPTION_LENGTH];
char *path;
string_option_read_lock();
strncpy(buf, INTERNAL_OPTION(client_lib), BUFFER_SIZE_ELEMENTS(buf));
string_option_read_unlock();
NULL_TERMINATE_BUFFER(buf);
/* We're expecting path;ID;options triples */
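/* An illustrative value (hypothetical paths):
 *   "/path/to/libclient1.so;1;-opt1 -opt2;/path/to/libclient2.so;2;"
 * IDs are parsed as hex by add_client_lib(); a trailing empty options
 * field is allowed.
 */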
path = buf;
do {
char *id = NULL;
char *options = NULL;
char *next_path = NULL;
id = strstr(path, ";");
if (id != NULL) {
id[0] = '\0';
id++;
options = strstr(id, ";");
if (options != NULL) {
options[0] = '\0';
options++;
next_path = strstr(options, ";");
if (next_path != NULL) {
next_path[0] = '\0';
next_path++;
}
}
}
add_client_lib(path, id, options);
path = next_path;
} while (path != NULL);
}
}
static void
init_client_aux_libs(void)
{
if (client_aux_libs == NULL) {
VMVECTOR_ALLOC_VECTOR(client_aux_libs, GLOBAL_DCONTEXT,
VECTOR_SHARED, client_aux_libs);
}
}
void
instrument_init(void)
{
size_t i;
init_client_aux_libs();
if (num_client_libs > 0) {
/* We no longer distinguish in-DR vs in-client crashes, as many crashes in
* the DR lib are really client bugs.
* We expect most end-user tools to call dr_set_client_name() so we
* have generic defaults here:
*/
set_exception_strings("Tool", "your tool's issue tracker");
}
/* Iterate over the client libs and call each dr_init */
for (i=0; i<num_client_libs; i++) {
void (*init)(client_id_t) = (void (*)(client_id_t))
(lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME));
/* we can't do this in instrument_load_client_libs() b/c vmheap
* is not set up at that point
*/
all_memory_areas_lock();
update_all_memory_areas(client_libs[i].start, client_libs[i].end,
/* FIXME: need to walk the sections: but may be
* better to obfuscate from clients anyway.
* We can't set as MEMPROT_NONE as that leads to
* bugs if the app wants to interpret part of
* its code section (xref PR 504629).
*/
MEMPROT_READ, DR_MEMTYPE_IMAGE);
all_memory_areas_unlock();
/* Since the user has to register all other events, it
* doesn't make sense to provide the -client_lib
* option for a module that doesn't export dr_init.
*/
CLIENT_ASSERT(init != NULL,
"client library does not export a dr_init routine");
(*init)(client_libs[i].id);
}
/* We now initialize the 1st thread before coming here, so we can
* hand the client a dcontext; so we need to specially generate
* the thread init event now. An alternative is to have
* dr_get_global_drcontext(), but that's extra complexity for no
* real reason.
* We raise the thread init event prior to the module load events
* so the client can access a dcontext in module load events (i#1339).
*/
if (thread_init_callbacks.num > 0) {
instrument_thread_init(get_thread_private_dcontext(), false, false);
}
/* If the client just registered the module-load event, let's
* assume it wants to be informed of *all* modules and tell it
* which modules are already loaded. If the client registers the
* event later, it will need to use the module iterator routines
* to retrieve currently loaded modules. We use the dr_module_iterator
* exposed to the client to avoid locking issues.
*/
if (module_load_callbacks.num > 0) {
dr_module_iterator_t *mi = dr_module_iterator_start();
while (dr_module_iterator_hasnext(mi)) {
module_data_t *data = dr_module_iterator_next(mi);
instrument_module_load(data, true /*already loaded*/);
dr_free_module_data(data);
}
dr_module_iterator_stop(mi);
}
}
#ifdef DEBUG
void
free_callback_list(callback_list_t *vec)
{
if (vec->callbacks != NULL) {
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size,
ACCT_OTHER, UNPROTECTED);
vec->callbacks = NULL;
}
vec->size = 0;
vec->num = 0;
}
void
free_all_callback_lists(void)
{
free_callback_list(&exit_callbacks);
free_callback_list(&thread_init_callbacks);
free_callback_list(&thread_exit_callbacks);
#ifdef UNIX
free_callback_list(&fork_init_callbacks);
#endif
free_callback_list(&bb_callbacks);
free_callback_list(&trace_callbacks);
#ifdef CUSTOM_TRACES
free_callback_list(&end_trace_callbacks);
#endif
free_callback_list(&fragdel_callbacks);
free_callback_list(&restore_state_callbacks);
free_callback_list(&restore_state_ex_callbacks);
free_callback_list(&module_load_callbacks);
free_callback_list(&module_unload_callbacks);
free_callback_list(&filter_syscall_callbacks);
free_callback_list(&pre_syscall_callbacks);
free_callback_list(&post_syscall_callbacks);
#ifdef WINDOWS
free_callback_list(&exception_callbacks);
#else
free_callback_list(&signal_callbacks);
#endif
#ifdef PROGRAM_SHEPHERDING
free_callback_list(&security_violation_callbacks);
#endif
free_callback_list(&persist_ro_size_callbacks);
free_callback_list(&persist_ro_callbacks);
free_callback_list(&resurrect_ro_callbacks);
free_callback_list(&persist_rx_size_callbacks);
free_callback_list(&persist_rx_callbacks);
free_callback_list(&resurrect_rx_callbacks);
free_callback_list(&persist_rw_size_callbacks);
free_callback_list(&persist_rw_callbacks);
free_callback_list(&resurrect_rw_callbacks);
free_callback_list(&persist_patch_callbacks);
}
#endif /* DEBUG */
void
instrument_exit(void)
{
DEBUG_DECLARE(size_t i);
/* Note - currently own initexit lock when this is called (see PR 227619). */
/* support dr_get_mcontext() from the exit event */
if (!standalone_library)
get_thread_private_dcontext()->client_data->mcontext_in_dcontext = true;
call_all(exit_callbacks, int (*)(),
/* C variadic macros require at least one argument for "...",
 * so we pass a bogus NULL arg. */
NULL);
#ifdef DEBUG
/* Unload all client libs and free any allocated storage */
for (i=0; i<num_client_libs; i++) {
free_callback_list(&client_libs[i].nudge_callbacks);
unload_shared_library(client_libs[i].lib);
}
free_all_callback_lists();
#endif
vmvector_delete_vector(GLOBAL_DCONTEXT, client_aux_libs);
client_aux_libs = NULL;
#ifdef WINDOWS
DELETE_LOCK(client_aux_lib64_lock);
#endif
#if defined(WINDOWS) || defined(CLIENT_SIDELINE)
DELETE_LOCK(client_thread_count_lock);
#endif
DELETE_READWRITE_LOCK(callback_registration_lock);
}
bool
is_in_client_lib(app_pc addr)
{
/* NOTE: we use this routine for detecting exceptions in
* clients. If we add a callback on that event we'll have to be
* sure to deliver it only to the right client.
*/
size_t i;
for (i=0; i<num_client_libs; i++) {
if ((addr >= (app_pc)client_libs[i].start) &&
(addr < client_libs[i].end)) {
return true;
}
}
if (client_aux_libs != NULL &&
vmvector_overlap(client_aux_libs, addr, addr+1))
return true;
return false;
}
bool
get_client_bounds(client_id_t client_id,
app_pc *start/*OUT*/, app_pc *end/*OUT*/)
{
if (client_id >= num_client_libs)
return false;
if (start != NULL)
*start = (app_pc) client_libs[client_id].start;
if (end != NULL)
*end = (app_pc) client_libs[client_id].end;
return true;
}
const char *
get_client_path_from_addr(app_pc addr)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if ((addr >= (app_pc)client_libs[i].start) &&
(addr < client_libs[i].end)) {
return client_libs[i].path;
}
}
return "";
}
bool
is_valid_client_id(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return true;
}
}
return false;
}
void
dr_register_exit_event(void (*func)(void))
{
add_callback(&exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_exit_event(void (*func)(void))
{
return remove_callback(&exit_callbacks, (void (*)(void))func, true);
}
void
dr_register_bb_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *bb,
bool for_trace, bool translating))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for bb event when code_api is disabled");
return;
}
add_callback(&bb_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_bb_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *bb,
bool for_trace, bool translating))
{
return remove_callback(&bb_callbacks, (void (*)(void))func, true);
}
void
dr_register_trace_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *trace,
bool translating))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for trace event when code_api is disabled");
return;
}
add_callback(&trace_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_trace_event(dr_emit_flags_t (*func)
(void *drcontext, void *tag, instrlist_t *trace,
bool translating))
{
return remove_callback(&trace_callbacks, (void (*)(void))func, true);
}
#ifdef CUSTOM_TRACES
void
dr_register_end_trace_event(dr_custom_trace_action_t (*func)
(void *drcontext, void *tag, void *next_tag))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for end-trace event when code_api is disabled");
return;
}
add_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_end_trace_event(dr_custom_trace_action_t
(*func)(void *drcontext, void *tag, void *next_tag))
{
return remove_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_delete_event(void (*func)(void *drcontext, void *tag))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for delete event when code_api is disabled");
return;
}
add_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_delete_event(void (*func)(void *drcontext, void *tag))
{
return remove_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
void
dr_register_restore_state_event(void (*func)
(void *drcontext, void *tag, dr_mcontext_t *mcontext,
bool restore_memory, bool app_code_consistent))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for restore state event when code_api is disabled");
return;
}
add_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_restore_state_event(void (*func)
(void *drcontext, void *tag, dr_mcontext_t *mcontext,
bool restore_memory, bool app_code_consistent))
{
return remove_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
void
dr_register_restore_state_ex_event(bool (*func) (void *drcontext, bool restore_memory,
dr_restore_state_info_t *info))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for restore_state_ex event when code_api disabled");
return;
}
add_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_restore_state_ex_event(bool (*func) (void *drcontext, bool restore_memory,
dr_restore_state_info_t *info))
{
return remove_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
void
dr_register_thread_init_event(void (*func)(void *drcontext))
{
add_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_thread_init_event(void (*func)(void *drcontext))
{
return remove_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
void
dr_register_thread_exit_event(void (*func)(void *drcontext))
{
add_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_thread_exit_event(void (*func)(void *drcontext))
{
return remove_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
#ifdef UNIX
void
dr_register_fork_init_event(void (*func)(void *drcontext))
{
add_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_fork_init_event(void (*func)(void *drcontext))
{
return remove_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
bool loaded))
{
add_callback(&module_load_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
bool loaded))
{
return remove_callback(&module_load_callbacks, (void (*)(void))func, true);
}
void
dr_register_module_unload_event(void (*func)(void *drcontext,
const module_data_t *info))
{
add_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_module_unload_event(void (*func)(void *drcontext,
const module_data_t *info))
{
return remove_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
#ifdef WINDOWS
void
dr_register_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
add_callback(&exception_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
return remove_callback(&exception_callbacks, (void (*)(void))func, true);
}
#else
void
dr_register_signal_event(dr_signal_action_t (*func)
(void *drcontext, dr_siginfo_t *siginfo))
{
add_callback(&signal_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_signal_event(dr_signal_action_t (*func)
(void *drcontext, dr_siginfo_t *siginfo))
{
return remove_callback(&signal_callbacks, (void (*)(void))func, true);
}
#endif /* WINDOWS */
void
dr_register_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
add_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
return remove_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
add_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
return remove_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
void
dr_register_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
add_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
return remove_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
#ifdef PROGRAM_SHEPHERDING
void
dr_register_security_event(void (*func)(void *drcontext, void *source_tag,
app_pc source_pc, app_pc target_pc,
dr_security_violation_type_t violation,
dr_mcontext_t *mcontext,
dr_security_violation_action_t *action))
{
add_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_security_event(void (*func)(void *drcontext, void *source_tag,
app_pc source_pc, app_pc target_pc,
dr_security_violation_type_t violation,
dr_mcontext_t *mcontext,
dr_security_violation_action_t *action))
{
return remove_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
#endif
void
dr_register_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
add_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func,
/* the nudge callback list is stored on the heap, so
* we don't need to unprotect the .data section when
* we update the list */
false);
return;
}
}
CLIENT_ASSERT(false, "dr_register_nudge_event: invalid client ID");
}
bool
dr_unregister_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return remove_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func,
/* the nudge callback list is stored on the heap, so
* we don't need to unprotect the .data section when
* we update the list */
false);
}
}
CLIENT_ASSERT(false, "dr_unregister_nudge_event: invalid client ID");
return false;
}
dr_config_status_t
dr_nudge_client_ex(process_id_t process_id, client_id_t client_id,
uint64 argument, uint timeout_ms)
{
if (process_id == get_process_id()) {
size_t i;
#ifdef WINDOWS
pre_second_thread();
#endif
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == client_id) {
if (client_libs[i].nudge_callbacks.num == 0) {
CLIENT_ASSERT(false, "dr_nudge_client: no nudge handler registered");
return DR_FAILURE;
}
return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
client_id, timeout_ms);
}
}
return DR_FAILURE;
} else {
return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
client_id, timeout_ms);
}
}
bool
dr_nudge_client(client_id_t client_id, uint64 argument)
{
return dr_nudge_client_ex(get_process_id(), client_id, argument, 0) == DR_SUCCESS;
}
#ifdef WINDOWS
DR_API
bool
dr_is_nudge_thread(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "invalid parameter to dr_is_nudge_thread");
return dcontext->nudge_target != NULL;
}
#endif
void
instrument_client_thread_init(dcontext_t *dcontext, bool client_thread)
{
if (dcontext->client_data == NULL) {
dcontext->client_data = HEAP_TYPE_ALLOC(dcontext, client_data_t,
ACCT_OTHER, UNPROTECTED);
memset(dcontext->client_data, 0x0, sizeof(client_data_t));
#ifdef CLIENT_SIDELINE
ASSIGN_INIT_LOCK_FREE(dcontext->client_data->sideline_mutex, sideline_mutex);
#endif
CLIENT_ASSERT(dynamo_initialized || thread_init_callbacks.num == 0 ||
client_thread,
"1st call to instrument_thread_init should have no cbs");
}
#ifdef CLIENT_SIDELINE
if (client_thread) {
ATOMIC_INC(int, num_client_sideline_threads);
/* We don't call dynamo_thread_not_under_dynamo() b/c we want itimers. */
dcontext->thread_record->under_dynamo_control = false;
dcontext->client_data->is_client_thread = true;
}
#endif /* CLIENT_SIDELINE */
}
void
instrument_thread_init(dcontext_t *dcontext, bool client_thread, bool valid_mc)
{
/* Note that we're called twice for the initial thread: once prior
 * to instrument_init() (PR 216936) to set up the dcontext client
 * field (at which point there should be no callbacks, since the
 * client has not yet had a chance to register any; that setup is now
 * split out into instrument_client_thread_init(), though both
 * routines are still called prior to instrument_init()), and once
 * after instrument_init() to raise the client event.
 */
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
bool swap_peb = false;
#endif
if (client_thread) {
/* no init event */
return;
}
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
/* i#996: we might be in app's state.
* It is simpler to check and swap here than earlier on thread init paths.
*/
if (INTERNAL_OPTION(private_peb) && should_swap_peb_pointer() &&
dr_using_app_state(dcontext)) {
swap_peb_pointer(dcontext, true/*to priv*/);
swap_peb = true;
}
#endif
/* i#117/PR 395156: support dr_get_mcontext() from the thread init event */
if (valid_mc)
dcontext->client_data->mcontext_in_dcontext = true;
call_all(thread_init_callbacks, int (*)(void *), (void *)dcontext);
if (valid_mc)
dcontext->client_data->mcontext_in_dcontext = false;
#if defined(CLIENT_INTERFACE) && defined(WINDOWS)
if (swap_peb)
swap_peb_pointer(dcontext, false/*to app*/);
#endif
}
#ifdef UNIX
void
instrument_fork_init(dcontext_t *dcontext)
{
call_all(fork_init_callbacks, int (*)(void *), (void *)dcontext);
}
#endif
/* PR 536058: split the exit event from thread cleanup, to provide a
* dcontext in the process exit event
*/
void
instrument_thread_exit_event(dcontext_t *dcontext)
{
#ifdef CLIENT_SIDELINE
if (IS_CLIENT_THREAD(dcontext)
/* if nudge thread calls dr_exit_process() it will be marked as a client
* thread: rule it out here so we properly clean it up
*/
IF_WINDOWS(&& dcontext->nudge_target == NULL)) {
ATOMIC_DEC(int, num_client_sideline_threads);
/* no exit event */
return;
}
#endif
/* i#1394: best-effort to try to avoid crashing thread exit events
* where thread init was never called.
*/
if (!dynamo_initialized)
return;
/* support dr_get_mcontext() from the exit event */
dcontext->client_data->mcontext_in_dcontext = true;
/* Note - currently own initexit lock when this is called (see PR 227619). */
call_all(thread_exit_callbacks, int (*)(void *), (void *)dcontext);
}
void
instrument_thread_exit(dcontext_t *dcontext)
{
#ifdef DEBUG
client_todo_list_t *todo;
client_flush_req_t *flush;
#endif
#ifdef DEBUG
/* PR 470957: avoid racy crashes by not freeing in release build */
# ifdef CLIENT_SIDELINE
DELETE_LOCK(dcontext->client_data->sideline_mutex);
# endif
/* could be heap space allocated for the todo list */
todo = dcontext->client_data->to_do;
while (todo != NULL) {
client_todo_list_t *next_todo = todo->next;
if (todo->ilist != NULL) {
instrlist_clear_and_destroy(dcontext, todo->ilist);
}
HEAP_TYPE_FREE(dcontext, todo, client_todo_list_t, ACCT_CLIENT, UNPROTECTED);
todo = next_todo;
}
/* could be heap space allocated for the flush list */
flush = dcontext->client_data->flush_list;
while (flush != NULL) {
client_flush_req_t *next_flush = flush->next;
HEAP_TYPE_FREE(dcontext, flush, client_flush_req_t, ACCT_CLIENT, UNPROTECTED);
flush = next_flush;
}
HEAP_TYPE_FREE(dcontext, dcontext->client_data, client_data_t,
ACCT_OTHER, UNPROTECTED);
dcontext->client_data = NULL; /* for mutex_wait_contended_lock() */
#endif /* DEBUG */
}
bool
dr_bb_hook_exists(void)
{
return (bb_callbacks.num > 0);
}
bool
dr_trace_hook_exists(void)
{
return (trace_callbacks.num > 0);
}
bool
dr_fragment_deleted_hook_exists(void)
{
return (fragdel_callbacks.num > 0);
}
bool
dr_end_trace_hook_exists(void)
{
return (end_trace_callbacks.num > 0);
}
bool
dr_thread_exit_hook_exists(void)
{
return (thread_exit_callbacks.num > 0);
}
bool
dr_exit_hook_exists(void)
{
return (exit_callbacks.num > 0);
}
bool
dr_xl8_hook_exists(void)
{
return (restore_state_callbacks.num > 0 ||
restore_state_ex_callbacks.num > 0);
}
bool
hide_tag_from_client(app_pc tag)
{
#ifdef WINDOWS
/* Case 10009: Basic blocks that consist of a single jump into the
* interception buffer should be obscured from clients. Clients
* will see the displaced code, so we'll provide the address of this
* block if the client asks for the address of the displaced code.
*
* Note that we assume the jump is the first instruction in the
* BB for any blocks that jump to the interception buffer.
*/
if (is_intercepted_app_pc(tag, NULL) ||
/* Displaced app code is now in the landing pad, so skip the
* jump from the interception buffer to the landing pad
*/
is_in_interception_buffer(tag) ||
/* Landing pads that exist between hook points and the trampolines
* shouldn't be seen by the client too. PR 250294.
*/
is_on_interception_initial_route(tag) ||
/* PR 219351: if we lose control on a callback and get it back on
* one of our syscall trampolines, we'll appear at the jmp out of
* the interception buffer to the int/sysenter instruction. The
* problem is that our syscall trampolines, unlike our other
* intercepted code, are hooked earlier than the real action point
* and we have displaced app code at the start of the interception
* buffer: we hook at the wrapper entrance and return w/ a jmp to
* the sysenter/int instr. When creating bbs at the start we hack
* it to make it look like there is no hook. But on retaking control
* we end up w/ this jmp out that won't be solved w/ our normal
* mechanism for other hook jmp-outs: so we just suppress and the
* client next sees the post-syscall bb. It already saw a gap.
*/
is_syscall_trampoline(tag, NULL))
return true;
#endif
return false;
}
#ifdef DEBUG
/* PR 214962: client must set translation fields */
static void
check_ilist_translations(instrlist_t *ilist)
{
/* Ensure client set the translation field for all non-meta
* instrs, even if it didn't return DR_EMIT_STORE_TRANSLATIONS
* (since we may decide ourselves to store)
*/
instr_t *in;
for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) {
if (!instr_opcode_valid(in)) {
CLIENT_ASSERT(INTERNAL_OPTION(fast_client_decode), "level 0 instr found");
} else if (instr_is_app(in)) {
DOLOG(LOG_INTERP, 1, {
if (instr_get_translation(in) == NULL)
loginst(get_thread_private_dcontext(), 1, in, "translation is NULL");
});
CLIENT_ASSERT(instr_get_translation(in) != NULL,
"translation field must be set for every app instruction");
} else {
/* The meta instr might indeed not affect app state, but it seems
 * better to assert and require an (even empty) restore event
 * callback in that case. */
DOLOG(LOG_INTERP, 1, {
if (instr_get_translation(in) != NULL &&
!instr_is_our_mangling(in) &&
!dr_xl8_hook_exists())
loginst(get_thread_private_dcontext(), 1, in, "translation != NULL");
});
CLIENT_ASSERT(instr_get_translation(in) == NULL ||
instr_is_our_mangling(in) ||
dr_xl8_hook_exists(),
/* FIXME: if multiple clients, we need to check that this
* particular client has the callback: but we have
* no way to do that other than looking at library
* bounds...punting for now */
"a meta instr should not have its translation field "
"set without also having a restore_state callback");
}
}
}
#endif
/* Returns true if the bb hook is called */
bool
instrument_basic_block(dcontext_t *dcontext, app_pc tag, instrlist_t *bb,
bool for_trace, bool translating, dr_emit_flags_t *emitflags)
{
dr_emit_flags_t ret = DR_EMIT_DEFAULT;
/* return false if no BB hooks are registered */
if (bb_callbacks.num == 0)
return false;
if (hide_tag_from_client(tag)) {
LOG(THREAD, LOG_INTERP, 3, "hiding tag "PFX" from client\n", tag);
return false;
}
/* do not expand or up-decode the instrlist, client gets to choose
* whether and how to do that
*/
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\ninstrument_basic_block ******************\n");
LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, bb, THREAD);
#endif
/* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
if (!translating && !for_trace)
dcontext->client_data->mcontext_in_dcontext = true;
/* Note - currently we are couldbelinking and hold the
* bb_building lock when this is called (see PR 227619).
*/
/* We or together the return values */
call_all_ret(ret, |=, , bb_callbacks,
int (*) (void *, void *, instrlist_t *, bool, bool),
(void *)dcontext, (void *)tag, bb, for_trace, translating);
if (emitflags != NULL)
*emitflags = ret;
DOCHECK(1, { check_ilist_translations(bb); });
dcontext->client_data->mcontext_in_dcontext = false;
if (IF_DEBUG_ELSE(for_trace, false)) {
CLIENT_ASSERT(instrlist_get_return_target(bb) == NULL &&
instrlist_get_fall_through_target(bb) == NULL,
"instrlist_set_return/fall_through_target"
" cannot be used on traces");
}
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, bb, THREAD);
#endif
return true;
}
/* Give the user the completely mangled and optimized trace just prior
 * to emitting into the code cache; the user gets a final crack at it.
 */
dr_emit_flags_t
instrument_trace(dcontext_t *dcontext, app_pc tag, instrlist_t *trace,
bool translating)
{
dr_emit_flags_t ret = DR_EMIT_DEFAULT;
#ifdef UNSUPPORTED_API
instr_t *instr;
#endif
if (trace_callbacks.num == 0)
return DR_EMIT_DEFAULT;
/* do not expand or up-decode the instrlist, client gets to choose
* whether and how to do that
*/
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\ninstrument_trace ******************\n");
LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, trace, THREAD);
#endif
/* We always pass Level 3 instrs to the client, since we no longer
* expose the expansion routines.
*/
#ifdef UNSUPPORTED_API
for (instr = instrlist_first_expanded(dcontext, trace);
instr != NULL;
instr = instr_get_next_expanded(dcontext, trace, instr)) {
instr_decode(dcontext, instr);
}
/* ASSUMPTION: all ctis are already at Level 3, so we don't have
* to do a separate pass to fix up intra-list targets like
* instrlist_decode_cti() does
*/
#endif
/* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
if (!translating)
dcontext->client_data->mcontext_in_dcontext = true;
/* We or together the return values */
call_all_ret(ret, |=, , trace_callbacks,
int (*)(void *, void *, instrlist_t *, bool),
(void *)dcontext, (void *)tag, trace, translating);
DOCHECK(1, { check_ilist_translations(trace); });
CLIENT_ASSERT(instrlist_get_return_target(trace) == NULL &&
instrlist_get_fall_through_target(trace) == NULL,
"instrlist_set_return/fall_through_target"
" cannot be used on traces");
dcontext->client_data->mcontext_in_dcontext = false;
#ifdef DEBUG
LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
if (stats->loglevel >= 3 && (stats->logmask & LOG_INTERP) != 0)
instrlist_disassemble(dcontext, tag, trace, THREAD);
#endif
return ret;
}
/* Notify user when a fragment is deleted from the cache
* FIXME PR 242544: how does user know whether this is a shadowed copy or the
* real thing? The user might free memory that shouldn't be freed!
*/
void
instrument_fragment_deleted(dcontext_t *dcontext, app_pc tag, uint flags)
{
if (fragdel_callbacks.num == 0)
return;
#ifdef WINDOWS
/* Case 10009: We don't call the basic block hook for blocks that
* are jumps to the interception buffer, so we'll hide them here
* as well.
*/
if (!TEST(FRAG_IS_TRACE, flags) && hide_tag_from_client(tag))
return;
#endif
/* PR 243008: we don't expose GLOBAL_DCONTEXT, so change to NULL.
* Our comments warn the user about this.
*/
if (dcontext == GLOBAL_DCONTEXT)
dcontext = NULL;
call_all(fragdel_callbacks, int (*)(void *, void *),
(void *)dcontext, (void *)tag);
}
bool
instrument_restore_state(dcontext_t *dcontext, bool restore_memory,
dr_restore_state_info_t *info)
{
bool res = true;
/* Support both legacy and extended handlers */
if (restore_state_callbacks.num > 0) {
call_all(restore_state_callbacks,
int (*)(void *, void *, dr_mcontext_t *, bool, bool),
(void *)dcontext, info->fragment_info.tag, info->mcontext,
restore_memory, info->fragment_info.app_code_consistent);
}
if (restore_state_ex_callbacks.num > 0) {
/* i#220/PR 480565: client has option of failing the translation.
* We fail it if any client wants to, short-circuiting in that case.
* This does violate the "priority order" of events where the
* last one is supposed to have final say b/c it won't even
* see the event (xref i#424).
*/
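/* The macro expands each iteration to:
 *   res = res && (((int (*)(void *, bool, dr_restore_state_info_t *))cb)
 *                 ((void *)dcontext, restore_memory, info));
 * so C's && short-circuiting skips the remaining callbacks once one
 * of them fails the translation.
 */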
call_all_ret(res, = res &&, , restore_state_ex_callbacks,
int (*)(void *, bool, dr_restore_state_info_t *),
(void *)dcontext, restore_memory, info);
}
CLIENT_ASSERT(!restore_memory || res,
"translation should not fail for restore_memory=true");
return res;
}
#ifdef CUSTOM_TRACES
/* Ask whether to end trace prior to adding next_tag fragment.
* Return values:
* CUSTOM_TRACE_DR_DECIDES = use standard termination criteria
* CUSTOM_TRACE_END_NOW = end trace
* CUSTOM_TRACE_CONTINUE = do not end trace
*/
dr_custom_trace_action_t
instrument_end_trace(dcontext_t *dcontext, app_pc trace_tag, app_pc next_tag)
{
dr_custom_trace_action_t ret = CUSTOM_TRACE_DR_DECIDES;
if (end_trace_callbacks.num == 0)
return ret;
/* Highest priority callback decides how to end the trace (see
* call_all_ret implementation)
*/
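/* With retop "=", each iteration overwrites ret; since callbacks run
 * in reverse registration order, the first-registered (highest
 * priority) callback runs last and its return value is kept.
 */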
call_all_ret(ret, =, , end_trace_callbacks, int (*)(void *, void *, void *),
(void *)dcontext, (void *)trace_tag, (void *)next_tag);
return ret;
}
#endif
static module_data_t *
create_and_initialize_module_data(app_pc start, app_pc end, app_pc entry_point,
uint flags, const module_names_t *names,
const char *full_path
#ifdef WINDOWS
, version_number_t file_version,
version_number_t product_version,
uint checksum, uint timestamp,
size_t mod_size
#else
, bool contiguous,
uint num_segments,
module_segment_t *os_segments,
module_segment_data_t *segments,
uint timestamp
# ifdef MACOS
, uint current_version,
uint compatibility_version,
const byte uuid[16]
# endif
#endif
)
{
#ifndef WINDOWS
uint i;
#endif
module_data_t *copy = (module_data_t *)
HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, module_data_t, ACCT_CLIENT, UNPROTECTED);
memset(copy, 0, sizeof(module_data_t));
copy->start = start;
copy->end = end;
copy->entry_point = entry_point;
copy->flags = flags;
if (full_path != NULL)
copy->full_path = dr_strdup(full_path HEAPACCT(ACCT_CLIENT));
if (names->module_name != NULL)
copy->names.module_name = dr_strdup(names->module_name HEAPACCT(ACCT_CLIENT));
if (names->file_name != NULL)
copy->names.file_name = dr_strdup(names->file_name HEAPACCT(ACCT_CLIENT));
#ifdef WINDOWS
if (names->exe_name != NULL)
copy->names.exe_name = dr_strdup(names->exe_name HEAPACCT(ACCT_CLIENT));
if (names->rsrc_name != NULL)
copy->names.rsrc_name = dr_strdup(names->rsrc_name HEAPACCT(ACCT_CLIENT));
copy->file_version = file_version;
copy->product_version = product_version;
copy->checksum = checksum;
copy->timestamp = timestamp;
copy->module_internal_size = mod_size;
#else
copy->contiguous = contiguous;
copy->num_segments = num_segments;
copy->segments = (module_segment_data_t *)
HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, module_segment_data_t,
num_segments, ACCT_VMAREAS, PROTECTED);
if (os_segments != NULL) {
for (i = 0; i < num_segments; i++) {
copy->segments[i].start = os_segments[i].start;
copy->segments[i].end = os_segments[i].end;
copy->segments[i].prot = os_segments[i].prot;
}
} else
memcpy(copy->segments, segments, num_segments*sizeof(module_segment_data_t));
copy->timestamp = timestamp;
# ifdef MACOS
copy->current_version = current_version;
copy->compatibility_version = compatibility_version;
memcpy(copy->uuid, uuid, sizeof(copy->uuid));
# endif
#endif
return copy;
}
module_data_t *
copy_module_area_to_module_data(const module_area_t *area)
{
if (area == NULL)
return NULL;
return create_and_initialize_module_data(area->start, area->end, area->entry_point,
0, &area->names, area->full_path
#ifdef WINDOWS
, area->os_data.file_version,
area->os_data.product_version,
area->os_data.checksum,
area->os_data.timestamp,
area->os_data.module_internal_size
#else
, area->os_data.contiguous,
area->os_data.num_segments,
area->os_data.segments,
NULL,
area->os_data.timestamp
# ifdef MACOS
, area->os_data.current_version,
area->os_data.compatibility_version,
area->os_data.uuid
# endif
#endif
);
}
DR_API
/* Makes a copy of a module_data_t for returning to the client. We return a copy so
* we don't have to hold the module areas list lock while in the client (xref PR 225020).
* Note - data is allowed to be NULL. */
module_data_t *
dr_copy_module_data(const module_data_t *data)
{
if (data == NULL)
return NULL;
return create_and_initialize_module_data(data->start, data->end, data->entry_point,
0, &data->names, data->full_path
#ifdef WINDOWS
, data->file_version,
data->product_version,
data->checksum, data->timestamp,
data->module_internal_size
#else
, data->contiguous,
data->num_segments,
NULL,
data->segments,
data->timestamp
# ifdef MACOS
, data->current_version,
data->compatibility_version,
data->uuid
# endif
#endif
);
}
DR_API
/* Used to free a module_data_t created by dr_copy_module_data() */
void
dr_free_module_data(module_data_t *data)
{
dcontext_t *dcontext = get_thread_private_dcontext();
if (data == NULL)
return;
if (dcontext != NULL && data == dcontext->client_data->no_delete_mod_data) {
CLIENT_ASSERT(false, "dr_free_module_data: don\'t free module_data passed to "
"the image load or image unload event callbacks.");
return;
}
#ifdef UNIX
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, data->segments, module_segment_data_t,
data->num_segments, ACCT_VMAREAS, PROTECTED);
#endif
if (data->full_path != NULL)
dr_strfree(data->full_path HEAPACCT(ACCT_CLIENT));
free_module_names(&data->names HEAPACCT(ACCT_CLIENT));
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, data, module_data_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
bool
dr_module_contains_addr(const module_data_t *data, app_pc addr)
{
/* XXX: this duplicates module_contains_addr(), but we have two different
* data structures (module_area_t and module_data_t) so it's hard to share.
*/
#ifdef WINDOWS
return (addr >= data->start && addr < data->end);
#else
if (data->contiguous)
return (addr >= data->start && addr < data->end);
else {
uint i;
for (i = 0; i < data->num_segments; i++) {
if (addr >= data->segments[i].start && addr < data->segments[i].end)
return true;
}
}
return false;
#endif
}
/* Looks up the being-loaded module at modbase and invokes the client event */
void
instrument_module_load_trigger(app_pc modbase)
{
/* see notes in module_list_add(), where we used to do this: but
* we need this to be after exec areas processing so module is
* in consistent state in case client acts on it, even though
* we have to re-look-up the data here.
*/
if (!IS_STRING_OPTION_EMPTY(client_lib)) {
module_area_t *ma;
module_data_t *client_data = NULL;
os_get_module_info_lock();
ma = module_pc_lookup(modbase);
ASSERT(ma != NULL);
if (ma != NULL) {
client_data = copy_module_area_to_module_data(ma);
os_get_module_info_unlock();
instrument_module_load(client_data, false /*loading now*/);
dr_free_module_data(client_data);
} else
os_get_module_info_unlock();
}
}
/* Notify user when a module is loaded */
void
instrument_module_load(module_data_t *data, bool previously_loaded)
{
/* Note - during DR initialization this routine is called before we've set up a
* dcontext for the main thread and before we've called instrument_init. It's okay
* since there's no way a callback will be registered and we'll return immediately. */
dcontext_t *dcontext;
if (module_load_callbacks.num == 0)
return;
dcontext = get_thread_private_dcontext();
/* client shouldn't delete this */
dcontext->client_data->no_delete_mod_data = data;
call_all(module_load_callbacks, int (*)(void *, module_data_t *, bool),
(void *)dcontext, data, previously_loaded);
dcontext->client_data->no_delete_mod_data = NULL;
}
/* Notify user when a module is unloaded */
void
instrument_module_unload(module_data_t *data)
{
dcontext_t *dcontext;
if (module_unload_callbacks.num == 0)
return;
dcontext = get_thread_private_dcontext();
/* client shouldn't delete this */
dcontext->client_data->no_delete_mod_data = data;
call_all(module_unload_callbacks, int (*)(void *, module_data_t *),
(void *)dcontext, data);
dcontext->client_data->no_delete_mod_data = NULL;
}
/* returns whether this sysnum should be intercepted */
bool
instrument_filter_syscall(dcontext_t *dcontext, int sysnum)
{
bool ret = false;
/* if client does not filter then we don't intercept anything */
if (filter_syscall_callbacks.num == 0)
return ret;
/* if any client wants to intercept, then we intercept */
call_all_ret(ret, =, || ret, filter_syscall_callbacks, bool (*)(void *, int),
(void *)dcontext, sysnum);
return ret;
}
/* returns whether this syscall should execute */
bool
instrument_pre_syscall(dcontext_t *dcontext, int sysnum)
{
bool exec = true;
dcontext->client_data->in_pre_syscall = true;
/* clear flag from dr_syscall_invoke_another() */
dcontext->client_data->invoke_another_syscall = false;
if (pre_syscall_callbacks.num > 0) {
/* Skip syscall if any client wants to skip it, but don't short-circuit,
* as skipping syscalls is usually done when the effect of the syscall
* will be emulated in some other way. The app is typically meant to
* think that the syscall succeeded. Thus, other tool components
* should see the syscall as well (xref i#424).
*/
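/* Expansion: exec = (cb(dcontext, sysnum)) && exec; the callback is
 * invoked before the && with exec, so every client still sees the
 * event even after one of them has requested a skip.
 */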
call_all_ret(exec, =, && exec, pre_syscall_callbacks,
bool (*)(void *, int), (void *)dcontext, sysnum);
}
dcontext->client_data->in_pre_syscall = false;
return exec;
}
void
instrument_post_syscall(dcontext_t *dcontext, int sysnum)
{
if (post_syscall_callbacks.num == 0)
return;
dcontext->client_data->in_post_syscall = true;
call_all(post_syscall_callbacks, int (*)(void *, int),
(void *)dcontext, sysnum);
dcontext->client_data->in_post_syscall = false;
}
bool
instrument_invoke_another_syscall(dcontext_t *dcontext)
{
return dcontext->client_data->invoke_another_syscall;
}
#ifdef WINDOWS
/* Notify user of exceptions. Note: not called for RaiseException */
bool
instrument_exception(dcontext_t *dcontext, dr_exception_t *exception)
{
bool res = true;
/* We short-circuit if any client wants to "own" the fault and not pass on.
* This does violate the "priority order" of events where the last one is
* supposed to have final say b/c it won't even see the event: but only one
* registrant should own it (xref i#424).
*/
call_all_ret(res, = res &&, , exception_callbacks,
bool (*)(void *, dr_exception_t *),
(void *)dcontext, exception);
return res;
}
#else
dr_signal_action_t
instrument_signal(dcontext_t *dcontext, dr_siginfo_t *siginfo)
{
dr_signal_action_t ret = DR_SIGNAL_DELIVER;
/* We short-circuit if any client wants to do other than deliver to the app.
* This does violate the "priority order" of events where the last one is
* supposed to have final say b/c it won't even see the event: but only one
* registrant should own the signal (xref i#424).
*/
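/* Expansion: ret = ret == DR_SIGNAL_DELIVER ? (cb(dcontext, siginfo)) : ret;
 * the ternary invokes the callback only while ret is still
 * DR_SIGNAL_DELIVER, so remaining callbacks are skipped once a client
 * returns a different action.
 */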
call_all_ret(ret, = ret == DR_SIGNAL_DELIVER ? , : ret, signal_callbacks,
dr_signal_action_t (*)(void *, dr_siginfo_t *),
(void *)dcontext, siginfo);
return ret;
}
bool
dr_signal_hook_exists(void)
{
return (signal_callbacks.num > 0);
}
#endif /* WINDOWS */
#ifdef PROGRAM_SHEPHERDING
/* Notify user when a security violation is detected */
void
instrument_security_violation(dcontext_t *dcontext, app_pc target_pc,
security_violation_t violation, action_type_t *action)
{
dr_security_violation_type_t dr_violation;
dr_security_violation_action_t dr_action, dr_action_original;
app_pc source_pc = NULL;
fragment_t *last;
dr_mcontext_t dr_mcontext;
dr_mcontext_init(&dr_mcontext);
if (security_violation_callbacks.num == 0)
return;
if (!priv_mcontext_to_dr_mcontext(&dr_mcontext, get_mcontext(dcontext)))
return;
/* FIXME - the source_tag, source_pc, and context can all be incorrect if the
* violation ends up occurring in the middle of a bb we're building. See case
* 7380 which we should fix in interp.c.
*/
/* Obtain the source addr to pass to the client. xref case 285 --
* we're using the more heavy-weight solution 2) here, but that
* should be okay since we already have the overhead of calling
* into the client. */
last = dcontext->last_fragment;
if (!TEST(FRAG_FAKE, last->flags)) {
cache_pc pc = EXIT_CTI_PC(last, dcontext->last_exit);
source_pc = recreate_app_pc(dcontext, pc, last);
}
/* FIXME - set pc field of dr_mcontext_t. We'll probably want it
* for thread start and possibly apc/callback events as well.
*/
switch (violation) {
case STACK_EXECUTION_VIOLATION:
dr_violation = DR_RCO_STACK_VIOLATION;
break;
case HEAP_EXECUTION_VIOLATION:
dr_violation = DR_RCO_HEAP_VIOLATION;
break;
case RETURN_TARGET_VIOLATION:
dr_violation = DR_RCT_RETURN_VIOLATION;
break;
case RETURN_DIRECT_RCT_VIOLATION:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_violation = DR_UNKNOWN_VIOLATION;
break;
case INDIRECT_CALL_RCT_VIOLATION:
dr_violation = DR_RCT_INDIRECT_CALL_VIOLATION;
break;
case INDIRECT_JUMP_RCT_VIOLATION:
dr_violation = DR_RCT_INDIRECT_JUMP_VIOLATION;
break;
default:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_violation = DR_UNKNOWN_VIOLATION;
break;
}
switch (*action) {
case ACTION_TERMINATE_PROCESS:
dr_action = DR_VIOLATION_ACTION_KILL_PROCESS;
break;
case ACTION_CONTINUE:
dr_action = DR_VIOLATION_ACTION_CONTINUE;
break;
case ACTION_TERMINATE_THREAD:
dr_action = DR_VIOLATION_ACTION_KILL_THREAD;
break;
case ACTION_THROW_EXCEPTION:
dr_action = DR_VIOLATION_ACTION_THROW_EXCEPTION;
break;
default:
ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
dr_action = DR_VIOLATION_ACTION_CONTINUE;
break;
}
dr_action_original = dr_action;
/* NOTE - last->tag should be valid here (even if the frag is fake since the
* coarse wrappers set the tag). FIXME - for traces we really want the bb tag not
* the trace tag, should get that. Of course the only real reason we pass source
* tag is because we can't always give a valid source_pc. */
/* Note that the last registered function gets the final crack at
* changing the action.
*/
call_all(security_violation_callbacks,
int (*)(void *, void *, app_pc, app_pc, dr_security_violation_type_t,
dr_mcontext_t *, dr_security_violation_action_t *),
(void *)dcontext, last->tag, source_pc, target_pc,
dr_violation, &dr_mcontext, &dr_action);
if (dr_action != dr_action_original) {
switch(dr_action) {
case DR_VIOLATION_ACTION_KILL_PROCESS:
*action = ACTION_TERMINATE_PROCESS;
break;
case DR_VIOLATION_ACTION_KILL_THREAD:
*action = ACTION_TERMINATE_THREAD;
break;
case DR_VIOLATION_ACTION_THROW_EXCEPTION:
*action = ACTION_THROW_EXCEPTION;
break;
case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT:
/* FIXME - not safe to implement till case 7380 is fixed. */
CLIENT_ASSERT(false, "action DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT "
"not yet supported.");
/* note - no break, fall through */
case DR_VIOLATION_ACTION_CONTINUE:
*action = ACTION_CONTINUE;
break;
default:
CLIENT_ASSERT(false, "Security violation event callback returned invalid "
"action value.");
}
}
}
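/* Example (illustrative sketch): a client registered via
 * dr_register_security_event() can override the action through the
 * out-parameter, matching the callback type in the call_all above:
 *
 *   static void
 *   event_security(void *drcontext, void *source_tag, app_pc source_pc,
 *                  app_pc target_pc, dr_security_violation_type_t violation,
 *                  dr_mcontext_t *mcontext,
 *                  dr_security_violation_action_t *action)
 *   {
 *       if (violation == DR_RCO_STACK_VIOLATION)
 *           *action = DR_VIOLATION_ACTION_KILL_THREAD;
 *   }
 */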
#endif
/* Notify the client of a nudge. */
void
instrument_nudge(dcontext_t *dcontext, client_id_t id, uint64 arg)
{
size_t i;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT &&
dcontext == get_thread_private_dcontext());
/* synch_with_all_threads and flush API assume that client nudge threads
* hold no dr locks and are !couldbelinking while in client lib code */
ASSERT_OWN_NO_LOCKS();
ASSERT(!is_couldbelinking(dcontext));
/* find the client the nudge is intended for */
for (i=0; i<num_client_libs; i++) {
/* until we have nudge-arg support (PR 477454), nudges target the 1st client */
if (IF_VMX86_ELSE(true, client_libs[i].id == id)) {
break;
}
}
if (i == num_client_libs || client_libs[i].nudge_callbacks.num == 0)
return;
#ifdef WINDOWS
/* count the number of nudge events so we can make sure they're
* all finished before exiting
*/
mutex_lock(&client_thread_count_lock);
if (block_client_nudge_threads) {
/* FIXME - would be nice if there was a way to let the external agent know that
* the nudge event wasn't delivered (but this only happens when the process
* is detaching or exiting). */
mutex_unlock(&client_thread_count_lock);
return;
}
/* atomic to avoid locking around the dec */
ATOMIC_INC(int, num_client_nudge_threads);
mutex_unlock(&client_thread_count_lock);
/* We need to mark this as a client controlled thread for synch_with_all_threads
* and otherwise treat it as native. Xref PR 230836 on what to do if this
* thread hits native_exec_syscalls hooks.
* XXX: this requires extra checks for "not a nudge thread" after IS_CLIENT_THREAD
* in get_stack_bounds() and instrument_thread_exit_event(): maybe better
* to have synchall checks do extra checks and have IS_CLIENT_THREAD be
* false for nudge threads at exit time?
*/
dcontext->client_data->is_client_thread = true;
dcontext->thread_record->under_dynamo_control = false;
#else
/* Support calling dr_get_mcontext() on this thread. The app
* context should be intact in the current mcontext except the
* pc, which we set from next_tag.
*/
CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
"internal inconsistency in where mcontext is");
dcontext->client_data->mcontext_in_dcontext = true;
/* officially get_mcontext() doesn't always set pc: we do anyway */
get_mcontext(dcontext)->pc = dcontext->next_tag;
#endif
call_all(client_libs[i].nudge_callbacks, int (*)(void *, uint64),
(void *)dcontext, arg);
#ifdef UNIX
dcontext->client_data->mcontext_in_dcontext = false;
#else
dcontext->thread_record->under_dynamo_control = true;
dcontext->client_data->is_client_thread = false;
ATOMIC_DEC(int, num_client_nudge_threads);
#endif
}
int
get_num_client_threads(void)
{
int num = IF_WINDOWS_ELSE(num_client_nudge_threads, 0);
# ifdef CLIENT_SIDELINE
num += num_client_sideline_threads;
# endif
return num;
}
#ifdef WINDOWS
/* wait for all nudges to finish */
void
wait_for_outstanding_nudges(void)
{
/* block any new nudge threads from starting */
mutex_lock(&client_thread_count_lock);
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
block_client_nudge_threads = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
DOLOG(1, LOG_TOP, {
if (num_client_nudge_threads > 0) {
LOG(GLOBAL, LOG_TOP, 1,
"Waiting for %d nudges to finish - app is about to kill all threads "
"except the current one.\n", num_client_nudge_threads);
}
});
/* don't wait if the client requested exit: after all the client might
* have done so from a nudge, and if the client does want to exit it's
* its own problem if it misses nudges (and external nudgers should use
* a finite timeout)
*/
if (client_requested_exit) {
mutex_unlock(&client_thread_count_lock);
return;
}
while (num_client_nudge_threads > 0) {
/* yield with lock released to allow nudges to finish */
mutex_unlock(&client_thread_count_lock);
dr_thread_yield();
mutex_lock(&client_thread_count_lock);
}
mutex_unlock(&client_thread_count_lock);
}
#endif /* WINDOWS */
/****************************************************************************/
/* EXPORTED ROUTINES */
DR_API
/* Creates a DR context that can be used in a standalone program.
* WARNING: this context cannot be used as the drcontext for a thread
* running under DR control! It is only for standalone programs that
* wish to use DR as a library of disassembly, etc. routines.
*/
void *
dr_standalone_init(void)
{
dcontext_t *dcontext = standalone_init();
return (void *) dcontext;
}
DR_API
/* Aborts the process immediately */
void
dr_abort(void)
{
if (TEST(DUMPCORE_DR_ABORT, dynamo_options.dumpcore_mask))
os_dump_core("dr_abort");
os_terminate(NULL, TERMINATE_PROCESS);
}
DR_API
void
dr_exit_process(int exit_code)
{
dcontext_t *dcontext = get_thread_private_dcontext();
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
/* Prevent cleanup from waiting for nudges as this may be called
* from a nudge!
* Also suppress leak asserts, as it's hard to clean up from
* some situations (such as DrMem -crash_at_error).
*/
client_requested_exit = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
#ifdef WINDOWS
if (dcontext != NULL && dcontext->nudge_target != NULL) {
/* We need to free the nudge thread stack, which may involve
* switching stacks, so we have the nudge thread invoke
* os_terminate for us.
*/
nudge_thread_cleanup(dcontext, true/*kill process*/, exit_code);
CLIENT_ASSERT(false, "shouldn't get here");
}
#endif
if (!is_currently_on_dstack(dcontext)
IF_UNIX(&& !is_currently_on_sigaltstack(dcontext))) {
/* if on app stack or sigaltstack, avoid incorrect leak assert at exit */
SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
dr_api_exit = true;
SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* to keep properly nested */
}
os_terminate_with_code(dcontext, /* dcontext is required */
TERMINATE_CLEANUP|TERMINATE_PROCESS, exit_code);
CLIENT_ASSERT(false, "shouldn't get here");
}
DR_API
bool
dr_create_memory_dump(dr_memory_dump_spec_t *spec)
{
if (spec->size != sizeof(dr_memory_dump_spec_t))
return false;
#ifdef WINDOWS
if (TEST(DR_MEMORY_DUMP_LDMP, spec->flags))
return os_dump_core_live(spec->label, spec->ldmp_path, spec->ldmp_path_size);
#endif
return false;
}
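/* Example (illustrative sketch; currently only the LDMP flavor above is
 * implemented, and only on Windows): callers must fill in the size field
 * so the struct version can be checked:
 *
 *   dr_memory_dump_spec_t spec;
 *   char path[MAXIMUM_PATH];
 *   spec.size = sizeof(spec);
 *   spec.flags = DR_MEMORY_DUMP_LDMP;
 *   spec.label = "my-label";
 *   spec.ldmp_path = path;
 *   spec.ldmp_path_size = BUFFER_SIZE_ELEMENTS(path);
 *   if (!dr_create_memory_dump(&spec))
 *       dr_fprintf(STDERR, "dump failed\n");
 */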
DR_API
/* Returns true if all DynamoRIO caches are thread private. */
bool
dr_using_all_private_caches(void)
{
return !SHARED_FRAGMENTS_ENABLED();
}
DR_API
void
dr_request_synchronized_exit(void)
{
SYSLOG_INTERNAL_WARNING_ONCE("dr_request_synchronized_exit deprecated: "
"use dr_set_process_exit_behavior instead");
}
DR_API
void
dr_set_process_exit_behavior(dr_exit_flags_t flags)
{
if ((!DYNAMO_OPTION(multi_thread_exit) && TEST(DR_EXIT_MULTI_THREAD, flags)) ||
(DYNAMO_OPTION(multi_thread_exit) && !TEST(DR_EXIT_MULTI_THREAD, flags))) {
options_make_writable();
dynamo_options.multi_thread_exit = TEST(DR_EXIT_MULTI_THREAD, flags);
options_restore_readonly();
}
if ((!DYNAMO_OPTION(skip_thread_exit_at_exit) &&
TEST(DR_EXIT_SKIP_THREAD_EXIT, flags)) ||
(DYNAMO_OPTION(skip_thread_exit_at_exit) &&
!TEST(DR_EXIT_SKIP_THREAD_EXIT, flags))) {
options_make_writable();
dynamo_options.skip_thread_exit_at_exit = TEST(DR_EXIT_SKIP_THREAD_EXIT, flags);
options_restore_readonly();
}
}
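/* Example (illustrative): a client willing to trade exit-time thread events
 * for a faster, multi-threaded process exit can request both behaviors:
 *
 *   dr_set_process_exit_behavior(DR_EXIT_MULTI_THREAD | DR_EXIT_SKIP_THREAD_EXIT);
 */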
DR_API
/* Returns the option string passed along with a client path via DR's
* -client_lib option.
*/
const char *
dr_get_options(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return client_libs[i].options;
}
}
CLIENT_ASSERT(false, "dr_get_options(): invalid client id");
return NULL;
}
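/* Example (illustrative sketch; the -verbose token and the verbose flag are
 * hypothetical): clients typically parse their option string once at init:
 *
 *   DR_EXPORT void
 *   dr_init(client_id_t id)
 *   {
 *       const char *opts = dr_get_options(id);
 *       if (opts != NULL && strstr(opts, "-verbose") != NULL)
 *           verbose = true;
 *   }
 */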
DR_API
/* Returns the path to the client library. Client must pass its ID */
const char *
dr_get_client_path(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return client_libs[i].path;
}
}
CLIENT_ASSERT(false, "dr_get_client_path(): invalid client id");
return NULL;
}
DR_API
byte *
dr_get_client_base(client_id_t id)
{
size_t i;
for (i=0; i<num_client_libs; i++) {
if (client_libs[i].id == id) {
return client_libs[i].start;
}
}
CLIENT_ASSERT(false, "dr_get_client_base(): invalid client id");
return NULL;
}
DR_API
bool
dr_set_client_name(const char *name, const char *report_URL)
{
/* Although set_exception_strings() accepts NULL, clients should pass real vals. */
if (name == NULL || report_URL == NULL)
return false;
set_exception_strings(name, report_URL);
return true;
}
DR_API const char *
dr_get_application_name(void)
{
#ifdef UNIX
return get_application_short_name();
#else
return get_application_short_unqualified_name();
#endif
}
DR_API process_id_t
dr_get_process_id(void)
{
return (process_id_t) get_process_id();
}
#ifdef UNIX
DR_API
process_id_t
dr_get_parent_id(void)
{
return get_parent_id();
}
#endif
#ifdef WINDOWS
DR_API
process_id_t
dr_convert_handle_to_pid(HANDLE process_handle)
{
ASSERT(POINTER_MAX == INVALID_PROCESS_ID);
return process_id_from_handle(process_handle);
}
DR_API
HANDLE
dr_convert_pid_to_handle(process_id_t pid)
{
return process_handle_from_id(pid);
}
DR_API
/**
* Returns information about the version of the operating system.
* Returns whether successful.
*/
bool
dr_get_os_version(dr_os_version_info_t *info)
{
int ver;
uint sp_major, sp_minor;
get_os_version_ex(&ver, &sp_major, &sp_minor);
if (info->size > offsetof(dr_os_version_info_t, version)) {
switch (ver) {
case WINDOWS_VERSION_8_1: info->version = DR_WINDOWS_VERSION_8_1; break;
case WINDOWS_VERSION_8: info->version = DR_WINDOWS_VERSION_8; break;
case WINDOWS_VERSION_7: info->version = DR_WINDOWS_VERSION_7; break;
case WINDOWS_VERSION_VISTA: info->version = DR_WINDOWS_VERSION_VISTA; break;
case WINDOWS_VERSION_2003: info->version = DR_WINDOWS_VERSION_2003; break;
case WINDOWS_VERSION_XP: info->version = DR_WINDOWS_VERSION_XP; break;
case WINDOWS_VERSION_2000: info->version = DR_WINDOWS_VERSION_2000; break;
case WINDOWS_VERSION_NT: info->version = DR_WINDOWS_VERSION_NT; break;
default: CLIENT_ASSERT(false, "unsupported windows version");
}
} else
return false; /* struct too small for any info */
if (info->size > offsetof(dr_os_version_info_t, service_pack_major)) {
info->service_pack_major = sp_major;
if (info->size > offsetof(dr_os_version_info_t, service_pack_minor)) {
info->service_pack_minor = sp_minor;
}
}
return true;
}
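/* Example (illustrative): the size field doubles as a version check, so
 * callers must set it before querying:
 *
 *   dr_os_version_info_t info;
 *   info.size = sizeof(info);
 *   if (dr_get_os_version(&info) && info.version == DR_WINDOWS_VERSION_XP)
 *       apply_xp_workaround();
 *
 * apply_xp_workaround() is a hypothetical client routine.
 */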
DR_API
bool
dr_is_wow64(void)
{
return is_wow64_process(NT_CURRENT_PROCESS);
}
DR_API
void *
dr_get_app_PEB(void)
{
return get_own_peb();
}
#endif
DR_API
/* Retrieves the current time */
void
dr_get_time(dr_time_t *time)
{
convert_millis_to_date(query_time_millis(), time);
}
DR_API
uint64
dr_get_milliseconds(void)
{
return query_time_millis();
}
DR_API
uint
dr_get_random_value(uint max)
{
return (uint) get_random_offset(max);
}
DR_API
void
dr_set_random_seed(uint seed)
{
set_random_seed(seed);
}
DR_API
uint
dr_get_random_seed(void)
{
return get_random_seed();
}
/***************************************************************************
* MEMORY ALLOCATION
*
* XXX i#774: once we split vmheap from vmcode, we need to make
* dr_thread_alloc(), dr_global_alloc(), and dr_nonheap_alloc()
* all allocate vmcode-reachable memory. Library-redirected
* allocations do not need to be reachable.
*/
DR_API
/* Allocates memory from DR's memory pool specific to the
* thread associated with drcontext.
*/
void *
dr_thread_alloc(void *drcontext, size_t size)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
return heap_alloc(dcontext, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees thread-specific memory allocated by dr_thread_alloc.
* size must be the same size passed to dr_thread_alloc.
*/
void
dr_thread_free(void *drcontext, void *mem, size_t size)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
CLIENT_ASSERT(drcontext != NULL, "dr_thread_free: drcontext cannot be NULL");
CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
"dr_thread_free: drcontext is invalid");
heap_free(dcontext, mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Allocates memory from DR's global memory pool.
*/
void *
dr_global_alloc(size_t size)
{
return global_heap_alloc(size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees memory allocated by dr_global_alloc.
* size must be the same size passed to dr_global_alloc.
*/
void
dr_global_free(void *mem, size_t size)
{
global_heap_free(mem, size HEAPACCT(ACCT_CLIENT));
}
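/* Example (illustrative; my_data_t is a hypothetical client type): callers
 * must remember the allocation size themselves, as these routines do not
 * store it:
 *
 *   my_data_t *d = (my_data_t *) dr_global_alloc(sizeof(*d));
 *   ...
 *   dr_global_free(d, sizeof(*d));
 */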
DR_API
/* PR 352427: API routine to allocate executable memory */
void *
dr_nonheap_alloc(size_t size, uint prot)
{
return heap_mmap_ex(size, size, prot, false/*no guard pages*/);
}
DR_API
void
dr_nonheap_free(void *mem, size_t size)
{
heap_munmap_ex(mem, size, false/*no guard pages*/);
}
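/* Example (illustrative): generating code at runtime requires executable
 * memory, which the regular heap routines above do not hand out:
 *
 *   byte *code = (byte *) dr_nonheap_alloc(PAGE_SIZE, DR_MEMPROT_READ |
 *                                          DR_MEMPROT_WRITE | DR_MEMPROT_EXEC);
 *   ...
 *   dr_nonheap_free(code, PAGE_SIZE);
 */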
static void *
raw_mem_alloc(size_t size, uint prot, void *addr, dr_alloc_flags_t flags)
{
byte *p;
heap_error_code_t error_code;
CLIENT_ASSERT(ALIGNED(addr, PAGE_SIZE), "addr is not page size aligned");
if (!TEST(DR_ALLOC_NON_DR, flags)) {
/* memory alloc/dealloc and updating DR list must be atomic */
dynamo_vm_areas_lock(); /* if we already hold the lock this is a nop */
}
addr = (void *)ALIGN_BACKWARD(addr, PAGE_SIZE);
size = ALIGN_FORWARD(size, PAGE_SIZE);
#ifdef WINDOWS
if (TEST(DR_ALLOC_LOW_2GB, flags)) {
CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"cannot combine commit-only and low-2GB");
p = os_heap_reserve_in_region(NULL, (byte *)(ptr_uint_t)0x80000000, size,
&error_code, TEST(DR_MEMPROT_EXEC, flags));
if (p != NULL && !TEST(DR_ALLOC_RESERVE_ONLY, flags)) {
if (!os_heap_commit(p, size, prot, &error_code)) {
os_heap_free(p, size, &error_code);
p = NULL;
}
}
} else
#endif
{
/* We specify that DR_ALLOC_LOW_2GB only applies to x64, so it's
* ok that the Linux kernel will ignore MAP_32BIT for 32-bit.
*/
#ifdef UNIX
uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
#else
uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY :
(TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
#endif
if (IF_WINDOWS(TEST(DR_ALLOC_COMMIT_ONLY, flags) &&)
addr != NULL &&
!app_memory_pre_alloc(get_thread_private_dcontext(), addr, size, prot,
false))
p = NULL;
else
p = os_raw_mem_alloc(addr, size, prot, os_flags, &error_code);
}
if (p != NULL) {
if (TEST(DR_ALLOC_NON_DR, flags)) {
all_memory_areas_lock();
update_all_memory_areas(p, p+size, prot, DR_MEMTYPE_DATA);
all_memory_areas_unlock();
} else {
/* this routine updates allmem for us: */
add_dynamo_vm_area((app_pc)p, ((app_pc)p)+size, prot,
true _IF_DEBUG("fls cb in private lib"));
}
}
if (!TEST(DR_ALLOC_NON_DR, flags))
dynamo_vm_areas_unlock();
return p;
}
static bool
raw_mem_free(void *addr, size_t size, dr_alloc_flags_t flags)
{
bool res;
heap_error_code_t error_code;
byte *p = addr;
#ifdef UNIX
uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
#else
uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags) ? RAW_ALLOC_RESERVE_ONLY :
(TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
#endif
if (TEST(DR_ALLOC_NON_DR, flags)) {
/* Use the lock to avoid a racy update on parallel memory allocation:
* e.g., an allocation from another thread at p could happen after
* os_heap_free but before remove_from_all_memory_areas.
*/
all_memory_areas_lock();
} else {
/* memory alloc/dealloc and updating DR list must be atomic */
dynamo_vm_areas_lock(); /* if we already hold the lock this is a nop */
}
res = os_raw_mem_free(p, size, os_flags, &error_code);
if (TEST(DR_ALLOC_NON_DR, flags)) {
remove_from_all_memory_areas(p, p + size);
all_memory_areas_unlock();
} else {
/* this routine updates allmem for us: */
remove_dynamo_vm_area((app_pc)addr, ((app_pc)addr)+size);
}
if (!TEST(DR_ALLOC_NON_DR, flags))
dynamo_vm_areas_unlock();
return res;
}
DR_API
void *
dr_raw_mem_alloc(size_t size, uint prot, void *addr)
{
return raw_mem_alloc(size, prot, addr, DR_ALLOC_NON_DR);
}
DR_API
bool
dr_raw_mem_free(void *addr, size_t size)
{
return raw_mem_free(addr, size, DR_ALLOC_NON_DR);
}
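/* Example (illustrative): raw OS-level pages that DR tracks as app memory
 * (note the DR_ALLOC_NON_DR flag applied above):
 *
 *   void *p = dr_raw_mem_alloc(PAGE_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE,
 *                              NULL);
 *   if (p != NULL) {
 *       ...
 *       dr_raw_mem_free(p, PAGE_SIZE);
 *   }
 */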
static void *
custom_memory_shared(bool alloc, void *drcontext, dr_alloc_flags_t flags, size_t size,
uint prot, void *addr, bool *free_res)
{
CLIENT_ASSERT(alloc || free_res != NULL, "must ask for free_res on free");
CLIENT_ASSERT(alloc || addr != NULL, "cannot free NULL");
CLIENT_ASSERT(!TESTALL(DR_ALLOC_NON_DR|DR_ALLOC_CACHE_REACHABLE, flags),
"dr_custom_alloc: cannot combine non-DR and cache-reachable");
CLIENT_ASSERT(!alloc || TEST(DR_ALLOC_FIXED_LOCATION, flags) || addr == NULL,
"dr_custom_alloc: address only honored for fixed location");
#ifdef WINDOWS
CLIENT_ASSERT(!TESTANY(DR_ALLOC_RESERVE_ONLY | DR_ALLOC_COMMIT_ONLY, flags) ||
TESTALL(DR_ALLOC_NON_HEAP|DR_ALLOC_NON_DR, flags),
"dr_custom_alloc: reserve/commit-only are only for non-DR non-heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_RESERVE_ONLY, flags) ||
!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: cannot combine reserve-only + commit-only");
#endif
if (TEST(DR_ALLOC_NON_HEAP, flags)) {
CLIENT_ASSERT(drcontext == NULL,
"dr_custom_alloc: drcontext must be NULL for non-heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_THREAD_PRIVATE, flags),
"dr_custom_alloc: non-heap cannot be thread-private");
CLIENT_ASSERT(!TESTALL(DR_ALLOC_CACHE_REACHABLE|DR_ALLOC_LOW_2GB, flags),
"dr_custom_alloc: cannot combine low-2GB and cache-reachable");
#ifdef WINDOWS
CLIENT_ASSERT(addr != NULL || !TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: commit-only requires non-NULL addr");
#endif
if (TEST(DR_ALLOC_LOW_2GB, flags)) {
#ifdef WINDOWS
CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"dr_custom_alloc: cannot combine commit-only and low-2GB");
#endif
CLIENT_ASSERT(!alloc || addr == NULL,
"dr_custom_alloc: cannot pass an addr with low-2GB");
/* Even if not non-DR, easier to allocate via raw */
if (alloc)
return raw_mem_alloc(size, prot, addr, flags);
else
*free_res = raw_mem_free(addr, size, flags);
} else if (TEST(DR_ALLOC_NON_DR, flags)) {
/* ok for addr to be NULL */
if (alloc)
return raw_mem_alloc(size, prot, addr, flags);
else
*free_res = raw_mem_free(addr, size, flags);
} else { /* including DR_ALLOC_CACHE_REACHABLE */
CLIENT_ASSERT(!alloc || !TEST(DR_ALLOC_CACHE_REACHABLE, flags) ||
addr == NULL,
"dr_custom_alloc: cannot ask for addr and cache-reachable");
/* This flag is here solely so we know which version of free to call */
if (TEST(DR_ALLOC_FIXED_LOCATION, flags)) {
CLIENT_ASSERT(addr != NULL,
"dr_custom_alloc: fixed location requires an address");
if (alloc)
return raw_mem_alloc(size, prot, addr, 0);
else
*free_res = raw_mem_free(addr, size, 0);
} else {
if (alloc)
return dr_nonheap_alloc(size, prot);
else {
*free_res = true;
dr_nonheap_free(addr, size);
}
}
}
} else {
if (!alloc)
*free_res = true;
CLIENT_ASSERT(!alloc || addr == NULL,
"dr_custom_alloc: cannot pass an addr for heap memory");
CLIENT_ASSERT(drcontext == NULL || TEST(DR_ALLOC_THREAD_PRIVATE, flags),
"dr_custom_alloc: drcontext must be NULL for global heap");
CLIENT_ASSERT(!TEST(DR_ALLOC_LOW_2GB, flags),
"dr_custom_alloc: cannot ask for heap in low 2GB");
CLIENT_ASSERT(!TEST(DR_ALLOC_NON_DR, flags),
"dr_custom_alloc: cannot ask for non-DR heap memory");
/* for now it's all cache-reachable so we ignore DR_ALLOC_CACHE_REACHABLE */
if (TEST(DR_ALLOC_THREAD_PRIVATE, flags)) {
if (alloc)
return dr_thread_alloc(drcontext, size);
else
dr_thread_free(drcontext, addr, size);
} else {
if (alloc)
return dr_global_alloc(size);
else
dr_global_free(addr, size);
}
}
return NULL;
}
DR_API
void *
dr_custom_alloc(void *drcontext, dr_alloc_flags_t flags, size_t size,
uint prot, void *addr)
{
return custom_memory_shared(true, drcontext, flags, size, prot, addr, NULL);
}
DR_API
bool
dr_custom_free(void *drcontext, dr_alloc_flags_t flags, void *addr, size_t size)
{
bool res;
custom_memory_shared(false, drcontext, flags, size, 0, addr, &res);
return res;
}
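/* Example (illustrative): the flag checks above permit combinations such as
 * non-heap memory restricted to the low 2GB of the address space:
 *
 *   void *p = dr_custom_alloc(NULL, DR_ALLOC_NON_HEAP | DR_ALLOC_LOW_2GB,
 *                             PAGE_SIZE, DR_MEMPROT_READ | DR_MEMPROT_WRITE,
 *                             NULL);
 *   if (p != NULL)
 *       dr_custom_free(NULL, DR_ALLOC_NON_HEAP | DR_ALLOC_LOW_2GB,
 *                      p, PAGE_SIZE);
 */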
#ifdef UNIX
DR_API
/* With ld's -wrap option, we can supply a replacement for malloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_malloc(size_t size)
{
return redirect_malloc(size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for realloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_realloc(void *mem, size_t size)
{
return redirect_realloc(mem, size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for calloc.
* This routine allocates memory from DR's global memory pool. Unlike
* dr_global_alloc(), however, we store the size of the allocation in
* the first few bytes so __wrap_free() can retrieve it.
*/
void *
__wrap_calloc(size_t nmemb, size_t size)
{
return redirect_calloc(nmemb, size);
}
DR_API
/* With ld's -wrap option, we can supply a replacement for free. This
* routine frees memory allocated by __wrap_malloc, __wrap_realloc, or
* __wrap_calloc and expects the allocation size to be available in the
* few bytes before 'mem'.
*/
void
__wrap_free(void *mem)
{
redirect_free(mem);
}
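/* Example (illustrative): to route a client's allocations through the
 * replacements above, pass ld's wrap option for each routine when linking
 * the client:
 *
 *   -Wl,--wrap=malloc -Wl,--wrap=realloc -Wl,--wrap=calloc -Wl,--wrap=free
 *
 * Unadorned malloc() calls in the client then resolve to __wrap_malloc()
 * and thus to DR's global heap.
 */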
#endif
DR_API
bool
dr_memory_protect(void *base, size_t size, uint new_prot)
{
/* We do allow the client to modify DR memory, for allocating a
* region and later making it unwritable. We should probably
* allow modifying ntdll, since our general model is to trust the
* client and let it shoot itself in the foot, but that would require
* passing in extra args to app_memory_protection_change() to ignore
* the patch_proof_list: and maybe it is safer to disallow client
* from putting hooks in ntdll.
*/
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
if (!dynamo_vm_area_overlap(base, ((byte *)base) + size)) {
uint mod_prot = new_prot;
uint res = app_memory_protection_change(get_thread_private_dcontext(),
base, size, new_prot, &mod_prot, NULL);
if (res != DO_APP_MEM_PROT_CHANGE) {
if (res == FAIL_APP_MEM_PROT_CHANGE ||
res == PRETEND_APP_MEM_PROT_CHANGE) {
return false;
} else {
/* SUBSET_APP_MEM_PROT_CHANGE should only happen for
* PROGRAM_SHEPHERDING. FIXME: not sure how common
* this will be: for now we just fail.
*/
return false;
}
}
CLIENT_ASSERT(mod_prot == new_prot, "internal error on dr_memory_protect()");
}
return set_protection(base, size, new_prot);
}
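/* Example (illustrative; table and table_size are hypothetical client
 * variables): making a client-owned region read-only after initialization:
 *
 *   if (!dr_memory_protect(table, table_size, DR_MEMPROT_READ))
 *       dr_fprintf(STDERR, "failed to write-protect table\n");
 */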
DR_API
/* checks to see that all bytes with addresses from pc to pc+size-1
* are readable and that reading from there won't generate an exception.
*/
bool
dr_memory_is_readable(const byte *pc, size_t size)
{
return is_readable_without_exception(pc, size);
}
DR_API
/* OS-neutral memory query for clients; just a wrapper around our get_memory_info(). */
bool
dr_query_memory(const byte *pc, byte **base_pc, size_t *size, uint *prot)
{
uint real_prot;
bool res;
#if defined(UNIX) && defined(HAVE_MEMINFO)
/* xref PR 246897 - the cached all-memory list can have problems when
* out-of-process entities change the mappings. For now we use the
* from-OS version instead (even though it's slower, and only when we
* have HAVE_MEMINFO_MAPS support). FIXME
* XXX i#853: We could decide allmem vs os with the use_all_memory_areas
* option.
*/
res = get_memory_info_from_os(pc, base_pc, size, &real_prot);
#else
res = get_memory_info(pc, base_pc, size, &real_prot);
#endif
if (prot != NULL) {
if (is_pretend_or_executable_writable((app_pc)pc)) {
/* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod
* as executable-but-writable and we'll come here.
*/
real_prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE;
}
*prot = real_prot;
}
return res;
}
DR_API
bool
dr_query_memory_ex(const byte *pc, OUT dr_mem_info_t *info)
{
bool res;
#if defined(UNIX) && defined(HAVE_MEMINFO)
/* PR 246897: all_memory_areas not ready for prime time */
res = query_memory_ex_from_os(pc, info);
#else
res = query_memory_ex(pc, info);
#endif
if (is_pretend_or_executable_writable((app_pc)pc)) {
/* We can't assert there's no DR_MEMPROT_WRITE b/c we mark selfmod
* as executable-but-writable and we'll come here.
*/
info->prot |= DR_MEMPROT_WRITE | DR_MEMPROT_PRETEND_WRITE;
}
return res;
}
DR_API
/* Wrapper around our safe_read. Xref P4 198875, placeholder till we have try/except */
bool
dr_safe_read(const void *base, size_t size, void *out_buf, size_t *bytes_read)
{
return safe_read_ex(base, size, out_buf, bytes_read);
}
DR_API
/* Wrapper around our safe_write. Xref P4 198875, placeholder till we have try/except */
bool
dr_safe_write(void *base, size_t size, const void *in_buf, size_t *bytes_written)
{
return safe_write_ex(base, size, in_buf, bytes_written);
}
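/* Example (illustrative; app_addr is a hypothetical untrusted pointer):
 * reading app memory that may be unmapped, instead of dereferencing it
 * directly and risking a fault:
 *
 *   ptr_uint_t val;
 *   size_t got;
 *   if (dr_safe_read(app_addr, sizeof(val), &val, &got) &&
 *       got == sizeof(val)) {
 *       ... use val ...
 *   }
 */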
DR_API
void
dr_try_setup(void *drcontext, void **try_cxt)
{
/* Yes, we're duplicating the code from the TRY() macro, but this
* provides better abstraction and lets us change our impl later
* vs exposing that macro.
*/
dcontext_t *dcontext = (dcontext_t *) drcontext;
try_except_context_t *try_state;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext());
ASSERT(try_cxt != NULL);
/* We allocate on the heap to avoid having to expose the try_except_context_t
* and dr_jmp_buf_t structs and be tied to their exact layouts.
* The client is likely to allocate memory inside the try anyway
* if doing a decode or something.
*/
try_state = (try_except_context_t *)
HEAP_TYPE_ALLOC(dcontext, try_except_context_t, ACCT_CLIENT, PROTECTED);
*try_cxt = try_state;
try_state->prev_context = dcontext->try_except.try_except_state;
dcontext->try_except.try_except_state = try_state;
}
/* dr_try_start() is in x86.asm since we can't have an extra frame that's
* going to be torn down between the longjmp and the restore point
*/
DR_API
void
dr_try_stop(void *drcontext, void *try_cxt)
{
dcontext_t *dcontext = (dcontext_t *) drcontext;
try_except_context_t *try_state = (try_except_context_t *) try_cxt;
CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
ASSERT(dcontext != NULL && dcontext == get_thread_private_dcontext());
ASSERT(try_state != NULL);
POP_TRY_BLOCK(&dcontext->try_except, *try_state);
HEAP_TYPE_FREE(dcontext, try_state, try_except_context_t, ACCT_CLIENT, PROTECTED);
}
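/* Example (illustrative): clients normally use the DR_TRY_EXCEPT macro,
 * which wraps dr_try_setup()/dr_try_start()/dr_try_stop(); instr and
 * maybe_bad_pc are hypothetical locals:
 *
 *   DR_TRY_EXCEPT(drcontext, {
 *       decode(drcontext, maybe_bad_pc, &instr);
 *   }, {
 *       dr_fprintf(STDERR, "unreadable instruction memory\n");
 *   });
 */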
DR_API
bool
dr_memory_is_dr_internal(const byte *pc)
{
return is_dynamo_address((app_pc)pc);
}
DR_API
bool
dr_memory_is_in_client(const byte *pc)
{
return is_in_client_lib((app_pc)pc);
}
void
instrument_client_lib_loaded(byte *start, byte *end)
{
/* i#852: include Extensions as they are really part of the clients and
* aren't like other private libs.
* XXX: we only avoid having the client libs on here b/c they're specified via
* full path and don't go through the loaders' locate routines.
* Not a big deal if they do end up on here: if they always did we could
* remove the linear walk in is_in_client_lib().
*/
/* called prior to instrument_init() */
init_client_aux_libs();
vmvector_add(client_aux_libs, start, end, NULL/*not an auxlib*/);
}
void
instrument_client_lib_unloaded(byte *start, byte *end)
{
/* called after instrument_exit() */
if (client_aux_libs != NULL)
vmvector_remove(client_aux_libs, start, end);
}
/**************************************************
* CLIENT AUXILIARY LIBRARIES
*/
DR_API
dr_auxlib_handle_t
dr_load_aux_library(const char *name,
byte **lib_start /*OPTIONAL OUT*/,
byte **lib_end /*OPTIONAL OUT*/)
{
byte *start, *end;
dr_auxlib_handle_t lib = load_shared_library(name, true/*reachable*/);
if (shared_library_bounds(lib, NULL, name, &start, &end)) {
/* be sure to replace b/c i#852 now adds during load w/ empty data */
vmvector_add_replace(client_aux_libs, start, end, (void*)lib);
if (lib_start != NULL)
*lib_start = start;
if (lib_end != NULL)
*lib_end = end;
all_memory_areas_lock();
update_all_memory_areas(start, end,
/* XXX: see comment in instrument_init()
* on walking the sections and what prot to use
*/
MEMPROT_READ, DR_MEMTYPE_IMAGE);
all_memory_areas_unlock();
} else {
unload_shared_library(lib);
lib = NULL;
}
return lib;
}
DR_API
dr_auxlib_routine_ptr_t
dr_lookup_aux_library_routine(dr_auxlib_handle_t lib, const char *name)
{
if (lib == NULL)
return NULL;
return lookup_library_routine(lib, name);
}
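/* Example (illustrative; the library and routine names are hypothetical):
 *
 *   dr_auxlib_handle_t lib = dr_load_aux_library("libhelper.so", NULL, NULL);
 *   if (lib != NULL) {
 *       dr_auxlib_routine_ptr_t fn =
 *           dr_lookup_aux_library_routine(lib, "helper_init");
 *       ...
 *       dr_unload_aux_library(lib);
 *   }
 */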
DR_API
bool
dr_unload_aux_library(dr_auxlib_handle_t lib)
{
byte *start = NULL, *end = NULL;
/* unfortunately on linux w/ dlopen we cannot find the bounds w/o
* either the path or an address so we iterate.
* once we have our private loader we shouldn't need this:
* XXX i#157
*/
vmvector_iterator_t vmvi;
dr_auxlib_handle_t found = NULL;
if (lib == NULL)
return false;
vmvector_iterator_start(client_aux_libs, &vmvi);
while (vmvector_iterator_hasnext(&vmvi)) {
found = (dr_auxlib_handle_t) vmvector_iterator_next(&vmvi, &start, &end);
if (found == lib)
break;
}
vmvector_iterator_stop(&vmvi);
if (found == lib) {
CLIENT_ASSERT(start != NULL && start < end, "logic error");
vmvector_remove(client_aux_libs, start, end);
unload_shared_library(lib);
all_memory_areas_lock();
update_all_memory_areas(start, end, MEMPROT_NONE, DR_MEMTYPE_FREE);
all_memory_areas_unlock();
return true;
} else {
CLIENT_ASSERT(false, "invalid aux lib");
return false;
}
}
#if defined(WINDOWS) && !defined(X64)
/* XXX i#1035: these routines all have 64-bit handle and routine types for
* handling win8's high ntdll64 in the future. For now the implementation
* treats them as 32-bit types.
*/
DR_API
dr_auxlib64_handle_t
dr_load_aux_x64_library(const char *name)
{
HANDLE h;
/* We use the x64 system loader. We assume that x64 state is fine being
* interrupted at arbitrary points during x86 execution, and that there
* is little risk of transparency violations.
*/
/* load_library_64() is racy. We don't expect anyone else to load
* x64 libs, but another thread in this client could, so we
* serialize here.
*/
mutex_lock(&client_aux_lib64_lock);
/* XXX: if we switch to our private loader we'll need to add custom
* search support to look in 64-bit system dir
*/
/* XXX: I'd add to the client_aux_libs vector, but w/ the system loader
* loading this I don't know all the dependent libs it might load.
* Not bothering for now.