/* blob: 3e144913e79c28d18cac8a64d7990a8563c71783 [file] [log] [blame] */
/* ******************************************************************************
* Copyright (c) 2010-2020 Google, Inc. All rights reserved.
* Copyright (c) 2010-2011 Massachusetts Institute of Technology All rights reserved.
* Copyright (c) 2002-2010 VMware, Inc. All rights reserved.
* ******************************************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2002-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2002 Hewlett-Packard Company */
/*
* instrument.c - interface for instrumentation
*/
#include "../globals.h" /* just to disable warning C4206 about an empty file */
#include "instrument.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "decode.h"
#include "disassemble.h"
#include "ir_utils.h"
#include "../fragment.h"
#include "../fcache.h"
#include "../emit.h"
#include "../link.h"
#include "../monitor.h" /* for mark_trace_head */
#include <stdarg.h> /* for varargs */
#include "../nudge.h" /* for nudge_internal() */
#include "../synch.h"
#include "../annotations.h"
#include "../translate.h"
#ifdef UNIX
# include <sys/time.h> /* ITIMER_* */
# include "../unix/module.h" /* redirect_* functions */
#endif
#ifdef CLIENT_INTERFACE
/* in utils.c, not exported to everyone */
extern ssize_t
do_file_write(file_t f, const char *fmt, va_list ap);
# ifdef DEBUG
/* case 10450: give messages to clients */
/* we can't undef ASSERT b/c of DYNAMO_OPTION */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# endif
/* PR 200065: User passes us the shared library, we look up "dr_init"
* or "dr_client_main" and call it. From there, the client can register which events
* it wishes to receive.
*/
# define INSTRUMENT_INIT_NAME_LEGACY "dr_init"
# define INSTRUMENT_INIT_NAME "dr_client_main"
/* PR 250952: version check
* If changing this, don't forget to update:
* - lib/dr_defines.h _USES_DR_VERSION_
* - api/docs/footer.html
*/
# define USES_DR_VERSION_NAME "_USES_DR_VERSION_"
/* Should we expose this for use in samples/tracedump.c?
* Also, if we change this, need to change the symlink generation
* in core/CMakeLists.txt: at that point should share single define.
*/
/* OLDEST_COMPATIBLE_VERSION now comes from configure.h */
/* The 3rd version number, the bugfix/patch number, should not affect
* compatibility, so our version check number simply uses:
* major*100 + minor
* Which gives us room for 100 minor versions per major.
*/
# define NEWEST_COMPATIBLE_VERSION CURRENT_API_VERSION
/* Store the unique not-part-of-version build number (the version
* BUILD_NUMBER is limited to 64K and is not guaranteed to be unique)
* somewhere accessible at a customer site. We could alternatively
* pull it out of our DYNAMORIO_DEFINES string.
*/
DR_API const char *unique_build_number = STRINGIFY(UNIQUE_BUILD_NUMBER);
/* The flag d_r_client_avx512_code_in_use is described in arch.h. */
# define DR_CLIENT_AVX512_CODE_IN_USE_NAME "_DR_CLIENT_AVX512_CODE_IN_USE_"
/* Acquire when registering or unregistering event callbacks
* Also held when invoking events, which happens much more often
* than registration changes, so we use rwlock
*/
DECLARE_CXTSWPROT_VAR(static read_write_lock_t callback_registration_lock,
INIT_READWRITE_LOCK(callback_registration_lock));
/* Structures for maintaining lists of event callbacks */
typedef void (*callback_t)(void);
typedef struct _callback_list_t {
callback_t *callbacks; /* array of callback functions */
size_t num; /* number of callbacks registered */
size_t size; /* allocated space (may be larger than num) */
} callback_list_t;
/* This is a little convoluted. The following is a macro to iterate
* over a list of callbacks and call each function. We use a macro
* instead of a function so we can pass the function type and perform
* a typecast. We need to copy the callback list before iterating to
* support the possibility of one callback unregistering another and
* messing up the list while we're iterating. We'll optimize the case
* for 5 or fewer registered callbacks and stack-allocate the temp
* list. Otherwise, we'll heap-allocate the temp.
*
* We allow the args to use the var "idx" to access the client index.
*
* We consider the first registered callback to have the highest
* priority and call it last. If we gave the last registered callback
* the highest priority, a client could re-register a routine to
* increase its priority. That seems a little weird.
*/
/*
*/
# define FAST_COPY_SIZE 5
# define call_all_ret(ret, retop, postop, vec, type, ...) \
do { \
size_t idx, num; \
/* we will be called even if no callbacks (i.e., (vec).num == 0) */ \
/* we guarantee we're in DR state at all callbacks and clean calls */ \
/* XXX: add CLIENT_ASSERT here */ \
d_r_read_lock(&callback_registration_lock); \
num = (vec).num; \
if (num == 0) { \
d_r_read_unlock(&callback_registration_lock); \
} else if (num <= FAST_COPY_SIZE) { \
callback_t tmp[FAST_COPY_SIZE]; \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
d_r_read_unlock(&callback_registration_lock); \
for (idx = 0; idx < num; idx++) { \
ret retop(((type)tmp[num - idx - 1])(__VA_ARGS__)) postop; \
} \
} else { \
callback_t *tmp = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, callback_t, num, \
ACCT_OTHER, UNPROTECTED); \
memcpy(tmp, (vec).callbacks, num * sizeof(callback_t)); \
d_r_read_unlock(&callback_registration_lock); \
for (idx = 0; idx < num; idx++) { \
ret retop(((type)tmp[num - idx - 1])(__VA_ARGS__)) postop; \
} \
HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, tmp, callback_t, num, ACCT_OTHER, \
UNPROTECTED); \
} \
} while (0)
/* It's less error-prone if we just have one call_all macro. We'll
* reuse call_all_ret above for callbacks that don't have a return
* value by assigning to a dummy var. Note that this means we'll
* have to pass an int-returning type to call_all()
*/
# define call_all(vec, type, ...) \
do { \
int dummy; \
call_all_ret(dummy, =, , vec, type, __VA_ARGS__); \
} while (0)
/* Lists of callbacks for each event type. Note that init and nudge
* callback lists are kept in the client_lib_t data structure below.
* We could store all lists on a per-client basis, but we can iterate
* over these lists slightly more efficiently if we store all
* callbacks for a specific event in a single list.
*/
static callback_list_t exit_callbacks = {
0,
};
static callback_list_t thread_init_callbacks = {
0,
};
static callback_list_t thread_exit_callbacks = {
0,
};
# ifdef UNIX
static callback_list_t fork_init_callbacks = {
0,
};
# endif
static callback_list_t low_on_memory_callbacks = {
0,
};
static callback_list_t bb_callbacks = {
0,
};
static callback_list_t trace_callbacks = {
0,
};
# ifdef CUSTOM_TRACES
static callback_list_t end_trace_callbacks = {
0,
};
# endif
static callback_list_t fragdel_callbacks = {
0,
};
static callback_list_t restore_state_callbacks = {
0,
};
static callback_list_t restore_state_ex_callbacks = {
0,
};
static callback_list_t module_load_callbacks = {
0,
};
static callback_list_t module_unload_callbacks = {
0,
};
static callback_list_t filter_syscall_callbacks = {
0,
};
static callback_list_t pre_syscall_callbacks = {
0,
};
static callback_list_t post_syscall_callbacks = {
0,
};
static callback_list_t kernel_xfer_callbacks = {
0,
};
# ifdef WINDOWS
static callback_list_t exception_callbacks = {
0,
};
# else
static callback_list_t signal_callbacks = {
0,
};
# endif
# ifdef PROGRAM_SHEPHERDING
static callback_list_t security_violation_callbacks = {
0,
};
# endif
static callback_list_t persist_ro_size_callbacks = {
0,
};
static callback_list_t persist_ro_callbacks = {
0,
};
static callback_list_t resurrect_ro_callbacks = {
0,
};
static callback_list_t persist_rx_size_callbacks = {
0,
};
static callback_list_t persist_rx_callbacks = {
0,
};
static callback_list_t resurrect_rx_callbacks = {
0,
};
static callback_list_t persist_rw_size_callbacks = {
0,
};
static callback_list_t persist_rw_callbacks = {
0,
};
static callback_list_t resurrect_rw_callbacks = {
0,
};
static callback_list_t persist_patch_callbacks = {
0,
};
/* An array of client libraries. We use a static array instead of a
* heap-allocated list so we can load the client libs before
* initializing DR's heap.
*/
typedef struct _client_lib_t {
client_id_t id;
char path[MAXIMUM_PATH];
/* PR 366195: dlopen() handle truly is opaque: != start */
shlib_handle_t lib;
app_pc start;
app_pc end;
/* The raw option string, which after i#1736 contains token-delimiting quotes */
char options[MAX_OPTION_LENGTH];
/* The option string with token-delimiting quotes removed for backward compat */
char legacy_options[MAX_OPTION_LENGTH];
/* The parsed options: */
int argc;
const char **argv;
/* We need to associate nudge events with a specific client so we
* store that list here in the client_lib_t instead of using a
* single global list.
*/
callback_list_t nudge_callbacks;
} client_lib_t;
/* these should only be modified prior to instrument_init(), since no
* readers of the client_libs array (event handlers, etc.) use synch
*/
static client_lib_t client_libs[MAX_CLIENT_LIBS] = { {
0,
} };
static size_t num_client_libs = 0;
static void *persist_user_data[MAX_CLIENT_LIBS];
# ifdef WINDOWS
/* private kernel32 lib, used to print to console */
static bool print_to_console;
static shlib_handle_t priv_kernel32;
typedef BOOL(WINAPI *kernel32_WriteFile_t)(HANDLE, LPCVOID, DWORD, LPDWORD, LPOVERLAPPED);
static kernel32_WriteFile_t kernel32_WriteFile;
static ssize_t
dr_write_to_console_varg(bool to_stdout, const char *fmt, ...);
# endif
bool client_requested_exit;
# ifdef WINDOWS
/* used for nudge support */
static bool block_client_nudge_threads = false;
DECLARE_CXTSWPROT_VAR(static int num_client_nudge_threads, 0);
# endif
# ifdef CLIENT_SIDELINE
/* # of sideline threads */
DECLARE_CXTSWPROT_VAR(static int num_client_sideline_threads, 0);
# endif
# if defined(WINDOWS) || defined(CLIENT_SIDELINE)
/* protects block_client_nudge_threads and incrementing num_client_nudge_threads */
DECLARE_CXTSWPROT_VAR(static mutex_t client_thread_count_lock,
INIT_LOCK_FREE(client_thread_count_lock));
# endif
static vm_area_vector_t *client_aux_libs;
static bool track_where_am_i;
# ifdef WINDOWS
DECLARE_CXTSWPROT_VAR(static mutex_t client_aux_lib64_lock,
INIT_LOCK_FREE(client_aux_lib64_lock));
# endif
/****************************************************************************/
/* INTERNAL ROUTINES */
/* Returns true iff c is one of the three quote characters: ", ', or `. */
static bool
char_is_quote(char c)
{
    switch (c) {
    case '"':
    case '\'':
    case '`': return true;
    default: return false;
    }
}
/* Tokenizes opstr into a heap-allocated argv-style array for client_id.
 * On return *argv holds *argc entries: entry 0 is a dr_strdup'ed copy of the
 * client's library path (so index 0 can play the conventional "app name"
 * role) and the remaining entries are the individual option tokens, each
 * dr_strdup'ed.  max_token_size bounds the length of any single token.
 * Callers release the result with free_option_array().
 */
static void
parse_option_array(client_id_t client_id, const char *opstr, int *argc OUT,
                   const char ***argv OUT, size_t max_token_size)
{
    const char **a;
    int cnt;
    const char *s;
    /* Scratch buffer that dr_get_token fills for each token. */
    char *token =
        HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, char, max_token_size, ACCT_CLIENT, UNPROTECTED);
    /* First pass: count the tokens so we can size the array exactly. */
    for (cnt = 0, s = dr_get_token(opstr, token, max_token_size); s != NULL;
         s = dr_get_token(s, token, max_token_size)) {
        cnt++;
    }
    cnt++; /* add 1 so 0 can be "app" */
    a = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, const char *, cnt, ACCT_CLIENT, UNPROTECTED);
    cnt = 0;
    a[cnt] = dr_strdup(dr_get_client_path(client_id) HEAPACCT(ACCT_CLIENT));
    cnt++;
    /* Second pass: duplicate each token into the array, starting at index 1. */
    for (s = dr_get_token(opstr, token, max_token_size); s != NULL;
         s = dr_get_token(s, token, max_token_size)) {
        a[cnt] = dr_strdup(token HEAPACCT(ACCT_CLIENT));
        cnt++;
    }
    HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, token, char, max_token_size, ACCT_CLIENT,
                    UNPROTECTED);
    *argc = cnt;
    *argv = a;
}
/* Releases an argv array built by parse_option_array(): frees each
 * duplicated string, then the array itself.  Always returns true.
 */
static bool
free_option_array(int argc, const char **argv)
{
    int idx = 0;
    while (idx < argc) {
        dr_strfree(argv[idx] HEAPACCT(ACCT_CLIENT));
        idx++;
    }
    HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, argv, char *, argc, ACCT_CLIENT, UNPROTECTED);
    return true;
}
/* Appends func to vec, growing storage (in increments of two slots) when the
 * array is full.  Registration is refused (with a client assert) for NULL
 * callbacks and in standalone-library mode, where events are unsupported.
 * If unprotect is true, the rarely-written data section is unprotected around
 * the update, since the target list is usually one of the static vectors that
 * live in self-protected data.
 * Fix: the out-of-memory early return previously skipped re-protecting the
 * data section when unprotect was true, leaving DATASEC_RARELY_PROT writable.
 */
static void
add_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
    if (func == NULL) {
        CLIENT_ASSERT(false, "trying to register a NULL callback");
        return;
    }
    if (standalone_library) {
        CLIENT_ASSERT(false, "events not supported in standalone library mode");
        return;
    }
    d_r_write_lock(&callback_registration_lock);
    /* Although we're receiving a pointer to a callback_list_t, we're
     * usually modifying a static var.
     */
    if (unprotect) {
        SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
    /* We may already have an open slot since we allocate in twos and
     * because we don't bother to free the storage when we remove the
     * callback. Check and only allocate if necessary.
     */
    if (vec->num == vec->size) {
        callback_t *tmp = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, callback_t,
                                           vec->size + 2, /* Let's allocate 2 */
                                           ACCT_OTHER, UNPROTECTED);
        if (tmp == NULL) {
            CLIENT_ASSERT(false, "out of memory: can't register callback");
            /* Restore data-section protection before bailing; the original
             * code skipped this, leaving the section writable on this path.
             */
            if (unprotect) {
                SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
            }
            d_r_write_unlock(&callback_registration_lock);
            return;
        }
        if (vec->callbacks != NULL) {
            /* Preserve the existing entries, then free the old (smaller) array. */
            memcpy(tmp, vec->callbacks, vec->num * sizeof(callback_t));
            HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec->callbacks, callback_t, vec->size,
                            ACCT_OTHER, UNPROTECTED);
        }
        vec->callbacks = tmp;
        vec->size += 2;
    }
    vec->callbacks[vec->num] = func;
    vec->num++;
    if (unprotect) {
        SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
    d_r_write_unlock(&callback_registration_lock);
}
/* Removes func from vec if present, sliding later entries down to close the
 * gap (the freed slot's storage is retained for reuse by add_callback).
 * Returns whether the callback was found.  If unprotect is true, the
 * rarely-written data section is made writable around the update.
 */
static bool
remove_callback(callback_list_t *vec, void (*func)(void), bool unprotect)
{
    bool removed = false;
    size_t pos;
    if (func == NULL) {
        CLIENT_ASSERT(false, "trying to unregister a NULL callback");
        return false;
    }
    d_r_write_lock(&callback_registration_lock);
    /* The pointed-at list is typically one of the static vectors above. */
    if (unprotect) {
        SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
    for (pos = 0; pos < vec->num && !removed; pos++) {
        if (vec->callbacks[pos] == func) {
            /* Slide every subsequent entry down one slot. */
            size_t tail;
            for (tail = pos + 1; tail < vec->num; tail++) {
                vec->callbacks[tail - 1] = vec->callbacks[tail];
            }
            vec->num--;
            removed = true;
        }
    }
    if (unprotect) {
        SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    }
    d_r_write_unlock(&callback_registration_lock);
    return removed;
}
/* Loads one client library and records it in the static client_libs array.
 * This should only be called prior to instrument_init(),
 * since no readers of the client_libs array use synch
 * and since this routine assumes .data is writable.
 * path:    filesystem path of the client shlib.
 * id_str:  hex client-ID string, or NULL to default to ID 0.
 * options: raw option string to stash for later parsing, or NULL.
 * On failure (load error, version mismatch, table full) the client is
 * skipped; release builds continue running the app (PR 232490).
 */
static void
add_client_lib(const char *path, const char *id_str, const char *options)
{
    client_id_t id;
    shlib_handle_t client_lib;
    DEBUG_DECLARE(size_t i);
    ASSERT(!dynamo_initialized);
    /* if ID not specified, we'll default to 0 */
    id = (id_str == NULL) ? 0 : strtoul(id_str, NULL, 16);
# ifdef DEBUG
    /* Check for conflicting IDs */
    for (i = 0; i < num_client_libs; i++) {
        CLIENT_ASSERT(client_libs[i].id != id, "Clients have the same ID");
    }
# endif
    if (num_client_libs == MAX_CLIENT_LIBS) {
        CLIENT_ASSERT(false, "Max number of clients reached");
        return;
    }
    LOG(GLOBAL, LOG_INTERP, 4, "about to load client library %s\n", path);
    client_lib =
        load_shared_library(path, IF_X64_ELSE(DYNAMO_OPTION(reachable_client), true));
    if (client_lib == NULL) {
        char msg[MAXIMUM_PATH * 4];
        char err[MAXIMUM_PATH * 2];
        shared_library_error(err, BUFFER_SIZE_ELEMENTS(err));
        snprintf(msg, BUFFER_SIZE_ELEMENTS(msg),
                 ".\n\tError opening instrumentation library %s:\n\t%s", path, err);
        NULL_TERMINATE_BUFFER(msg);
        /* PR 232490 - malformed library names or incorrect
         * permissions shouldn't blow up an app in release builds as
         * they may happen at customer sites with a third party
         * client.
         */
        /* PR 408318: 32-vs-64 errors should NOT be fatal to continue
         * in debug build across execve chains. Xref i#147.
         * XXX: w/ -private_loader, err always equals "error in private loader"
         * and so we never match here!
         */
        IF_UNIX(if (strstr(err, "wrong ELF class") == NULL))
        CLIENT_ASSERT(false, msg);
        SYSLOG(SYSLOG_ERROR, CLIENT_LIBRARY_UNLOADABLE, 4, get_application_name(),
               get_application_pid(), path, msg);
    } else {
        /* PR 250952: version check */
        int *uses_dr_version =
            (int *)lookup_library_routine(client_lib, USES_DR_VERSION_NAME);
        if (uses_dr_version == NULL || *uses_dr_version < OLDEST_COMPATIBLE_VERSION ||
            *uses_dr_version > NEWEST_COMPATIBLE_VERSION) {
            /* not a fatal usage error since we want release build to continue */
            CLIENT_ASSERT(false,
                          "client library is incompatible with this version of DR");
            SYSLOG(SYSLOG_ERROR, CLIENT_VERSION_INCOMPATIBLE, 2, get_application_name(),
                   get_application_pid());
        } else {
            /* Version is compatible: claim a slot and fill in the record. */
            size_t idx = num_client_libs++;
            client_libs[idx].id = id;
            client_libs[idx].lib = client_lib;
            app_pc client_start, client_end;
# if defined(STATIC_LIBRARY) && defined(LINUX)
            // For DR under static+linux we know that the client and DR core
            // code are built into the app itself. To avoid various edge cases
            // in finding the "library" bounds, delegate this boundary discovery
            // to the dll bounds functions. xref i#3387.
            client_start = get_dynamorio_dll_start();
            client_end = get_dynamorio_dll_end();
            ASSERT(client_start <= (app_pc)uses_dr_version &&
                   (app_pc)uses_dr_version < client_end);
# else
            DEBUG_DECLARE(bool ok =)
            shared_library_bounds(client_lib, (byte *)uses_dr_version, NULL,
                                  &client_start, &client_end);
            ASSERT(ok);
# endif
            client_libs[idx].start = client_start;
            client_libs[idx].end = client_end;
            LOG(GLOBAL, LOG_INTERP, 1, "loaded %s at " PFX "-" PFX "\n", path,
                client_libs[idx].start, client_libs[idx].end);
# ifdef X64
            /* Now that we map the client within the constraints, this request
             * should always succeed.
             */
            if (DYNAMO_OPTION(reachable_client)) {
                request_region_be_heap_reachable(client_libs[idx].start,
                                                 client_libs[idx].end -
                                                     client_libs[idx].start);
            }
# endif
            strncpy(client_libs[idx].path, path,
                    BUFFER_SIZE_ELEMENTS(client_libs[idx].path));
            NULL_TERMINATE_BUFFER(client_libs[idx].path);
            if (options != NULL) {
                strncpy(client_libs[idx].options, options,
                        BUFFER_SIZE_ELEMENTS(client_libs[idx].options));
                NULL_TERMINATE_BUFFER(client_libs[idx].options);
            }
# ifdef X86
            /* Record whether the client contains AVX-512 code; see the
             * d_r_client_avx512_code_in_use flag described in arch.h.
             */
            bool *client_avx512_code_in_use = (bool *)lookup_library_routine(
                client_lib, DR_CLIENT_AVX512_CODE_IN_USE_NAME);
            if (client_avx512_code_in_use != NULL) {
                if (*client_avx512_code_in_use)
                    d_r_set_client_avx512_code_in_use();
            }
# endif
            /* We'll look up dr_client_main and call it in instrument_init */
        }
    }
}
/* Parses the -client_lib option string, which holds semicolon-separated
 * "path;ID;options" triples, and loads each listed client library via
 * add_client_lib().  Must run before instrument_init().
 * NOTE(review): next_path only advances when both the ID and options
 * separators are present, so an entry without ";ID;options" terminates the
 * list — confirm against the option-string writer.
 */
void
instrument_load_client_libs(void)
{
    if (CLIENTS_EXIST()) {
        char buf[MAX_LIST_OPTION_LENGTH];
        char *path;
        /* Copy the option under the read lock so we can tokenize in place. */
        string_option_read_lock();
        strncpy(buf, INTERNAL_OPTION(client_lib), BUFFER_SIZE_ELEMENTS(buf));
        string_option_read_unlock();
        NULL_TERMINATE_BUFFER(buf);
        /* We're expecting path;ID;options triples */
        path = buf;
        do {
            char *id = NULL;
            char *options = NULL;
            char *next_path = NULL;
            /* Split off the ID, options, and following path by replacing each
             * ';' with a NUL terminator.
             */
            id = strstr(path, ";");
            if (id != NULL) {
                id[0] = '\0';
                id++;
                options = strstr(id, ";");
                if (options != NULL) {
                    options[0] = '\0';
                    options++;
                    next_path = strstr(options, ";");
                    if (next_path != NULL) {
                        next_path[0] = '\0';
                        next_path++;
                    }
                }
            }
# ifdef STATIC_LIBRARY
            /* We ignore client library paths and allow client code anywhere in the app.
             * We have a check in load_shared_library() to avoid loading
             * a 2nd copy of the app.
             * We do support passing client ID and options via the first -client_lib.
             */
            add_client_lib(get_application_name(), id == NULL ? "0" : id,
                           options == NULL ? "" : options);
            break;
# endif
            add_client_lib(path, id, options);
            path = next_path;
        } while (path != NULL);
    }
}
/* Lazily creates the shared vm_area_vector tracking auxiliary client libs. */
static void
init_client_aux_libs(void)
{
    if (client_aux_libs != NULL)
        return;
    VMVECTOR_ALLOC_VECTOR(client_aux_libs, GLOBAL_DCONTEXT, VECTOR_SHARED,
                          client_aux_libs);
}
/* Initializes the client interface: records client AVX-512 usage, registers
 * each client library's memory region, parses its options, and invokes its
 * dr_client_main (or legacy dr_init) entry point.  Afterward it replays the
 * thread-init event for the already-created first thread and the module-load
 * event for modules loaded before the clients registered.
 */
void
instrument_init(void)
{
    size_t i;
    init_client_aux_libs();
# ifdef X86
    if (IF_WINDOWS_ELSE(!dr_earliest_injected, !DYNAMO_OPTION(early_inject))) {
        /* A client that had been compiled with AVX-512 may clobber an application's
         * state. AVX-512 context switching will not be lazy in this case.
         */
        if (d_r_is_client_avx512_code_in_use())
            d_r_set_avx512_code_in_use(true, NULL);
    }
# endif
    if (num_client_libs > 0) {
        /* We no longer distinguish in-DR vs in-client crashes, as many crashes in
         * the DR lib are really client bugs.
         * We expect most end-user tools to call dr_set_client_name() so we
         * have generic defaults here:
         */
        set_exception_strings("Tool", "your tool's issue tracker");
    }
    /* Iterate over the client libs and call each init routine */
    for (i = 0; i < num_client_libs; i++) {
        void (*init)(client_id_t, int, const char **) =
            (void (*)(client_id_t, int, const char **))(
                lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME));
        void (*legacy)(client_id_t) = (void (*)(client_id_t))(
            lookup_library_routine(client_libs[i].lib, INSTRUMENT_INIT_NAME_LEGACY));
        /* we can't do this in instrument_load_client_libs() b/c vmheap
         * is not set up at that point
         */
        all_memory_areas_lock();
        update_all_memory_areas(client_libs[i].start, client_libs[i].end,
                                /* FIXME: need to walk the sections: but may be
                                 * better to obfuscate from clients anyway.
                                 * We can't set as MEMPROT_NONE as that leads to
                                 * bugs if the app wants to interpret part of
                                 * its code section (xref PR 504629).
                                 */
                                MEMPROT_READ, DR_MEMTYPE_IMAGE);
        all_memory_areas_unlock();
        /* i#1736: parse the options up front */
        parse_option_array(client_libs[i].id, client_libs[i].options,
                           &client_libs[i].argc, &client_libs[i].argv, MAX_OPTION_LENGTH);
# ifdef STATIC_LIBRARY
        /* We support the app having client code anywhere, so there does not
         * have to be an init routine that we call. This means the app
         * may have to iterate modules on its own.
         */
# else
        /* Since the user has to register all other events, it
         * doesn't make sense to provide the -client_lib
         * option for a module that doesn't export an init routine.
         */
        CLIENT_ASSERT(init != NULL || legacy != NULL,
                      "client does not export a dr_client_main or dr_init routine");
# endif
        /* Prefer the modern entry point over the legacy one. */
        if (init != NULL)
            (*init)(client_libs[i].id, client_libs[i].argc, client_libs[i].argv);
        else if (legacy != NULL)
            (*legacy)(client_libs[i].id);
    }
    /* We now initialize the 1st thread before coming here, so we can
     * hand the client a dcontext; so we need to specially generate
     * the thread init event now. An alternative is to have
     * dr_get_global_drcontext(), but that's extra complexity for no
     * real reason.
     * We raise the thread init event prior to the module load events
     * so the client can access a dcontext in module load events (i#1339).
     */
    if (thread_init_callbacks.num > 0) {
        instrument_thread_init(get_thread_private_dcontext(), false, false);
    }
    /* If the client just registered the module-load event, let's
     * assume it wants to be informed of *all* modules and tell it
     * which modules are already loaded. If the client registers the
     * event later, it will need to use the module iterator routines
     * to retrieve currently loaded modules. We use the dr_module_iterator
     * exposed to the client to avoid locking issues.
     */
    if (module_load_callbacks.num > 0) {
        dr_module_iterator_t *mi = dr_module_iterator_start();
        while (dr_module_iterator_hasnext(mi)) {
            module_data_t *data = dr_module_iterator_next(mi);
            instrument_module_load(data, true /*already loaded*/);
            /* XXX; more efficient to set this flag during dr_module_iterator_start */
            os_module_set_flag(data->start, MODULE_LOAD_EVENT);
            dr_free_module_data(data);
        }
        dr_module_iterator_stop(mi);
    }
}
/* Releases the heap array behind vec (if any) and resets the list to empty. */
static void
free_callback_list(callback_list_t *vec)
{
    callback_t *storage = vec->callbacks;
    if (storage != NULL) {
        HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, storage, callback_t, vec->size, ACCT_OTHER,
                        UNPROTECTED);
        vec->callbacks = NULL;
    }
    vec->size = 0;
    vec->num = 0;
}
/* Frees every static event-callback list; called from instrument_exit()
 * during process exit / detach teardown.
 */
static void
free_all_callback_lists()
{
    free_callback_list(&exit_callbacks);
    free_callback_list(&thread_init_callbacks);
    free_callback_list(&thread_exit_callbacks);
# ifdef UNIX
    free_callback_list(&fork_init_callbacks);
# endif
    free_callback_list(&low_on_memory_callbacks);
    free_callback_list(&bb_callbacks);
    free_callback_list(&trace_callbacks);
# ifdef CUSTOM_TRACES
    free_callback_list(&end_trace_callbacks);
# endif
    free_callback_list(&fragdel_callbacks);
    free_callback_list(&restore_state_callbacks);
    free_callback_list(&restore_state_ex_callbacks);
    free_callback_list(&module_load_callbacks);
    free_callback_list(&module_unload_callbacks);
    free_callback_list(&filter_syscall_callbacks);
    free_callback_list(&pre_syscall_callbacks);
    free_callback_list(&post_syscall_callbacks);
    free_callback_list(&kernel_xfer_callbacks);
# ifdef WINDOWS
    free_callback_list(&exception_callbacks);
# else
    free_callback_list(&signal_callbacks);
# endif
# ifdef PROGRAM_SHEPHERDING
    free_callback_list(&security_violation_callbacks);
# endif
    free_callback_list(&persist_ro_size_callbacks);
    free_callback_list(&persist_ro_callbacks);
    free_callback_list(&resurrect_ro_callbacks);
    free_callback_list(&persist_rx_size_callbacks);
    free_callback_list(&persist_rx_callbacks);
    free_callback_list(&resurrect_rx_callbacks);
    free_callback_list(&persist_rw_size_callbacks);
    free_callback_list(&persist_rw_callbacks);
    free_callback_list(&resurrect_rw_callbacks);
    free_callback_list(&persist_patch_callbacks);
}
/* Raises the process-exit event, invoking every registered exit callback. */
void
instrument_exit_event(void)
{
    /* Note - currently own initexit lock when this is called (see PR 227619). */
    /* support dr_get_mcontext() from the exit event */
    if (!standalone_library)
        get_thread_private_dcontext()->client_data->mcontext_in_dcontext = true;
    call_all(exit_callbacks, int (*)(),
             /* It seems the compiler is confused if we pass no var args
              * to the call_all macro. Bogus NULL arg */
             NULL);
}
/* Tears down the client interface: unloads client libraries and frees their
 * per-client state (always in debug builds; only on detach in release), frees
 * the shared callback lists, and deletes client-related locks and vectors.
 */
void
instrument_exit(void)
{
    /* Note - currently own initexit lock when this is called (see PR 227619). */
    if (IF_DEBUG_ELSE(true, doing_detach)) {
        /* Unload all client libs and free any allocated storage */
        size_t i;
        for (i = 0; i < num_client_libs; i++) {
            free_callback_list(&client_libs[i].nudge_callbacks);
            unload_shared_library(client_libs[i].lib);
            if (client_libs[i].argv != NULL)
                free_option_array(client_libs[i].argc, client_libs[i].argv);
        }
        free_all_callback_lists();
    }
    vmvector_delete_vector(GLOBAL_DCONTEXT, client_aux_libs);
    client_aux_libs = NULL;
    num_client_libs = 0;
# ifdef WINDOWS
    DELETE_LOCK(client_aux_lib64_lock);
# endif
# if defined(WINDOWS) || defined(CLIENT_SIDELINE)
    DELETE_LOCK(client_thread_count_lock);
# endif
    DELETE_READWRITE_LOCK(callback_registration_lock);
}
/* Reports whether addr falls inside any loaded client library or any
 * auxiliary client library region.
 * NOTE: we use this routine for detecting exceptions in clients.  If we add
 * a callback on that event we'll have to be sure to deliver it only to the
 * right client.
 */
bool
is_in_client_lib(app_pc addr)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (addr >= (app_pc)client_libs[idx].start && addr < client_libs[idx].end)
            return true;
    }
    return client_aux_libs != NULL && vmvector_overlap(client_aux_libs, addr, addr + 1);
}
/* Fills start/end (either may be NULL) with the load bounds of the client at
 * index client_id in client_libs.  Returns false if the index is out of range.
 */
bool
get_client_bounds(client_id_t client_id, app_pc *start /*OUT*/, app_pc *end /*OUT*/)
{
    if (client_id >= num_client_libs)
        return false;
    if (start != NULL)
        *start = (app_pc)client_libs[client_id].start;
    if (end != NULL)
        *end = (app_pc)client_libs[client_id].end;
    return true;
}
/* Returns the path of the client library containing addr, or "" if addr is
 * not inside any registered client library.
 */
const char *
get_client_path_from_addr(app_pc addr)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (addr >= (app_pc)client_libs[idx].start && addr < client_libs[idx].end)
            return client_libs[idx].path;
    }
    return "";
}
/* Returns true iff some registered client library carries the given ID. */
bool
is_valid_client_id(client_id_t id)
{
    size_t idx = 0;
    while (idx < num_client_libs) {
        if (client_libs[idx].id == id)
            return true;
        idx++;
    }
    return false;
}
/* Event registration/unregistration wrappers: each forwards to
 * add_callback()/remove_callback() on the matching event list, casting the
 * typed handler to the generic callback_t signature.
 */
void
dr_register_exit_event(void (*func)(void))
{
    add_callback(&exit_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_exit_event(void (*func)(void))
{
    return remove_callback(&exit_callbacks, (void (*)(void))func, true);
}
/* The bb and trace events require -code_api; registration is refused (with a
 * client assert) when it is disabled.
 */
void dr_register_bb_event(dr_emit_flags_t (*func)(void *drcontext, void *tag,
                                                  instrlist_t *bb, bool for_trace,
                                                  bool translating))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for bb event when code_api is disabled");
        return;
    }
    add_callback(&bb_callbacks, (void (*)(void))func, true);
}
bool dr_unregister_bb_event(dr_emit_flags_t (*func)(void *drcontext, void *tag,
                                                    instrlist_t *bb, bool for_trace,
                                                    bool translating))
{
    return remove_callback(&bb_callbacks, (void (*)(void))func, true);
}
void dr_register_trace_event(dr_emit_flags_t (*func)(void *drcontext, void *tag,
                                                     instrlist_t *trace,
                                                     bool translating))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for trace event when code_api is disabled");
        return;
    }
    add_callback(&trace_callbacks, (void (*)(void))func, true);
}
bool dr_unregister_trace_event(dr_emit_flags_t (*func)(void *drcontext, void *tag,
                                                       instrlist_t *trace,
                                                       bool translating))
{
    return remove_callback(&trace_callbacks, (void (*)(void))func, true);
}
# ifdef CUSTOM_TRACES
/* End-trace event (CUSTOM_TRACES builds only); also requires -code_api. */
void dr_register_end_trace_event(dr_custom_trace_action_t (*func)(void *drcontext,
                                                                  void *tag,
                                                                  void *next_tag))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for end-trace event when code_api is disabled");
        return;
    }
    add_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
bool dr_unregister_end_trace_event(dr_custom_trace_action_t (*func)(void *drcontext,
                                                                    void *tag,
                                                                    void *next_tag))
{
    return remove_callback(&end_trace_callbacks, (void (*)(void))func, true);
}
# endif
void
dr_register_delete_event(void (*func)(void *drcontext, void *tag))
{
if (!INTERNAL_OPTION(code_api)) {
CLIENT_ASSERT(false, "asking for delete event when code_api is disabled");
return;
}
add_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
bool
dr_unregister_delete_event(void (*func)(void *drcontext, void *tag))
{
return remove_callback(&fragdel_callbacks, (void (*)(void))func, true);
}
/* Registers the legacy machine-state restore callback, invoked during fault
 * translation so the client can undo its instrumentation's state changes.
 * Requires the code_api option.
 */
void
dr_register_restore_state_event(void (*func)(void *drcontext, void *tag,
                                             dr_mcontext_t *mcontext, bool restore_memory,
                                             bool app_code_consistent))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for restore state event when code_api is disabled");
        return;
    }
    add_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
/* Removes a restore-state callback; returns whether it was found. */
bool
dr_unregister_restore_state_event(void (*func)(void *drcontext, void *tag,
                                               dr_mcontext_t *mcontext,
                                               bool restore_memory,
                                               bool app_code_consistent))
{
    return remove_callback(&restore_state_callbacks, (void (*)(void))func, true);
}
/* Registers the extended restore-state callback, which (unlike the legacy
 * version) can fail the translation by returning false.
 * Requires the code_api option.
 */
void
dr_register_restore_state_ex_event(bool (*func)(void *drcontext, bool restore_memory,
                                                dr_restore_state_info_t *info))
{
    if (!INTERNAL_OPTION(code_api)) {
        CLIENT_ASSERT(false, "asking for restore_state_ex event when code_api disabled");
        return;
    }
    add_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
/* Removes an extended restore-state callback; returns whether it was found. */
bool
dr_unregister_restore_state_ex_event(bool (*func)(void *drcontext, bool restore_memory,
                                                  dr_restore_state_info_t *info))
{
    return remove_callback(&restore_state_ex_callbacks, (void (*)(void))func, true);
}
/* Registers a callback invoked when a new thread is initialized under DR. */
void
dr_register_thread_init_event(void (*func)(void *drcontext))
{
    add_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
/* Removes a thread-init callback; returns whether it was found. */
bool
dr_unregister_thread_init_event(void (*func)(void *drcontext))
{
    return remove_callback(&thread_init_callbacks, (void (*)(void))func, true);
}
/* Registers a callback invoked when a thread exits. */
void
dr_register_thread_exit_event(void (*func)(void *drcontext))
{
    add_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
/* Removes a thread-exit callback; returns whether it was found. */
bool
dr_unregister_thread_exit_event(void (*func)(void *drcontext))
{
    return remove_callback(&thread_exit_callbacks, (void (*)(void))func, true);
}
# ifdef UNIX
/* Registers a callback invoked in the child after a fork (UNIX only). */
void
dr_register_fork_init_event(void (*func)(void *drcontext))
{
    add_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
/* Removes a fork-init callback; returns whether it was found. */
bool
dr_unregister_fork_init_event(void (*func)(void *drcontext))
{
    return remove_callback(&fork_init_callbacks, (void (*)(void))func, true);
}
# endif
/* Registers a callback invoked when DR is running low on memory.
 * NOTE(review): the K&R-style "(*func)()" (rather than "(void)") presumably
 * mirrors the exported header's declaration — confirm against the header
 * before tightening it.
 */
void
dr_register_low_on_memory_event(void (*func)())
{
    add_callback(&low_on_memory_callbacks, (void (*)(void))func, true);
}
/* Removes a low-on-memory callback; returns whether it was found. */
bool
dr_unregister_low_on_memory_event(void (*func)())
{
    return remove_callback(&low_on_memory_callbacks, (void (*)(void))func, true);
}
/* Registers a callback invoked when a module (library/image) is loaded. */
void
dr_register_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
                                           bool loaded))
{
    add_callback(&module_load_callbacks, (void (*)(void))func, true);
}
/* Removes a module-load callback; returns whether it was found. */
bool
dr_unregister_module_load_event(void (*func)(void *drcontext, const module_data_t *info,
                                             bool loaded))
{
    return remove_callback(&module_load_callbacks, (void (*)(void))func, true);
}
/* Registers a callback invoked when a module is unloaded. */
void
dr_register_module_unload_event(void (*func)(void *drcontext, const module_data_t *info))
{
    add_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
/* Removes a module-unload callback; returns whether it was found. */
bool
dr_unregister_module_unload_event(void (*func)(void *drcontext,
                                               const module_data_t *info))
{
    return remove_callback(&module_unload_callbacks, (void (*)(void))func, true);
}
# ifdef WINDOWS
/* Registers a callback invoked when the app receives a Windows exception.
 * Consistency fix: the callback is cast to the generic (void (*)(void)) type
 * used by every other add_callback/remove_callback call in this file; the
 * previous (bool (*)(void)) cast was the lone outlier and can draw an
 * incompatible-function-pointer diagnostic on stricter compilers.
 */
void
dr_register_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
    add_callback(&exception_callbacks, (void (*)(void))func, true);
}
/* Removes an exception callback; returns whether it was found. */
bool
dr_unregister_exception_event(bool (*func)(void *drcontext, dr_exception_t *excpt))
{
    return remove_callback(&exception_callbacks, (void (*)(void))func, true);
}
# else
/* Registers a callback invoked when the app receives a signal (non-Windows);
 * the callback's dr_signal_action_t return controls signal delivery.
 */
void dr_register_signal_event(dr_signal_action_t (*func)(void *drcontext,
                                                         dr_siginfo_t *siginfo))
{
    add_callback(&signal_callbacks, (void (*)(void))func, true);
}
/* Removes a signal callback; returns whether it was found. */
bool dr_unregister_signal_event(dr_signal_action_t (*func)(void *drcontext,
                                                           dr_siginfo_t *siginfo))
{
    return remove_callback(&signal_callbacks, (void (*)(void))func, true);
}
# endif /* WINDOWS */
/* Registers a callback that decides which system calls DR should intercept
 * (a true return requests interception of that sysnum).
 */
void
dr_register_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
    add_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
/* Removes a syscall-filter callback; returns whether it was found. */
bool
dr_unregister_filter_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
    return remove_callback(&filter_syscall_callbacks, (void (*)(void))func, true);
}
/* Registers a pre-syscall callback; a false return skips the syscall. */
void
dr_register_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
    add_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
/* Removes a pre-syscall callback; returns whether it was found. */
bool
dr_unregister_pre_syscall_event(bool (*func)(void *drcontext, int sysnum))
{
    return remove_callback(&pre_syscall_callbacks, (void (*)(void))func, true);
}
/* Registers a post-syscall callback, invoked after an intercepted syscall. */
void
dr_register_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
    add_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
/* Removes a post-syscall callback; returns whether it was found. */
bool
dr_unregister_post_syscall_event(void (*func)(void *drcontext, int sysnum))
{
    return remove_callback(&post_syscall_callbacks, (void (*)(void))func, true);
}
/* Registers a callback invoked on kernel-mediated control transfers
 * (signals, callbacks, setcontext, etc.; see dr_kernel_xfer_info_t).
 */
void
dr_register_kernel_xfer_event(void (*func)(void *drcontext,
                                           const dr_kernel_xfer_info_t *info))
{
    add_callback(&kernel_xfer_callbacks, (void (*)(void))func, true);
}
/* Removes a kernel-xfer callback; returns whether it was found. */
bool
dr_unregister_kernel_xfer_event(void (*func)(void *drcontext,
                                             const dr_kernel_xfer_info_t *info))
{
    return remove_callback(&kernel_xfer_callbacks, (void (*)(void))func, true);
}
# ifdef PROGRAM_SHEPHERDING
/* Registers a callback invoked on a program-shepherding security violation;
 * the callback may adjust the action via the *action out-parameter.
 */
void
dr_register_security_event(void (*func)(void *drcontext, void *source_tag,
                                        app_pc source_pc, app_pc target_pc,
                                        dr_security_violation_type_t violation,
                                        dr_mcontext_t *mcontext,
                                        dr_security_violation_action_t *action))
{
    add_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
/* Removes a security-violation callback; returns whether it was found. */
bool
dr_unregister_security_event(void (*func)(void *drcontext, void *source_tag,
                                          app_pc source_pc, app_pc target_pc,
                                          dr_security_violation_type_t violation,
                                          dr_mcontext_t *mcontext,
                                          dr_security_violation_action_t *action))
{
    return remove_callback(&security_violation_callbacks, (void (*)(void))func, true);
}
# endif
/* Registers a nudge callback for the client identified by id.  Unlike the
 * other event lists, each client's nudge list lives on the heap, so no
 * .data unprotection is needed.  Asserts if id matches no loaded client.
 */
void
dr_register_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
    size_t i;
    for (i = 0; i < num_client_libs; i++) {
        if (client_libs[i].id == id) {
            add_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func,
                         /* the nudge callback list is stored on the heap, so
                          * we don't need to unprotect the .data section when
                          * we update the list */
                         false);
            return;
        }
    }
    CLIENT_ASSERT(false, "dr_register_nudge_event: invalid client ID");
}
/* Removes a nudge callback for the client identified by id.  Returns whether
 * the callback was found; returns false (with an assert) for an unknown id.
 */
bool
dr_unregister_nudge_event(void (*func)(void *drcontext, uint64 argument), client_id_t id)
{
    size_t i;
    for (i = 0; i < num_client_libs; i++) {
        if (client_libs[i].id == id) {
            return remove_callback(&client_libs[i].nudge_callbacks, (void (*)(void))func,
                                   /* the nudge callback list is stored on the heap, so
                                    * we don't need to unprotect the .data section when
                                    * we update the list */
                                   false);
        }
    }
    CLIENT_ASSERT(false, "dr_unregister_nudge_event: invalid client ID");
    return false;
}
/* Delivers a nudge with the given argument to the client identified by
 * client_id in the target process.  For a self-nudge, validates that the
 * client exists and has a registered nudge handler before raising the nudge.
 * Returns a dr_config_status_t code (DR_SUCCESS on success).
 */
dr_config_status_t
dr_nudge_client_ex(process_id_t process_id, client_id_t client_id, uint64 argument,
                   uint timeout_ms)
{
    if (process_id == get_process_id()) {
        size_t i;
# ifdef WINDOWS
        pre_second_thread();
# endif
        for (i = 0; i < num_client_libs; i++) {
            if (client_libs[i].id == client_id) {
                if (client_libs[i].nudge_callbacks.num == 0) {
                    CLIENT_ASSERT(false, "dr_nudge_client: no nudge handler registered");
                    /* Bug fix: "return false" here was numerically identical to
                     * DR_SUCCESS (both 0), so callers such as dr_nudge_client()
                     * saw this failure as success.
                     */
                    return DR_FAILURE;
                }
                return nudge_internal(process_id, NUDGE_GENERIC(client), argument,
                                      client_id, timeout_ms);
            }
        }
        /* Unknown client id: likewise must not read as DR_SUCCESS. */
        return DR_FAILURE;
    } else {
        return nudge_internal(process_id, NUDGE_GENERIC(client), argument, client_id,
                              timeout_ms);
    }
}
/* Convenience wrapper: nudge a client within the current process with no
 * timeout, collapsing the detailed status code into a simple boolean.
 */
bool
dr_nudge_client(client_id_t client_id, uint64 argument)
{
    dr_config_status_t status =
        dr_nudge_client_ex(get_process_id(), client_id, argument, 0);
    return status == DR_SUCCESS;
}
# ifdef WINDOWS
DR_API
/* Reports whether the given thread context belongs to a nudge thread,
 * identified by a non-NULL nudge target in its dcontext.
 */
bool
dr_is_nudge_thread(void *drcontext)
{
    CLIENT_ASSERT(drcontext != NULL, "invalid parameter to dr_is_nudge_thread");
    dcontext_t *dc = (dcontext_t *)drcontext;
    return dc->nudge_target != NULL;
}
# endif
/* Allocates and zeroes this thread's client_data (if not already present) and,
 * for client-owned threads, marks the thread as outside of DR's control.
 * Called before instrument_init() for the initial thread (see the comment in
 * instrument_thread_init()).
 */
void
instrument_client_thread_init(dcontext_t *dcontext, bool client_thread)
{
    if (dcontext->client_data == NULL) {
        /* We use PROTECTED partly to keep it local (unprotected "local" heap is
         * global and adds complexity to thread exit: i#271).
         */
        dcontext->client_data =
            HEAP_TYPE_ALLOC(dcontext, client_data_t, ACCT_OTHER, PROTECTED);
        memset(dcontext->client_data, 0x0, sizeof(client_data_t));
# ifdef CLIENT_SIDELINE
        ASSIGN_INIT_LOCK_FREE(dcontext->client_data->sideline_mutex, sideline_mutex);
# endif
        CLIENT_ASSERT(dynamo_initialized || thread_init_callbacks.num == 0 ||
                          client_thread,
                      "1st call to instrument_thread_init should have no cbs");
    }
# ifdef CLIENT_SIDELINE
    if (client_thread) {
        ATOMIC_INC(int, num_client_sideline_threads);
        /* We don't call dynamo_thread_not_under_dynamo() b/c we want itimers. */
        dcontext->thread_record->under_dynamo_control = false;
        dcontext->client_data->is_client_thread = true;
        dcontext->client_data->suspendable = true;
    }
# endif /* CLIENT_SIDELINE */
}
/* Fires the client thread-init event for a new thread.  Client-owned threads
 * get no init event.  On Windows, temporarily swaps to DR's PEB state if the
 * thread arrives in app state (i#996), and valid_mc enables dr_get_mcontext()
 * from within the event (i#117/PR 395156).
 */
void
instrument_thread_init(dcontext_t *dcontext, bool client_thread, bool valid_mc)
{
    /* Note that we're called twice for the initial thread: once prior
     * to instrument_init() (PR 216936) to set up the dcontext client
     * field (at which point there should be no callbacks since client
     * has not had a chance to register any) (now split out, but both
     * routines are called prior to instrument_init()), and once after
     * instrument_init() to call the client event.
     */
# if defined(CLIENT_INTERFACE) && defined(WINDOWS)
    bool swap_peb = false;
# endif
    if (client_thread) {
        /* no init event */
        return;
    }
# if defined(CLIENT_INTERFACE) && defined(WINDOWS)
    /* i#996: we might be in app's state.
     * It is simpler to check and swap here than earlier on thread init paths.
     */
    if (dr_using_app_state(dcontext)) {
        swap_peb_pointer(dcontext, true /*to priv*/);
        swap_peb = true;
    }
# endif
    /* i#117/PR 395156: support dr_get_mcontext() from the thread init event */
    if (valid_mc)
        dcontext->client_data->mcontext_in_dcontext = true;
    call_all(thread_init_callbacks, int (*)(void *), (void *)dcontext);
    if (valid_mc)
        dcontext->client_data->mcontext_in_dcontext = false;
# if defined(CLIENT_INTERFACE) && defined(WINDOWS)
    if (swap_peb)
        swap_peb_pointer(dcontext, false /*to app*/);
# endif
}
# ifdef UNIX
/* Fires the client fork-init event in the child process after a fork. */
void
instrument_fork_init(dcontext_t *dcontext)
{
    call_all(fork_init_callbacks, int (*)(void *), (void *)dcontext);
}
# endif
/* Fires the client low-on-memory event (no arguments). */
void
instrument_low_on_memory()
{
    call_all(low_on_memory_callbacks, int (*)());
}
/* PR 536058: split the exit event from thread cleanup, to provide a
 * dcontext in the process exit event
 */
/* Fires the client thread-exit event.  Client sideline threads get no exit
 * event (except a nudge thread that called dr_exit_process(); see below).
 */
void
instrument_thread_exit_event(dcontext_t *dcontext)
{
# ifdef CLIENT_SIDELINE
    if (IS_CLIENT_THREAD(dcontext)
        /* if nudge thread calls dr_exit_process() it will be marked as a client
         * thread: rule it out here so we properly clean it up
         */
        IF_WINDOWS(&&dcontext->nudge_target == NULL)) {
        ATOMIC_DEC(int, num_client_sideline_threads);
        /* no exit event */
        return;
    }
# endif
    /* i#1394: best-effort to try to avoid crashing thread exit events
     * where thread init was never called.
     */
    if (!dynamo_initialized)
        return;
    /* support dr_get_mcontext() from the exit event */
    dcontext->client_data->mcontext_in_dcontext = true;
    /* Note - currently own initexit lock when this is called (see PR 227619). */
    call_all(thread_exit_callbacks, int (*)(void *), (void *)dcontext);
}
/* Frees this thread's client_data and any pending todo/flush lists.  All
 * freeing is debug-build only: release builds deliberately leak to avoid racy
 * crashes at thread exit (i#271).
 */
void
instrument_thread_exit(dcontext_t *dcontext)
{
# ifdef DEBUG
    client_todo_list_t *todo;
    client_flush_req_t *flush;
# endif
# ifdef DEBUG
    /* i#271: avoid racy crashes by not freeing in release build. */
# ifdef CLIENT_SIDELINE
    DELETE_LOCK(dcontext->client_data->sideline_mutex);
# endif
    /* could be heap space allocated for the todo list */
    todo = dcontext->client_data->to_do;
    while (todo != NULL) {
        client_todo_list_t *next_todo = todo->next;
        if (todo->ilist != NULL) {
            instrlist_clear_and_destroy(dcontext, todo->ilist);
        }
        HEAP_TYPE_FREE(dcontext, todo, client_todo_list_t, ACCT_CLIENT, PROTECTED);
        todo = next_todo;
    }
    /* could be heap space allocated for the flush list */
    flush = dcontext->client_data->flush_list;
    while (flush != NULL) {
        client_flush_req_t *next_flush = flush->next;
        HEAP_TYPE_FREE(dcontext, flush, client_flush_req_t, ACCT_CLIENT, PROTECTED);
        flush = next_flush;
    }
    HEAP_TYPE_FREE(dcontext, dcontext->client_data, client_data_t, ACCT_OTHER, PROTECTED);
    dcontext->client_data = NULL;              /* for mutex_wait_contended_lock() */
    dcontext->is_client_thread_exiting = true; /* for is_using_app_peb() */
# endif /* DEBUG */
}
/* Simple predicates reporting whether any client registered the corresponding
 * event, used by DR internals to skip work when no client cares.
 */
bool
dr_bb_hook_exists(void)
{
    return (bb_callbacks.num > 0);
}
bool
dr_trace_hook_exists(void)
{
    return (trace_callbacks.num > 0);
}
bool
dr_fragment_deleted_hook_exists(void)
{
    return (fragdel_callbacks.num > 0);
}
bool
dr_end_trace_hook_exists(void)
{
    return (end_trace_callbacks.num > 0);
}
bool
dr_thread_exit_hook_exists(void)
{
    return (thread_exit_callbacks.num > 0);
}
bool
dr_exit_hook_exists(void)
{
    return (exit_callbacks.num > 0);
}
/* True if either flavor of restore-state (translation) callback exists. */
bool
dr_xl8_hook_exists(void)
{
    return (restore_state_callbacks.num > 0 || restore_state_ex_callbacks.num > 0);
}
#endif /* CLIENT_INTERFACE */
/* needed outside of CLIENT_INTERFACE for simpler USE_BB_BUILDING_LOCK_STEADY_STATE() */
/* True if any client registered a module-load callback; always false without
 * CLIENT_INTERFACE.  Lives outside the CLIENT_INTERFACE region for
 * USE_BB_BUILDING_LOCK_STEADY_STATE().
 */
bool
dr_modload_hook_exists(void)
{
    /* We do not support (as documented in the module event doxygen)
     * the client changing this during bb building, as that will mess
     * up USE_BB_BUILDING_LOCK_STEADY_STATE().
     */
    return IF_CLIENT_INTERFACE_ELSE(module_load_callbacks.num > 0, false);
}
#ifdef CLIENT_INTERFACE
/* Returns true if the basic block starting at tag is DR-internal machinery
 * (Windows hook interception/landing-pad/syscall-trampoline code) that should
 * be hidden from clients.  Always false on non-Windows.
 */
bool
hide_tag_from_client(app_pc tag)
{
# ifdef WINDOWS
    /* Case 10009: Basic blocks that consist of a single jump into the
     * interception buffer should be obscured from clients.  Clients
     * will see the displaced code, so we'll provide the address of this
     * block if the client asks for the address of the displaced code.
     *
     * Note that we assume the jump is the first instruction in the
     * BB for any blocks that jump to the interception buffer.
     */
    if (is_intercepted_app_pc(tag, NULL) ||
        /* Displaced app code is now in the landing pad, so skip the
         * jump from the interception buffer to the landing pad
         */
        is_in_interception_buffer(tag) ||
        /* Landing pads that exist between hook points and the trampolines
         * shouldn't be seen by the client too.  PR 250294.
         */
        is_on_interception_initial_route(tag) ||
        /* PR 219351: if we lose control on a callback and get it back on
         * one of our syscall trampolines, we'll appear at the jmp out of
         * the interception buffer to the int/sysenter instruction.  The
         * problem is that our syscall trampolines, unlike our other
         * intercepted code, are hooked earlier than the real action point
         * and we have displaced app code at the start of the interception
         * buffer: we hook at the wrapper entrance and return w/ a jmp to
         * the sysenter/int instr.  When creating bbs at the start we hack
         * it to make it look like there is no hook.  But on retaking control
         * we end up w/ this jmp out that won't be solved w/ our normal
         * mechanism for other hook jmp-outs: so we just suppress and the
         * client next sees the post-syscall bb.  It already saw a gap.
         */
        is_syscall_trampoline(tag, NULL))
        return true;
# endif
    return false;
}
# ifdef DEBUG
/* PR 214962: client must set translation fields */
/* Debug-only sanity pass over an instrumented ilist: every app instruction
 * must carry a translation, and a meta instruction may carry one only if it
 * is DR's own mangling or a restore-state callback is registered.
 */
static void
check_ilist_translations(instrlist_t *ilist)
{
    /* Ensure client set the translation field for all non-meta
     * instrs, even if it didn't return DR_EMIT_STORE_TRANSLATIONS
     * (since we may decide ourselves to store)
     */
    instr_t *in;
    for (in = instrlist_first(ilist); in != NULL; in = instr_get_next(in)) {
        if (!instr_opcode_valid(in)) {
            /* Level-0 (undecoded) instrs are only legal with -fast_client_decode. */
            CLIENT_ASSERT(INTERNAL_OPTION(fast_client_decode), "level 0 instr found");
        } else if (instr_is_app(in)) {
            DOLOG(LOG_INTERP, 1, {
                if (instr_get_translation(in) == NULL) {
                    d_r_loginst(get_thread_private_dcontext(), 1, in,
                                "translation is NULL");
                }
            });
            CLIENT_ASSERT(instr_get_translation(in) != NULL,
                          "translation field must be set for every app instruction");
        } else {
            /* The meta instr could indeed not affect app state, but
             * better I think to assert and make them put in an
             * empty restore event callback in that case. */
            DOLOG(LOG_INTERP, 1, {
                if (instr_get_translation(in) != NULL && !instr_is_our_mangling(in) &&
                    !dr_xl8_hook_exists()) {
                    d_r_loginst(get_thread_private_dcontext(), 1, in,
                                "translation != NULL");
                }
            });
            CLIENT_ASSERT(instr_get_translation(in) == NULL ||
                              instr_is_our_mangling(in) || dr_xl8_hook_exists(),
                          /* FIXME: if multiple clients, we need to check that this
                           * particular client has the callback: but we have
                           * no way to do that other than looking at library
                           * bounds...punting for now */
                          "a meta instr should not have its translation field "
                          "set without also having a restore_state callback");
        }
    }
}
# endif
/* Returns true if the bb hook is called */
/* Dispatches the basic-block event to all registered clients, OR-ing their
 * dr_emit_flags_t returns into *emitflags (if non-NULL).  Returns false (no
 * hook called) when no callbacks exist or the tag is hidden from clients.
 */
bool
instrument_basic_block(dcontext_t *dcontext, app_pc tag, instrlist_t *bb, bool for_trace,
                       bool translating, dr_emit_flags_t *emitflags)
{
    dr_emit_flags_t ret = DR_EMIT_DEFAULT;
    /* return false if no BB hooks are registered */
    if (bb_callbacks.num == 0)
        return false;
    if (hide_tag_from_client(tag)) {
        LOG(THREAD, LOG_INTERP, 3, "hiding tag " PFX " from client\n", tag);
        return false;
    }
    /* do not expand or up-decode the instrlist, client gets to choose
     * whether and how to do that
     */
# ifdef DEBUG
    LOG(THREAD, LOG_INTERP, 3, "\ninstrument_basic_block ******************\n");
    LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
    if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_INTERP) != 0)
        instrlist_disassemble(dcontext, tag, bb, THREAD);
# endif
    /* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
    if (!translating && !for_trace)
        dcontext->client_data->mcontext_in_dcontext = true;
    /* Note - currently we are couldbelinking and hold the
     * bb_building lock when this is called (see PR 227619).
     */
    /* We or together the return values */
    call_all_ret(ret, |=, , bb_callbacks,
                 int (*)(void *, void *, instrlist_t *, bool, bool), (void *)dcontext,
                 (void *)tag, bb, for_trace, translating);
    if (emitflags != NULL)
        *emitflags = ret;
    DOCHECK(1, { check_ilist_translations(bb); });
    dcontext->client_data->mcontext_in_dcontext = false;
    if (IF_DEBUG_ELSE(for_trace, false)) {
        CLIENT_ASSERT(instrlist_get_return_target(bb) == NULL &&
                          instrlist_get_fall_through_target(bb) == NULL,
                      "instrlist_set_return/fall_through_target"
                      " cannot be used on traces");
    }
# ifdef DEBUG
    LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
    if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_INTERP) != 0)
        instrlist_disassemble(dcontext, tag, bb, THREAD);
# endif
    return true;
}
/* Give the user the completely mangled and optimized trace just prior
 * to emitting into code cache, user gets final crack at it
 */
/* Dispatches the trace event to all registered clients and returns the OR of
 * their dr_emit_flags_t results (DR_EMIT_DEFAULT if no callbacks).
 */
dr_emit_flags_t
instrument_trace(dcontext_t *dcontext, app_pc tag, instrlist_t *trace, bool translating)
{
    dr_emit_flags_t ret = DR_EMIT_DEFAULT;
# ifdef UNSUPPORTED_API
    instr_t *instr;
# endif
    if (trace_callbacks.num == 0)
        return DR_EMIT_DEFAULT;
    /* do not expand or up-decode the instrlist, client gets to choose
     * whether and how to do that
     */
# ifdef DEBUG
    LOG(THREAD, LOG_INTERP, 3, "\ninstrument_trace ******************\n");
    LOG(THREAD, LOG_INTERP, 3, "\nbefore instrumentation:\n");
    if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_INTERP) != 0)
        instrlist_disassemble(dcontext, tag, trace, THREAD);
# endif
    /* We always pass Level 3 instrs to the client, since we no longer
     * expose the expansion routines.
     */
# ifdef UNSUPPORTED_API
    for (instr = instrlist_first_expanded(dcontext, trace); instr != NULL;
         instr = instr_get_next_expanded(dcontext, trace, instr)) {
        instr_decode(dcontext, instr);
    }
    /* ASSUMPTION: all ctis are already at Level 3, so we don't have
     * to do a separate pass to fix up intra-list targets like
     * instrlist_decode_cti() does
     */
# endif
    /* i#117/PR 395156: allow dr_[gs]et_mcontext where accurate */
    if (!translating)
        dcontext->client_data->mcontext_in_dcontext = true;
    /* We or together the return values */
    call_all_ret(ret, |=, , trace_callbacks, int (*)(void *, void *, instrlist_t *, bool),
                 (void *)dcontext, (void *)tag, trace, translating);
    DOCHECK(1, { check_ilist_translations(trace); });
    CLIENT_ASSERT(instrlist_get_return_target(trace) == NULL &&
                      instrlist_get_fall_through_target(trace) == NULL,
                  "instrlist_set_return/fall_through_target"
                  " cannot be used on traces");
    dcontext->client_data->mcontext_in_dcontext = false;
# ifdef DEBUG
    LOG(THREAD, LOG_INTERP, 3, "\nafter instrumentation:\n");
    if (d_r_stats->loglevel >= 3 && (d_r_stats->logmask & LOG_INTERP) != 0)
        instrlist_disassemble(dcontext, tag, trace, THREAD);
# endif
    return ret;
}
/* Notify user when a fragment is deleted from the cache
 * FIXME PR 242544: how does user know whether this is a shadowed copy or the
 * real thing?  The user might free memory that shouldn't be freed!
 */
void
instrument_fragment_deleted(dcontext_t *dcontext, app_pc tag, uint flags)
{
    if (fragdel_callbacks.num == 0)
        return;
# ifdef WINDOWS
    /* Case 10009: We don't call the basic block hook for blocks that
     * are jumps to the interception buffer, so we'll hide them here
     * as well.
     */
    if (!TEST(FRAG_IS_TRACE, flags) && hide_tag_from_client(tag))
        return;
# endif
    /* PR 243008: we don't expose GLOBAL_DCONTEXT, so change to NULL.
     * Our comments warn the user about this.
     */
    if (dcontext == GLOBAL_DCONTEXT)
        dcontext = NULL;
    call_all(fragdel_callbacks, int (*)(void *, void *), (void *)dcontext, (void *)tag);
}
/* Dispatches both restore-state event flavors for fault translation.
 * Returns false if any extended callback fails the translation; asserts
 * that the translation does not fail when restore_memory is requested.
 */
bool
instrument_restore_state(dcontext_t *dcontext, bool restore_memory,
                         dr_restore_state_info_t *info)
{
    bool res = true;
    /* Support both legacy and extended handlers */
    if (restore_state_callbacks.num > 0) {
        call_all(restore_state_callbacks,
                 int (*)(void *, void *, dr_mcontext_t *, bool, bool), (void *)dcontext,
                 info->fragment_info.tag, info->mcontext, restore_memory,
                 info->fragment_info.app_code_consistent);
    }
    if (restore_state_ex_callbacks.num > 0) {
        /* i#220/PR 480565: client has option of failing the translation.
         * We fail it if any client wants to, short-circuiting in that case.
         * This does violate the "priority order" of events where the
         * last one is supposed to have final say b/c it won't even
         * see the event (xref i#424).
         */
        call_all_ret(res, = res &&, , restore_state_ex_callbacks,
                     int (*)(void *, bool, dr_restore_state_info_t *), (void *)dcontext,
                     restore_memory, info);
    }
    CLIENT_ASSERT(!restore_memory || res,
                  "translation should not fail for restore_memory=true");
    return res;
}
/* The client may need to translate memory (e.g., DRWRAP_REPLACE_RETADDR using
 * sentinel addresses outside of the code cache as targets) even when the register
 * state already contains application values.
 */
/* Runs the restore-state events for a fault outside the code cache, using a
 * caller-supplied dr_mcontext_t to avoid a second large stack allocation.
 * No-op (returns true) when no translation callbacks exist.  The possibly
 * client-modified state is copied back into *mcontext.
 */
bool
instrument_restore_nonfcache_state_prealloc(dcontext_t *dcontext, bool restore_memory,
                                            INOUT priv_mcontext_t *mcontext,
                                            OUT dr_mcontext_t *client_mcontext)
{
    if (!dr_xl8_hook_exists())
        return true;
    dr_restore_state_info_t client_info;
    dr_mcontext_init(client_mcontext);
    priv_mcontext_to_dr_mcontext(client_mcontext, mcontext);
    client_info.raw_mcontext = client_mcontext;
    client_info.raw_mcontext_valid = true;
    client_info.mcontext = client_mcontext;
    /* No fragment: the fault is outside the code cache. */
    client_info.fragment_info.tag = NULL;
    client_info.fragment_info.cache_start_pc = NULL;
    client_info.fragment_info.is_trace = false;
    client_info.fragment_info.app_code_consistent = true;
    bool res = instrument_restore_state(dcontext, restore_memory, &client_info);
    dr_mcontext_to_priv_mcontext(mcontext, client_mcontext);
    return res;
}
/* The large dr_mcontext_t on the stack makes a difference, so we provide two
 * versions to avoid a double alloc on the same callstack.
 */
/* Convenience wrapper that stack-allocates the dr_mcontext_t itself. */
bool
instrument_restore_nonfcache_state(dcontext_t *dcontext, bool restore_memory,
                                   INOUT priv_mcontext_t *mcontext)
{
    dr_mcontext_t client_mcontext;
    return instrument_restore_nonfcache_state_prealloc(dcontext, restore_memory, mcontext,
                                                       &client_mcontext);
}
# ifdef CUSTOM_TRACES
/* Ask whether to end trace prior to adding next_tag fragment.
 * Return values:
 *   CUSTOM_TRACE_DR_DECIDES = use standard termination criteria
 *   CUSTOM_TRACE_END_NOW    = end trace
 *   CUSTOM_TRACE_CONTINUE   = do not end trace
 */
dr_custom_trace_action_t
instrument_end_trace(dcontext_t *dcontext, app_pc trace_tag, app_pc next_tag)
{
    dr_custom_trace_action_t ret = CUSTOM_TRACE_DR_DECIDES;
    if (end_trace_callbacks.num == 0)
        return ret;
    /* Highest priority callback decides how to end the trace (see
     * call_all_ret implementation)
     */
    call_all_ret(ret, =, , end_trace_callbacks, int (*)(void *, void *, void *),
                 (void *)dcontext, (void *)trace_tag, (void *)next_tag);
    return ret;
}
# endif
/* Heap-allocates a module_data_t and deep-copies the given fields into it
 * (strings via dr_strdup; UNIX segments into a freshly allocated array).
 * Caller owns the result and frees it with dr_free_module_data().  Exactly
 * one of os_segments/segments is expected to be non-NULL on UNIX.
 */
static module_data_t *
create_and_initialize_module_data(app_pc start, app_pc end, app_pc entry_point,
                                  uint flags, const module_names_t *names,
                                  const char *full_path
# ifdef WINDOWS
                                  ,
                                  version_number_t file_version,
                                  version_number_t product_version, uint checksum,
                                  uint timestamp, size_t mod_size
# else
                                  ,
                                  bool contiguous, uint num_segments,
                                  module_segment_t *os_segments,
                                  module_segment_data_t *segments, uint timestamp
# ifdef MACOS
                                  ,
                                  uint current_version, uint compatibility_version,
                                  const byte uuid[16]
# endif
# endif
)
{
# ifndef WINDOWS
    uint i;
# endif
    module_data_t *copy = (module_data_t *)HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, module_data_t,
                                                           ACCT_CLIENT, UNPROTECTED);
    memset(copy, 0, sizeof(module_data_t));
    copy->start = start;
    copy->end = end;
    copy->entry_point = entry_point;
    copy->flags = flags;
    /* Strings are duplicated so the copy outlives the module areas list. */
    if (full_path != NULL)
        copy->full_path = dr_strdup(full_path HEAPACCT(ACCT_CLIENT));
    if (names->module_name != NULL)
        copy->names.module_name = dr_strdup(names->module_name HEAPACCT(ACCT_CLIENT));
    if (names->file_name != NULL)
        copy->names.file_name = dr_strdup(names->file_name HEAPACCT(ACCT_CLIENT));
# ifdef WINDOWS
    if (names->exe_name != NULL)
        copy->names.exe_name = dr_strdup(names->exe_name HEAPACCT(ACCT_CLIENT));
    if (names->rsrc_name != NULL)
        copy->names.rsrc_name = dr_strdup(names->rsrc_name HEAPACCT(ACCT_CLIENT));
    copy->file_version = file_version;
    copy->product_version = product_version;
    copy->checksum = checksum;
    copy->timestamp = timestamp;
    copy->module_internal_size = mod_size;
# else
    copy->contiguous = contiguous;
    copy->num_segments = num_segments;
    copy->segments = (module_segment_data_t *)HEAP_ARRAY_ALLOC(
        GLOBAL_DCONTEXT, module_segment_data_t, num_segments, ACCT_VMAREAS, PROTECTED);
    if (os_segments != NULL) {
        /* Source is internal module_segment_t: copy field by field. */
        ASSERT(segments == NULL);
        for (i = 0; i < num_segments; i++) {
            copy->segments[i].start = os_segments[i].start;
            copy->segments[i].end = os_segments[i].end;
            copy->segments[i].prot = os_segments[i].prot;
            copy->segments[i].offset = os_segments[i].offset;
        }
    } else {
        /* Source is already client-facing module_segment_data_t: memcpy. */
        ASSERT(segments != NULL);
        if (segments != NULL) {
            memcpy(copy->segments, segments,
                   num_segments * sizeof(module_segment_data_t));
        }
    }
    copy->timestamp = timestamp;
# ifdef MACOS
    copy->current_version = current_version;
    copy->compatibility_version = compatibility_version;
    memcpy(copy->uuid, uuid, sizeof(copy->uuid));
# endif
# endif
    return copy;
}
/* Converts an internal module_area_t into a freshly allocated client-facing
 * module_data_t (NULL in, NULL out).  Caller frees via dr_free_module_data().
 */
module_data_t *
copy_module_area_to_module_data(const module_area_t *area)
{
    if (area == NULL)
        return NULL;
    return create_and_initialize_module_data(
        area->start, area->end, area->entry_point, 0, &area->names, area->full_path
# ifdef WINDOWS
        ,
        area->os_data.file_version, area->os_data.product_version, area->os_data.checksum,
        area->os_data.timestamp, area->os_data.module_internal_size
# else
        ,
        area->os_data.contiguous, area->os_data.num_segments, area->os_data.segments,
        NULL, area->os_data.timestamp
# ifdef MACOS
        ,
        area->os_data.current_version, area->os_data.compatibility_version,
        area->os_data.uuid
# endif
# endif
    );
}
DR_API
/* Makes a copy of a module_data_t for returning to the client.  We return a copy so
 * we don't have to hold the module areas list lock while in the client (xref PR 225020).
 * Note - dr_data is allowed to be NULL. */
module_data_t *
dr_copy_module_data(const module_data_t *data)
{
    if (data == NULL)
        return NULL;
    return create_and_initialize_module_data(
        data->start, data->end, data->entry_point, 0, &data->names, data->full_path
# ifdef WINDOWS
        ,
        data->file_version, data->product_version, data->checksum, data->timestamp,
        data->module_internal_size
# else
        /* Source is already client-facing, so pass NULL for os_segments. */
        ,
        data->contiguous, data->num_segments, NULL, data->segments, data->timestamp
# ifdef MACOS
        ,
        data->current_version, data->compatibility_version, data->uuid
# endif
# endif
    );
}
DR_API
/* Used to free a module_data_t created by dr_copy_module_data() */
void
dr_free_module_data(module_data_t *data)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    if (data == NULL)
        return;
    /* Refuse to free the structure DR itself passed to a module event. */
    if (dcontext != NULL && data == dcontext->client_data->no_delete_mod_data) {
        CLIENT_ASSERT(false,
                      "dr_free_module_data: don\'t free module_data passed to "
                      "the image load or image unload event callbacks.");
        return;
    }
# ifdef UNIX
    HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, data->segments, module_segment_data_t,
                    data->num_segments, ACCT_VMAREAS, PROTECTED);
# endif
    if (data->full_path != NULL)
        dr_strfree(data->full_path HEAPACCT(ACCT_CLIENT));
    free_module_names(&data->names HEAPACCT(ACCT_CLIENT));
    HEAP_TYPE_FREE(GLOBAL_DCONTEXT, data, module_data_t, ACCT_CLIENT, UNPROTECTED);
}
DR_API
/* Reports whether addr lies within the given module.  On Windows (and for
 * contiguous modules elsewhere) this is a simple [start, end) range test;
 * otherwise each mapped segment is checked individually.
 */
bool
dr_module_contains_addr(const module_data_t *data, app_pc addr)
{
    /* XXX: this duplicates module_contains_addr(), but we have two different
     * data structures (module_area_t and module_data_t) so it's hard to share.
     */
# ifdef WINDOWS
    return addr >= data->start && addr < data->end;
# else
    if (data->contiguous)
        return addr >= data->start && addr < data->end;
    uint i;
    for (i = 0; i < data->num_segments; i++) {
        module_segment_data_t *seg = &data->segments[i];
        if (addr >= seg->start && addr < seg->end)
            return true;
    }
    return false;
# endif
}
/* Looks up module containing pc (assumed to be fully loaded).
 * If it exists and its client module load event has not been called, calls it.
 */
/* Uses a read-lock check first, then re-checks under the write lock (the
 * module may have changed in between) before marking MODULE_LOAD_EVENT and
 * firing the event outside the lock on a private copy of the module data.
 */
void
instrument_module_load_trigger(app_pc pc)
{
    if (CLIENTS_EXIST()) {
        module_area_t *ma;
        module_data_t *client_data = NULL;
        os_get_module_info_lock();
        ma = module_pc_lookup(pc);
        if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) {
            /* switch to write lock */
            os_get_module_info_unlock();
            os_get_module_info_write_lock();
            ma = module_pc_lookup(pc);
            if (ma != NULL && !TEST(MODULE_LOAD_EVENT, ma->flags)) {
                ma->flags |= MODULE_LOAD_EVENT;
                client_data = copy_module_area_to_module_data(ma);
                os_get_module_info_write_unlock();
                instrument_module_load(client_data, true /*i#884: already loaded*/);
                dr_free_module_data(client_data);
            } else
                os_get_module_info_write_unlock();
        } else
            os_get_module_info_unlock();
    }
}
/* Notify user when a module is loaded */
void
instrument_module_load(module_data_t *data, bool previously_loaded)
{
    /* Note - during DR initialization this routine is called before we've set up a
     * dcontext for the main thread and before we've called instrument_init.  It's okay
     * since there's no way a callback will be registered and we'll return immediately. */
    dcontext_t *dcontext;
    if (module_load_callbacks.num == 0)
        return;
    dcontext = get_thread_private_dcontext();
    /* client shouldn't delete this */
    dcontext->client_data->no_delete_mod_data = data;
    call_all(module_load_callbacks, int (*)(void *, module_data_t *, bool),
             (void *)dcontext, data, previously_loaded);
    /* Re-allow freeing once the event is over. */
    dcontext->client_data->no_delete_mod_data = NULL;
}
/* Notify user when a module is unloaded */
void
instrument_module_unload(module_data_t *data)
{
    dcontext_t *dcontext;
    if (module_unload_callbacks.num == 0)
        return;
    dcontext = get_thread_private_dcontext();
    /* client shouldn't delete this */
    dcontext->client_data->no_delete_mod_data = data;
    call_all(module_unload_callbacks, int (*)(void *, module_data_t *), (void *)dcontext,
             data);
    /* Re-allow freeing once the event is over. */
    dcontext->client_data->no_delete_mod_data = NULL;
}
/* returns whether this sysnum should be intercepted */
bool
instrument_filter_syscall(dcontext_t *dcontext, int sysnum)
{
    bool ret = false;
    /* if client does not filter then we don't intercept anything */
    if (filter_syscall_callbacks.num == 0)
        return ret;
    /* if any client wants to intercept, then we intercept */
    call_all_ret(ret, =, || ret, filter_syscall_callbacks, bool (*)(void *, int),
                 (void *)dcontext, sysnum);
    return ret;
}
/* returns whether this syscall should execute */
/* Dispatches the pre-syscall event; AND-s the callbacks' returns so a single
 * false skips the syscall, but all callbacks still run (xref i#424).
 * Sets in_pre_syscall for the duration and clears any pending
 * dr_syscall_invoke_another() request.
 */
bool
instrument_pre_syscall(dcontext_t *dcontext, int sysnum)
{
    bool exec = true;
    dcontext->client_data->in_pre_syscall = true;
    /* clear flag from dr_syscall_invoke_another() */
    dcontext->client_data->invoke_another_syscall = false;
    if (pre_syscall_callbacks.num > 0) {
        dr_where_am_i_t old_whereami = dcontext->whereami;
        dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
        DODEBUG({
            /* Avoid the common mistake of forgetting a filter event. */
            CLIENT_ASSERT(filter_syscall_callbacks.num > 0,
                          "A filter event must be "
                          "provided when using pre- and post-syscall events");
        });
        /* Skip syscall if any client wants to skip it, but don't short-circuit,
         * as skipping syscalls is usually done when the effect of the syscall
         * will be emulated in some other way.  The app is typically meant to
         * think that the syscall succeeded.  Thus, other tool components
         * should see the syscall as well (xref i#424).
         */
        call_all_ret(exec, =, &&exec, pre_syscall_callbacks, bool (*)(void *, int),
                     (void *)dcontext, sysnum);
        dcontext->whereami = old_whereami;
    }
    dcontext->client_data->in_pre_syscall = false;
    return exec;
}
/* Dispatches the post-syscall event to all registered callbacks.
 * Sets in_post_syscall and whereami around the callbacks so API routines
 * can tell they are being invoked from this event.
 */
void
instrument_post_syscall(dcontext_t *dcontext, int sysnum)
{
    dr_where_am_i_t old_whereami = dcontext->whereami;
    if (post_syscall_callbacks.num == 0)
        return;
    DODEBUG({
        /* Avoid the common mistake of forgetting a filter event. */
        CLIENT_ASSERT(filter_syscall_callbacks.num > 0,
                      "A filter event must be "
                      "provided when using pre- and post-syscall events");
    });
    dcontext->whereami = DR_WHERE_SYSCALL_HANDLER;
    dcontext->client_data->in_post_syscall = true;
    call_all(post_syscall_callbacks, int (*)(void *, int), (void *)dcontext, sysnum);
    dcontext->client_data->in_post_syscall = false;
    dcontext->whereami = old_whereami;
}
/* Reports whether a client requested chaining of another syscall via
 * dr_syscall_invoke_another() during the current syscall event.
 */
bool
instrument_invoke_another_syscall(dcontext_t *dcontext)
{
    bool requested = dcontext->client_data->invoke_another_syscall;
    return requested;
}
/* Dispatches the kernel-transfer event to all registered callbacks.
 * Returns false immediately (no context marshaling) when no callbacks exist.
 * The source context is taken from the first non-empty of, in priority order:
 * source_dmc, source_mc, source_os_cxt; info.source_mcontext stays NULL if
 * none is available or conversion fails.
 */
bool
instrument_kernel_xfer(dcontext_t *dcontext, dr_kernel_xfer_type_t type,
                       os_cxt_ptr_t source_os_cxt, dr_mcontext_t *source_dmc,
                       priv_mcontext_t *source_mc, app_pc target_pc, reg_t target_xsp,
                       os_cxt_ptr_t target_os_cxt, priv_mcontext_t *target_mc, int sig)
{
    if (kernel_xfer_callbacks.num == 0) {
        return false;
    }
    dr_kernel_xfer_info_t info;
    info.type = type;
    info.source_mcontext = NULL;
    info.target_pc = target_pc;
    info.target_xsp = target_xsp;
    info.sig = sig;
    /* Stack-local conversion buffer used for either source form below. */
    dr_mcontext_t dr_mcontext;
    dr_mcontext.size = sizeof(dr_mcontext);
    dr_mcontext.flags = DR_MC_CONTROL | DR_MC_INTEGER;
    if (source_dmc != NULL)
        info.source_mcontext = source_dmc;
    else if (source_mc != NULL) {
        if (priv_mcontext_to_dr_mcontext(&dr_mcontext, source_mc))
            info.source_mcontext = &dr_mcontext;
    } else if (!is_os_cxt_ptr_null(source_os_cxt)) {
        if (os_context_to_mcontext(&dr_mcontext, NULL, source_os_cxt))
            info.source_mcontext = &dr_mcontext;
    }
    /* Our compromise to reduce context copying is to provide the PC and XSP inline,
     * and only get more if the user calls dr_get_mcontext(), which we support again
     * without any copying if not used by taking in a raw os_context_t.
     */
    dcontext->client_data->os_cxt = target_os_cxt;
    dcontext->client_data->cur_mc = target_mc;
    call_all(kernel_xfer_callbacks, int (*)(void *, const dr_kernel_xfer_info_t *),
             (void *)dcontext, &info);
    /* Clear the stashed target context now that the callbacks are done. */
    set_os_cxt_ptr_null(&dcontext->client_data->os_cxt);
    dcontext->client_data->cur_mc = NULL;
    return true;
}
# ifdef WINDOWS
/* Notify user of exceptions. Note: not called for RaiseException.
 * Returns false if some client "owns" the fault (returned false), in which
 * case the exception is not passed on to the app.
 */
bool
instrument_exception(dcontext_t *dcontext, dr_exception_t *exception)
{
    bool res = true;
    /* Ensure that dr_get_mcontext() called from instrument_kernel_xfer() from
     * dr_redirect_execution() will get the source context.
     * cur_mc will later be clobbered by instrument_kernel_xfer() which is ok:
     * the redirect ends the callback calling.
     */
    dcontext->client_data->cur_mc = dr_mcontext_as_priv_mcontext(exception->mcontext);
    /* We short-circuit if any client wants to "own" the fault and not pass on.
     * This does violate the "priority order" of events where the last one is
     * supposed to have final say b/c it won't even see the event: but only one
     * registrant should own it (xref i#424).
     * The macro accumulates "res = res && cb(...)": once res is false,
     * && short-circuits and later callbacks are not invoked.
     */
    call_all_ret(res, = res &&, , exception_callbacks, bool (*)(void *, dr_exception_t *),
                 (void *)dcontext, exception);
    dcontext->client_data->cur_mc = NULL;
    return res;
}
# else
/* Dispatches the signal event; returns the action DR should take.
 * DR_SIGNAL_DELIVER (the default) means pass the signal to the app.
 */
dr_signal_action_t
instrument_signal(dcontext_t *dcontext, dr_siginfo_t *siginfo)
{
    dr_signal_action_t ret = DR_SIGNAL_DELIVER;
    /* We short-circuit if any client wants to do other than deliver to the app.
     * This does violate the "priority order" of events where the last one is
     * supposed to have final say b/c it won't even see the event: but only one
     * registrant should own the signal (xref i#424).
     * The macro expands each step to
     * "ret = ret == DR_SIGNAL_DELIVER ? cb(...) : ret", so the ternary skips
     * later callbacks once a non-default action is returned.
     */
    call_all_ret(ret, = ret == DR_SIGNAL_DELIVER ?, : ret, signal_callbacks,
                 dr_signal_action_t(*)(void *, dr_siginfo_t *), (void *)dcontext,
                 siginfo);
    return ret;
}
/* True iff at least one client has registered a signal event callback. */
bool
dr_signal_hook_exists(void)
{
    return signal_callbacks.num > 0;
}
# endif /* WINDOWS */
# ifdef PROGRAM_SHEPHERDING
/* Notify user when a security violation is detected.
 * Translates the internal violation and action enums to the client-facing
 * dr_security_violation_* values, invokes all callbacks, and translates any
 * action change back into *action.  Returns early (leaving *action as-is)
 * if no callbacks are registered or the mcontext cannot be converted.
 */
void
instrument_security_violation(dcontext_t *dcontext, app_pc target_pc,
                              security_violation_t violation, action_type_t *action)
{
    dr_security_violation_type_t dr_violation;
    dr_security_violation_action_t dr_action, dr_action_original;
    app_pc source_pc = NULL;
    fragment_t *last;
    dr_mcontext_t dr_mcontext;
    dr_mcontext_init(&dr_mcontext);
    if (security_violation_callbacks.num == 0)
        return;
    if (!priv_mcontext_to_dr_mcontext(&dr_mcontext, get_mcontext(dcontext)))
        return;
    /* FIXME - the source_tag, source_pc, and context can all be incorrect if the
     * violation ends up occurring in the middle of a bb we're building. See case
     * 7380 which we should fix in interp.c.
     */
    /* Obtain the source addr to pass to the client. xref case 285 --
     * we're using the more heavy-weight solution 2) here, but that
     * should be okay since we already have the overhead of calling
     * into the client. */
    last = dcontext->last_fragment;
    if (!TEST(FRAG_FAKE, last->flags)) {
        cache_pc pc = EXIT_CTI_PC(last, dcontext->last_exit);
        source_pc = recreate_app_pc(dcontext, pc, last);
    }
    /* FIXME - set pc field of dr_mcontext_t. We'll probably want it
     * for thread start and possibly apc/callback events as well.
     */
    /* Map the internal violation enum to the client-visible one. */
    switch (violation) {
    case STACK_EXECUTION_VIOLATION: dr_violation = DR_RCO_STACK_VIOLATION; break;
    case HEAP_EXECUTION_VIOLATION: dr_violation = DR_RCO_HEAP_VIOLATION; break;
    case RETURN_TARGET_VIOLATION: dr_violation = DR_RCT_RETURN_VIOLATION; break;
    case RETURN_DIRECT_RCT_VIOLATION:
        ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
        dr_violation = DR_UNKNOWN_VIOLATION;
        break;
    case INDIRECT_CALL_RCT_VIOLATION:
        dr_violation = DR_RCT_INDIRECT_CALL_VIOLATION;
        break;
    case INDIRECT_JUMP_RCT_VIOLATION:
        dr_violation = DR_RCT_INDIRECT_JUMP_VIOLATION;
        break;
    default:
        ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
        dr_violation = DR_UNKNOWN_VIOLATION;
        break;
    }
    /* Map the internal action enum to the client-visible one. */
    switch (*action) {
    case ACTION_TERMINATE_PROCESS: dr_action = DR_VIOLATION_ACTION_KILL_PROCESS; break;
    case ACTION_CONTINUE: dr_action = DR_VIOLATION_ACTION_CONTINUE; break;
    case ACTION_TERMINATE_THREAD: dr_action = DR_VIOLATION_ACTION_KILL_THREAD; break;
    case ACTION_THROW_EXCEPTION: dr_action = DR_VIOLATION_ACTION_THROW_EXCEPTION; break;
    default:
        ASSERT(false); /* Not a client fault, should be NOT_REACHED(). */
        dr_action = DR_VIOLATION_ACTION_CONTINUE;
        break;
    }
    dr_action_original = dr_action;
    /* NOTE - last->tag should be valid here (even if the frag is fake since the
     * coarse wrappers set the tag). FIXME - for traces we really want the bb tag not
     * the trace tag, should get that. Of course the only real reason we pass source
     * tag is because we can't always give a valid source_pc. */
    /* Note that the last registered function gets the final crack at
     * changing the action.
     */
    call_all(security_violation_callbacks,
             int (*)(void *, void *, app_pc, app_pc, dr_security_violation_type_t,
                     dr_mcontext_t *, dr_security_violation_action_t *),
             (void *)dcontext, last->tag, source_pc, target_pc, dr_violation,
             &dr_mcontext, &dr_action);
    /* Translate any client-requested action change back to the internal enum. */
    if (dr_action != dr_action_original) {
        switch (dr_action) {
        case DR_VIOLATION_ACTION_KILL_PROCESS: *action = ACTION_TERMINATE_PROCESS; break;
        case DR_VIOLATION_ACTION_KILL_THREAD: *action = ACTION_TERMINATE_THREAD; break;
        case DR_VIOLATION_ACTION_THROW_EXCEPTION: *action = ACTION_THROW_EXCEPTION; break;
        case DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT:
            /* FIXME - not safe to implement till case 7380 is fixed. */
            CLIENT_ASSERT(false,
                          "action DR_VIOLATION_ACTION_CONTINUE_CHANGED_CONTEXT "
                          "not yet supported.");
            /* note - no break, fall through */
        case DR_VIOLATION_ACTION_CONTINUE: *action = ACTION_CONTINUE; break;
        default:
            CLIENT_ASSERT(false,
                          "Security violation event callback returned invalid "
                          "action value.");
        }
    }
}
# endif
/* Notify the client of a nudge.
 * Routes the nudge to the client matching id (or the first client on VMX86 —
 * see PR 477454) and invokes its nudge callbacks.  On Windows the calling
 * thread is temporarily marked as a client thread and counted so exit can
 * wait for outstanding nudges; on UNIX the mcontext is made available so
 * dr_get_mcontext() works from the callback.
 */
void
instrument_nudge(dcontext_t *dcontext, client_id_t id, uint64 arg)
{
    size_t i;
    CLIENT_ASSERT(!standalone_library, "API not supported in standalone mode");
    ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT &&
           dcontext == get_thread_private_dcontext());
    /* synch_with_all_threads and flush API assume that client nudge threads
     * hold no dr locks and are !couldbelinking while in client lib code */
    ASSERT_OWN_NO_LOCKS();
    ASSERT(!is_couldbelinking(dcontext));
    /* find the client the nudge is intended for */
    for (i = 0; i < num_client_libs; i++) {
        /* until we have nudge-arg support (PR 477454), nudges target the 1st client */
        if (IF_VMX86_ELSE(true, client_libs[i].id == id)) {
            break;
        }
    }
    if (i == num_client_libs || client_libs[i].nudge_callbacks.num == 0)
        return;
# ifdef WINDOWS
    /* count the number of nudge events so we can make sure they're
     * all finished before exiting
     */
    d_r_mutex_lock(&client_thread_count_lock);
    if (block_client_nudge_threads) {
        /* FIXME - would be nice if there was a way to let the external agent know that
         * the nudge event wasn't delivered (but this only happens when the process
         * is detaching or exiting). */
        d_r_mutex_unlock(&client_thread_count_lock);
        return;
    }
    /* atomic to avoid locking around the dec */
    ATOMIC_INC(int, num_client_nudge_threads);
    d_r_mutex_unlock(&client_thread_count_lock);
    /* We need to mark this as a client controlled thread for synch_with_all_threads
     * and otherwise treat it as native. Xref PR 230836 on what to do if this
     * thread hits native_exec_syscalls hooks.
     * XXX: this requires extra checks for "not a nudge thread" after IS_CLIENT_THREAD
     * in get_stack_bounds() and instrument_thread_exit_event(): maybe better
     * to have synchall checks do extra checks and have IS_CLIENT_THREAD be
     * false for nudge threads at exit time?
     */
    dcontext->client_data->is_client_thread = true;
    dcontext->thread_record->under_dynamo_control = false;
# else
    /* support calling dr_get_mcontext() on this thread. the app
     * context should be intact in the current mcontext except
     * pc which we set from next_tag.
     */
    CLIENT_ASSERT(!dcontext->client_data->mcontext_in_dcontext,
                  "internal inconsistency in where mcontext is");
    dcontext->client_data->mcontext_in_dcontext = true;
    /* officially get_mcontext() doesn't always set pc: we do anyway */
    get_mcontext(dcontext)->pc = dcontext->next_tag;
# endif
    call_all(client_libs[i].nudge_callbacks, int (*)(void *, uint64), (void *)dcontext,
             arg);
    /* Undo the per-platform setup performed above. */
# ifdef UNIX
    dcontext->client_data->mcontext_in_dcontext = false;
# else
    dcontext->thread_record->under_dynamo_control = true;
    dcontext->client_data->is_client_thread = false;
    ATOMIC_DEC(int, num_client_nudge_threads);
# endif
}
/* Total count of client-owned threads: outstanding nudge threads (the
 * counter exists on Windows only) plus any sideline threads.
 */
int
get_num_client_threads(void)
{
    int num = IF_WINDOWS_ELSE(num_client_nudge_threads, 0);
# ifdef CLIENT_SIDELINE
    num += num_client_sideline_threads;
# endif
    return num;
}
# ifdef WINDOWS
/* wait for all nudges to finish (Windows only).
 * Blocks any new nudge threads from starting, then yields in a loop until
 * num_client_nudge_threads reaches zero.  The wait is skipped entirely when
 * the client itself requested exit (possibly from within a nudge).
 */
void
wait_for_outstanding_nudges(void) /* (void): proper C prototype, not the
                                   * old-style unspecified parameter list */
{
    /* block any new nudge threads from starting */
    d_r_mutex_lock(&client_thread_count_lock);
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    block_client_nudge_threads = true;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
    DOLOG(1, LOG_TOP, {
        if (num_client_nudge_threads > 0) {
            LOG(GLOBAL, LOG_TOP, 1,
                "Waiting for %d nudges to finish - app is about to kill all threads "
                "except the current one.\n",
                num_client_nudge_threads);
        }
    });
    /* don't wait if the client requested exit: after all the client might
     * have done so from a nudge, and if the client does want to exit it's
     * its own problem if it misses nudges (and external nudgers should use
     * a finite timeout)
     */
    if (client_requested_exit) {
        d_r_mutex_unlock(&client_thread_count_lock);
        return;
    }
    while (num_client_nudge_threads > 0) {
        /* yield with lock released to allow nudges to finish */
        d_r_mutex_unlock(&client_thread_count_lock);
        dr_thread_yield();
        d_r_mutex_lock(&client_thread_count_lock);
    }
    d_r_mutex_unlock(&client_thread_count_lock);
}
# endif /* WINDOWS */
/****************************************************************************/
/* EXPORTED ROUTINES */
DR_API
/* Creates a DR context that can be used in a standalone program.
 * WARNING: this context cannot be used as the drcontext for a thread
 * running under DR control! It is only for standalone programs that
 * wish to use DR as a library of disassembly, etc. routines.
 */
void *
dr_standalone_init(void)
{
    return (void *)standalone_init();
}
DR_API
void
dr_standalone_exit(void)
{
    /* Tears down the state created by dr_standalone_init(). */
    standalone_exit();
}
DR_API
/* Aborts the process immediately */
void
dr_abort(void)
{
    /* Optionally produce a core dump first, per the dumpcore mask option. */
    if (TEST(DUMPCORE_DR_ABORT, dynamo_options.dumpcore_mask)) {
        os_dump_core("dr_abort");
    }
    os_terminate(NULL, TERMINATE_PROCESS);
}
DR_API
/* Like dr_abort() but terminates with the given process exit code. */
void
dr_abort_with_code(int exit_code)
{
    if (TEST(DUMPCORE_DR_ABORT, dynamo_options.dumpcore_mask)) {
        os_dump_core("dr_abort");
    }
    os_terminate_with_code(NULL, TERMINATE_PROCESS, exit_code);
}
DR_API
/* Cleanly terminates the process with exit_code at the client's request.
 * Does not return: control ends in os_terminate_with_code() (or, on
 * Windows from a nudge thread, in nudge_thread_cleanup()).
 */
void
dr_exit_process(int exit_code)
{
    dcontext_t *dcontext = get_thread_private_dcontext();
    SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
    /* Prevent cleanup from waiting for nudges as this may be called
     * from a nudge!
     * Also suppress leak asserts, as it's hard to clean up from
     * some situations (such as DrMem -crash_at_error).
     */
    client_requested_exit = true;
    SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT);
# ifdef WINDOWS
    if (dcontext != NULL && dcontext->nudge_target != NULL) {
        /* we need to free the nudge thread stack which may involved
         * switching stacks so we have the nudge thread invoke
         * os_terminate for us
         */
        nudge_thread_cleanup(dcontext, true /*kill process*/, exit_code);
        CLIENT_ASSERT(false, "shouldn't get here");
    }
# endif
    if (!is_currently_on_dstack(dcontext)
            IF_UNIX(&&!is_currently_on_sigaltstack(dcontext))) {
        /* if on app stack or sigaltstack, avoid incorrect leak assert at exit */
        SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT);
        dr_api_exit = true;
        SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); /* to keep properly nested */
    }
    os_terminate_with_code(dcontext, /* dcontext is required */
                           TERMINATE_CLEANUP | TERMINATE_PROCESS, exit_code);
    CLIENT_ASSERT(false, "shouldn't get here");
}
DR_API
/* Requests a memory dump per spec; returns whether one was produced.
 * Currently only DR_MEMORY_DUMP_LDMP on Windows is implemented; all other
 * combinations return false.
 */
bool
dr_create_memory_dump(dr_memory_dump_spec_t *spec)
{
    /* Size check rejects a spec built against a different API version. */
    if (spec->size != sizeof(dr_memory_dump_spec_t))
        return false;
# ifdef WINDOWS
    if (TEST(DR_MEMORY_DUMP_LDMP, spec->flags))
        return os_dump_core_live(spec->label, spec->ldmp_path, spec->ldmp_path_size);
# endif
    return false;
}
DR_API
/* Returns true if all DynamoRIO caches are thread private. */
bool
dr_using_all_private_caches(void)
{
    /* Caches are all-private exactly when shared fragments are disabled. */
    bool shared = SHARED_FRAGMENTS_ENABLED();
    return !shared;
}
DR_API
/* Deprecated no-op: retained only to steer users to the replacement API. */
void
dr_request_synchronized_exit(void)
{
    SYSLOG_INTERNAL_WARNING_ONCE("dr_request_synchronized_exit deprecated: "
                                 "use dr_set_process_exit_behavior instead");
}
DR_API
/* Mirrors the DR_EXIT_* flags into the corresponding runtime options,
 * writing the (normally read-only) option struct only when a value
 * actually changes.
 */
void
dr_set_process_exit_behavior(dr_exit_flags_t flags)
{
    bool want_multi = TEST(DR_EXIT_MULTI_THREAD, flags);
    if (DYNAMO_OPTION(multi_thread_exit) != want_multi) {
        options_make_writable();
        dynamo_options.multi_thread_exit = want_multi;
        options_restore_readonly();
    }
    bool want_skip = TEST(DR_EXIT_SKIP_THREAD_EXIT, flags);
    if (DYNAMO_OPTION(skip_thread_exit_at_exit) != want_skip) {
        options_make_writable();
        dynamo_options.skip_thread_exit_at_exit = want_skip;
        options_restore_readonly();
    }
}
/* Thin forwarder; see loader_allow_unsafe_static_behavior() for semantics. */
void
dr_allow_unsafe_static_behavior(void)
{
    loader_allow_unsafe_static_behavior();
}
DR_API
/* Returns the option string passed along with a client path via DR's
 * -client_lib option.
 */
/* i#1736: we now token-delimit with quotes, but for backward compat we need to
 * pass a version w/o quotes for dr_get_options().
 */
const char *
dr_get_options(client_id_t id)
{
    size_t i;
    for (i = 0; i < num_client_libs; i++) {
        if (client_libs[i].id == id) {
            /* If we already converted, pass the result */
            if (client_libs[i].legacy_options[0] != '\0' ||
                client_libs[i].options[0] == '\0')
                return client_libs[i].legacy_options;
            /* For backward compatibility, we need to remove the token-delimiting
             * quotes. We tokenize, and then re-assemble the flat string.
             * i#1755: however, for legacy custom frontends that are not re-quoting
             * like drrun now is, we need to avoid removing any quotes from the
             * original strings. We try to detect this by assuming a frontend will
             * either re-quote everything or nothing. Ideally we would check all
             * args, but that would require plumbing info from getword() or
             * duplicating its functionality: so instead our heuristic is just checking
             * the first and last chars.
             */
            if (!char_is_quote(client_libs[i].options[0]) ||
                /* Empty string already detected above */
                !char_is_quote(
                    client_libs[i].options[strlen(client_libs[i].options) - 1])) {
                /* At least one arg is not quoted => better use original */
                snprintf(client_libs[i].legacy_options,
                         BUFFER_SIZE_ELEMENTS(client_libs[i].legacy_options), "%s",
                         client_libs[i].options);
            } else {
                /* Re-assemble the space-separated args without their quotes. */
                int j;
                size_t sofar = 0;
                for (j = 1 /*skip client lib*/; j < client_libs[i].argc; j++) {
                    if (!print_to_buffer(
                            client_libs[i].legacy_options,
                            BUFFER_SIZE_ELEMENTS(client_libs[i].legacy_options), &sofar,
                            "%s%s", (j == 1) ? "" : " ", client_libs[i].argv[j]))
                        break;
                }
            }
            NULL_TERMINATE_BUFFER(client_libs[i].legacy_options);
            return client_libs[i].legacy_options;
        }
    }
    CLIENT_ASSERT(false, "dr_get_options(): invalid client id");
    return NULL;
}
DR_API
/* Looks up the client by id and hands back its tokenized option array.
 * Returns false (after asserting in debug build) for an unknown id.
 */
bool
dr_get_option_array(client_id_t id, int *argc OUT, const char ***argv OUT)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (client_libs[idx].id != id)
            continue;
        *argc = client_libs[idx].argc;
        *argv = client_libs[idx].argv;
        return true;
    }
    CLIENT_ASSERT(false, "dr_get_option_array(): invalid client id");
    return false;
}
DR_API
/* Returns the path to the client library. Client must pass its ID */
const char *
dr_get_client_path(client_id_t id)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (client_libs[idx].id == id)
            return client_libs[idx].path;
    }
    CLIENT_ASSERT(false, "dr_get_client_path(): invalid client id");
    return NULL;
}
DR_API
/* Returns the load base of the client library identified by id,
 * or NULL (asserting in debug build) for an unknown id.
 */
byte *
dr_get_client_base(client_id_t id)
{
    size_t idx;
    for (idx = 0; idx < num_client_libs; idx++) {
        if (client_libs[idx].id == id)
            return client_libs[idx].start;
    }
    CLIENT_ASSERT(false, "dr_get_client_base(): invalid client id");
    return NULL;
}
DR_API
/* Registers the client's name and report URL for error reporting.
 * Returns false if either argument is NULL.
 */
bool
dr_set_client_name(const char *name, const char *report_URL)
{
    /* Although set_exception_strings() accepts NULL, clients should pass real vals. */
    bool valid = (name != NULL && report_URL != NULL);
    if (valid)
        set_exception_strings(name, report_URL);
    return valid;
}
/* Records the client-supplied display version string; rejects NULL. */
bool
dr_set_client_version_string(const char *version)
{
    bool ok = (version != NULL);
    if (ok)
        set_display_version(version);
    return ok;
}
DR_API const char *
dr_get_application_name(void)
{
    /* UNIX reports the short name; Windows additionally strips qualification. */
# ifdef UNIX
    return get_application_short_name();
# else
    return get_application_short_unqualified_name();
# endif
}
/* Records error_code in the per-thread client data.  A NULL or
 * GLOBAL_DCONTEXT argument means "the current thread".
 */
void
set_client_error_code(dcontext_t *dcontext, dr_error_code_t error_code)
{
    bool use_current_thread = (dcontext == NULL || dcontext == GLOBAL_DCONTEXT);
    if (use_current_thread)
        dcontext = get_thread_private_dcontext();
    dcontext->client_data->error_code = error_code;
}
dr_error_code_t
dr_get_error_code(void *drcontext)
{
dcontext_t *dcontext = (dcontext_t *)drcontext;
if (dcontext == GLOBAL_DCONTEXT)
dcontext = get_thread_private_dcontext();
CLIENT_ASSERT(dcontext != NULL, "invalid drcontext");
return dcontext->client_data->error_code;
}
/* Number of arguments the app was launched with. */
int
dr_num_app_args(void)
{
    /* XXX i#2662: Add support for Windows. */
    int count = num_app_args();
    return count;
}
/* Fills args_array with up to args_count app arguments; returns the count
 * reported by get_app_args().
 */
int
dr_get_app_args(OUT dr_app_arg_t *args_array, int args_count)
{
    /* XXX i#2662: Add support for Windows. */
    /* args_count is already an int: the old (int) cast was redundant. */
    return get_app_args(args_array, args_count);
}
/* Returns app_arg as a C string, or NULL on failure (setting the thread's
 * error code).  NOTE(review): buf/buf_size are currently unused — they are
 * reserved for the UTF-16 conversion path, which is still unimplemented
 * (XXX i#2662); the compat path returns the raw start pointer directly.
 */
const char *
dr_app_arg_as_cstring(IN dr_app_arg_t *app_arg, char *buf, int buf_size)
{
    if (app_arg == NULL) {
        set_client_error_code(NULL, DR_ERROR_INVALID_PARAMETER);
        return NULL;
    }
    switch (app_arg->encoding) {
    case DR_APP_ARG_CSTR_COMPAT: return (char *)app_arg->start;
    case DR_APP_ARG_UTF_16:
        /* XXX i#2662: To implement using utf16_to_utf8(). */
        ASSERT_NOT_IMPLEMENTED(false);
        set_client_error_code(NULL, DR_ERROR_NOT_IMPLEMENTED);
        break;
    default: set_client_error_code(NULL, DR_ERROR_UNKNOWN_ENCODING);
    }
    return NULL;
}
DR_API process_id_t
dr_get_process_id(void)
{
    /* Straight cast of the OS-reported process id. */
    process_id_t pid = (process_id_t)get_process_id();
    return pid;
}
DR_API process_id_t
dr_get_process_id_from_drcontext(void *drcontext)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    CLIENT_ASSERT(drcontext != NULL,
                  "dr_get_process_id_from_drcontext: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT,
                  "dr_get_process_id_from_drcontext: drcontext is invalid");
    /* On UNIX the dcontext records its owning process; on Windows the
     * context-independent query suffices. */
# ifdef UNIX
    return dcontext->owning_process;
# else
    return dr_get_process_id();
# endif
}
# ifdef UNIX
DR_API
/* UNIX-only thin wrapper around get_parent_id(). */
process_id_t
dr_get_parent_id(void)
{
    return get_parent_id();
}
# endif
# ifdef WINDOWS
DR_API
/* Maps a Windows process handle to its process id. */
process_id_t
dr_convert_handle_to_pid(HANDLE process_handle)
{
    /* Sanity check that the two sentinel constants agree. */
    ASSERT(POINTER_MAX == INVALID_PROCESS_ID);
    return process_id_from_handle(process_handle);
}
DR_API
/* Maps a process id to a handle via process_handle_from_id(). */
HANDLE
dr_convert_pid_to_handle(process_id_t pid)
{
    return process_handle_from_id(pid);
}
DR_API
/**
 * Returns information about the version of the operating system.
 * Returns whether successful.
 * Each field is filled only if info->size shows the caller's struct is large
 * enough to contain it, preserving compatibility with older clients.
 */
bool
dr_get_os_version(dr_os_version_info_t *info)
{
    int ver;
    uint sp_major, sp_minor, build_number;
    const char *release_id, *edition;
    get_os_version_ex(&ver, &sp_major, &sp_minor, &build_number, &release_id, &edition);
    if (info->size > offsetof(dr_os_version_info_t, version)) {
        switch (ver) {
        case WINDOWS_VERSION_10_1803: info->version = DR_WINDOWS_VERSION_10_1803; break;
        case WINDOWS_VERSION_10_1709: info->version = DR_WINDOWS_VERSION_10_1709; break;
        case WINDOWS_VERSION_10_1703: info->version = DR_WINDOWS_VERSION_10_1703; break;
        case WINDOWS_VERSION_10_1607: info->version = DR_WINDOWS_VERSION_10_1607; break;
        case WINDOWS_VERSION_10_1511: info->version = DR_WINDOWS_VERSION_10_1511; break;
        case WINDOWS_VERSION_10: info->version = DR_WINDOWS_VERSION_10; break;
        case WINDOWS_VERSION_8_1: info->version = DR_WINDOWS_VERSION_8_1; break;
        case WINDOWS_VERSION_8: info->version = DR_WINDOWS_VERSION_8; break;
        case WINDOWS_VERSION_7: info->version = DR_WINDOWS_VERSION_7; break;
        case WINDOWS_VERSION_VISTA: info->version = DR_WINDOWS_VERSION_VISTA; break;
        case WINDOWS_VERSION_2003: info->version = DR_WINDOWS_VERSION_2003; break;
        case WINDOWS_VERSION_XP: info->version = DR_WINDOWS_VERSION_XP; break;
        case WINDOWS_VERSION_2000: info->version = DR_WINDOWS_VERSION_2000; break;
        case WINDOWS_VERSION_NT: info->version = DR_WINDOWS_VERSION_NT; break;
        default: CLIENT_ASSERT(false, "unsupported windows version");
        } /* removed stray ';' that formed an empty statement after the switch */
    } else
        return false; /* struct too small for any info */
    if (info->size > offsetof(dr_os_version_info_t, service_pack_major)) {
        info->service_pack_major = sp_major;
        if (info->size > offsetof(dr_os_version_info_t, service_pack_minor)) {
            info->service_pack_minor = sp_minor;
        }
    }
    if (info->size > offsetof(dr_os_version_info_t, build_number)) {
        info->build_number = build_number;
    }
    if (info->size > offsetof(dr_os_version_info_t, release_id)) {
        dr_snprintf(info->release_id, BUFFER_SIZE_ELEMENTS(info->release_id), "%s",
                    release_id);
        NULL_TERMINATE_BUFFER(info->release_id);
    }
    if (info->size > offsetof(dr_os_version_info_t, edition)) {
        dr_snprintf(info->edition, BUFFER_SIZE_ELEMENTS(info->edition), "%s", edition);
        NULL_TERMINATE_BUFFER(info->edition);
    }
    return true;
}
DR_API
/* Whether the current process runs under WOW64. */
bool
dr_is_wow64(void)
{
    return is_wow64_process(NT_CURRENT_PROCESS);
}
DR_API
/* Returns the process environment block as reported by get_own_peb(). */
void *
dr_get_app_PEB(void)
{
    void *peb = get_own_peb();
    return peb;
}
# endif
DR_API
/* Retrieves the current time */
void
dr_get_time(dr_time_t *time)
{
    /* Query milliseconds once, then expand into calendar fields. */
    uint64 millis = query_time_millis();
    convert_millis_to_date(millis, time);
}
DR_API
/* Current time in milliseconds, straight from the OS query. */
uint64
dr_get_milliseconds(void)
{
    uint64 now_ms = query_time_millis();
    return now_ms;
}
DR_API
/* Current time in microseconds, straight from the OS query. */
uint64
dr_get_microseconds(void)
{
    uint64 now_us = query_time_micros();
    return now_us;
}
DR_API
/* Pseudo-random value obtained from get_random_offset(max). */
uint
dr_get_random_value(uint max)
{
    uint value = (uint)get_random_offset(max);
    return value;
}
DR_API
/* Seeds DR's internal pseudo-random generator. */
void
dr_set_random_seed(uint seed)
{
    d_r_set_random_seed(seed);
}
DR_API
/* Current seed of DR's internal pseudo-random generator. */
uint
dr_get_random_seed(void)
{
    uint seed = d_r_get_random_seed();
    return seed;
}
/***************************************************************************
* MEMORY ALLOCATION
*/
DR_API
/* Allocates memory from DR's memory pool specific to the
 * thread associated with drcontext.
 */
void *
dr_thread_alloc(void *drcontext, size_t size)
{
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    /* Consistency with dr_thread_free(): fail fast on an invalid drcontext
     * instead of passing it into the heap routines. */
    CLIENT_ASSERT(drcontext != NULL, "dr_thread_alloc: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_thread_alloc: drcontext is invalid");
    /* For back-compat this is guaranteed-reachable. */
    return heap_reachable_alloc(dcontext, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Frees thread-specific memory allocated by dr_thread_alloc.
 * size must be the same size passed to dr_thread_alloc.
 */
void
dr_thread_free(void *drcontext, void *mem, size_t size)
{
    CLIENT_ASSERT(drcontext != NULL, "dr_thread_free: drcontext cannot be NULL");
    CLIENT_ASSERT(drcontext != GLOBAL_DCONTEXT, "dr_thread_free: drcontext is invalid");
    dcontext_t *dcontext = (dcontext_t *)drcontext;
    heap_reachable_free(dcontext, mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* Allocates memory from DR's global memory pool.
 */
void *
dr_global_alloc(size_t size)
{
    /* For back-compat this is guaranteed-reachable. */
    void *mem = heap_reachable_alloc(GLOBAL_DCONTEXT, size HEAPACCT(ACCT_CLIENT));
    return mem;
}
DR_API
/* Frees memory allocated by dr_global_alloc.
 * size must be the same size passed to dr_global_alloc.
 */
void
dr_global_free(void *mem, size_t size)
{
    /* Global pool: mirrors dr_global_alloc's GLOBAL_DCONTEXT allocation. */
    heap_reachable_free(GLOBAL_DCONTEXT, mem, size HEAPACCT(ACCT_CLIENT));
}
DR_API
/* PR 352427: API routine to allocate executable memory */
void *
dr_nonheap_alloc(size_t size, uint prot)
{
    bool writable_and_executable = TESTALL(DR_MEMPROT_WRITE | DR_MEMPROT_EXEC, prot);
    CLIENT_ASSERT(
        !writable_and_executable || !DYNAMO_OPTION(satisfy_w_xor_x),
        "reachable executable client memory is not supported with -satisfy_w_xor_x");
    return heap_mmap_ex(size, size, prot, false /*no guard pages*/,
                        /* For back-compat we preserve reachability. */
                        VMM_SPECIAL_MMAP | VMM_REACHABLE);
}
DR_API
/* Frees memory from dr_nonheap_alloc; flags must mirror the alloc side. */
void
dr_nonheap_free(void *mem, size_t size)
{
    heap_munmap_ex(mem, size, false /*no guard pages*/, VMM_SPECIAL_MMAP | VMM_REACHABLE);
}
static void *
raw_mem_alloc(size_t size, uint prot, void *addr, dr_alloc_flags_t flags)
{
byte *p;
heap_error_code_t error_code;
CLIENT_ASSERT(ALIGNED(addr, PAGE_SIZE), "addr is not page size aligned");
if (!TEST(DR_ALLOC_NON_DR, flags)) {
/* memory alloc/dealloc and updating DR list must be atomic */
dynamo_vm_areas_lock(); /* if already hold lock this is a nop */
}
addr = (void *)ALIGN_BACKWARD(addr, PAGE_SIZE);
size = ALIGN_FORWARD(size, PAGE_SIZE);
# ifdef WINDOWS
if (TEST(DR_ALLOC_LOW_2GB, flags)) {
CLIENT_ASSERT(!TEST(DR_ALLOC_COMMIT_ONLY, flags),
"cannot combine commit-only and low-2GB");
p = os_heap_reserve_in_region(NULL, (byte *)(ptr_uint_t)0x80000000, size,
&error_code, TEST(DR_MEMPROT_EXEC, flags));
if (p != NULL && !TEST(DR_ALLOC_RESERVE_ONLY, flags)) {
if (!os_heap_commit(p, size, prot, &error_code)) {
os_heap_free(p, size, &error_code);
p = NULL;
}
}
} else
# endif
{
/* We specify that DR_ALLOC_LOW_2GB only applies to x64, so it's
* ok that the Linux kernel will ignore MAP_32BIT for 32-bit.
*/
# ifdef UNIX
uint os_flags = TEST(DR_ALLOC_LOW_2GB, flags) ? RAW_ALLOC_32BIT : 0;
# else
uint os_flags = TEST(DR_ALLOC_RESERVE_ONLY, flags)
? RAW_ALLOC_RESERVE_ONLY
: (TEST(DR_ALLOC_COMMIT_ONLY, flags) ? RAW_ALLOC_COMMIT_ONLY : 0);
# endif
if (IF_WINDOWS(TEST(DR_ALLOC_COMMIT_ONLY, flags) &&) addr != NULL &&
!app_memory_pre_alloc(get_thread_private_dcontext(), addr, size, prot, false,
true /*update*/, false /*!image*/)) {
p = NULL;
} else {
p = os_raw_mem_alloc(addr, size,