| /* ********************************************************** |
| * Copyright (c) 2011-2014 Google, Inc. All rights reserved. |
| * Copyright (c) 2000-2010 VMware, Inc. All rights reserved. |
| * **********************************************************/ |
| |
| /* |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are met: |
| * |
| * * Redistributions of source code must retain the above copyright notice, |
| * this list of conditions and the following disclaimer. |
| * |
| * * Redistributions in binary form must reproduce the above copyright notice, |
| * this list of conditions and the following disclaimer in the documentation |
| * and/or other materials provided with the distribution. |
| * |
| * * Neither the name of VMware, Inc. nor the names of its contributors may be |
| * used to endorse or promote products derived from this software without |
| * specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE |
| * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH |
| * DAMAGE. |
| */ |
| |
| /* Copyright (c) 2003-2007 Determina Corp. */ |
| /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ |
| /* Copyright (c) 2000-2001 Hewlett-Packard Company */ |
| |
| /* |
| * fragment.c - fragment-related routines |
| */ |
| |
| #include "globals.h" |
| #include "link.h" |
| #include "fragment.h" |
| #include "fcache.h" |
| #include "emit.h" |
| #include "monitor.h" |
| #include <string.h> /* for memset */ |
| #include "instrument.h" |
| #include <stddef.h> /* for offsetof */ |
| #include <limits.h> /* UINT_MAX */ |
| #include "perscache.h" |
| #include "synch.h" |
| #ifdef UNIX |
| # include "nudge.h" |
| #endif |
| |
| /* FIXME: make these runtime parameters */ |
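| /* Sizes are hash-table bits (log2 of table capacity), passed as the 'bits' |
| * parameter of the hashtable init routines below. */ |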
| #define INIT_HTABLE_SIZE_SHARED_BB (DYNAMO_OPTION(coarse_units) ? 5 : 10) |
| #define INIT_HTABLE_SIZE_SHARED_TRACE 10 |
| /* the only private bbs will be selfmod, so start small */ |
| #define INIT_HTABLE_SIZE_BB (DYNAMO_OPTION(shared_bbs) ? 5 : 10) |
| /* coarse-grain fragments do not use futures */ |
| #define INIT_HTABLE_SIZE_SHARED_FUTURE (DYNAMO_OPTION(coarse_units) ? 5 : 10) |
| #ifdef RETURN_AFTER_CALL |
| /* we have small per-module hashtables */ |
| # define INIT_HTABLE_SIZE_AFTER_CALL 5 |
| #endif |
| /* private futures are only used when we have private fragments */ |
| #define INIT_HTABLE_SIZE_FUTURE \ |
| ((DYNAMO_OPTION(shared_bbs) && DYNAMO_OPTION(shared_traces)) ? 5 : 9) |
| |
| /* per-module htables */ |
| #define INIT_HTABLE_SIZE_COARSE 5 |
| #define INIT_HTABLE_SIZE_COARSE_TH 4 |
| |
| #ifdef RCT_IND_BRANCH |
| # include "rct.h" |
| /* we have small per-module hashtables */ |
| # define INIT_HTABLE_SIZE_RCT_IBT 7 |
| |
| # ifndef RETURN_AFTER_CALL |
| # error RCT_IND_BRANCH requires RETURN_AFTER_CALL since it reuses data types |
| # endif |
| #endif |
| |
| /* if shared traces, we currently have no private traces so make table tiny |
| * FIXME: should start out w/ no table at all |
| */ |
| #define INIT_HTABLE_SIZE_TRACE (DYNAMO_OPTION(shared_traces) ? 6 : 9) |
| /* for small table sizes resizing is not an expensive operation, so we start smaller */ |
| |
| /* Current flusher, protected by thread_initexit_lock. */ |
| DECLARE_FREQPROT_VAR(static dcontext_t *flusher, NULL); |
| /* Current allsynch-flusher, protected by thread_initexit_lock. */ |
| DECLARE_FREQPROT_VAR(static dcontext_t *allsynch_flusher, NULL); |
| |
| /* These global tables are kept on the heap for selfprot (case 7957) */ |
| |
| /* synchronization of these tables is accomplished via read-write locks, |
| * where the writers are removal and resizing -- addition is atomic to |
| * readers. |
| * For now none of these are read from ibl routines so we only have to |
| * synch with other DR routines. |
| */ |
| static fragment_table_t *shared_bb; |
| static fragment_table_t *shared_trace; |
| |
| /* if we have either shared bbs or shared traces we need this shared: */ |
| static fragment_table_t *shared_future; |
| |
| /* Thread-shared tables are allocated in a shared per_thread_t. |
| * The structure is also used if we're dumping shared traces. |
| * Kept on the heap for selfprot (case 7957) |
| */ |
| static per_thread_t *shared_pt; |
| |
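| /* A shared per_thread_t is needed when IBT tables are shared or when we are |
| * dumping shared traces. */ |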
| #define USE_SHARED_PT() (SHARED_IBT_TABLES_ENABLED() || \ |
| (TRACEDUMP_ENABLED() && DYNAMO_OPTION(shared_traces))) |
| |
| /* We keep track of "old" IBT target tables in a linked list and |
| * deallocate them in fragment_exit(). */ |
| /* FIXME Deallocate tables more aggressively using a distributed, refcounting |
| * algo as is used for shared deletion. */ |
| typedef struct _dead_fragment_table_t { |
| fragment_entry_t *table_unaligned; |
| uint table_flags; |
| uint capacity; |
| uint ref_count; |
| struct _dead_fragment_table_t *next; |
| } dead_fragment_table_t; |
| |
| /* We keep these list pointers on the heap for selfprot (case 8074). */ |
| typedef struct _dead_table_lists_t { |
| dead_fragment_table_t *dead_tables; |
| dead_fragment_table_t *dead_tables_tail; |
| } dead_table_lists_t; |
| |
| static dead_table_lists_t *dead_lists; |
| |
| DECLARE_CXTSWPROT_VAR(static mutex_t dead_tables_lock, INIT_LOCK_FREE(dead_tables_lock)); |
| |
| #ifdef RETURN_AFTER_CALL |
| /* High level lock for an atomic lookup+add operation on the |
| * after call tables. */ |
| DECLARE_CXTSWPROT_VAR(static mutex_t after_call_lock, INIT_LOCK_FREE(after_call_lock)); |
| /* We use per-module tables and only need this table for non-module code; |
| * on Linux though this is the only table used, until we have a module list. |
| */ |
| static rct_module_table_t rac_non_module_table; |
| #endif |
| |
| /* allows independent sequences of flushes and delayed deletions, |
| * though with -syscalls_synch_flush additions we now hold this |
| * throughout a flush. |
| */ |
| DECLARE_CXTSWPROT_VAR(mutex_t shared_cache_flush_lock, |
| INIT_LOCK_FREE(shared_cache_flush_lock)); |
| /* Global count of flushes, used as a timestamp for shared deletion. |
| * Reads may be done w/o a lock, but writes can only be done |
| * via increment_global_flushtime() while holding shared_cache_flush_lock. |
| */ |
| DECLARE_FREQPROT_VAR(uint flushtime_global, 0); |
| |
| #ifdef CLIENT_INTERFACE |
| DECLARE_CXTSWPROT_VAR(mutex_t client_flush_request_lock, |
| INIT_LOCK_FREE(client_flush_request_lock)); |
| DECLARE_CXTSWPROT_VAR(client_flush_req_t *client_flush_requests, NULL); |
| #endif |
| |
| #if defined(RCT_IND_BRANCH) && defined(UNIX) |
| /* On Win32 we use per-module tables; on Linux we use a single global table, |
| * until we have a module list. |
| */ |
| rct_module_table_t rct_global_table; |
| #endif |
| |
| #define NULL_TAG ((app_pc)PTR_UINT_0) |
| /* FAKE_TAG is used as a deletion marker for unlinked entries */ |
| #define FAKE_TAG ((app_pc)PTR_UINT_MINUS_1) |
| |
| /* instead of an empty hashtable slot containing NULL, we fill it |
| * with a pointer to this constant fragment, which we give a tag |
| * of 0. |
| * PR 305731: rather than having a start_pc of 0, which causes |
| * an app targeting 0 to crash at 0, we point at a handler that |
| * sends the app to an ibl miss. |
| */ |
| byte * hashlookup_null_target; |
| #define HASHLOOKUP_NULL_START_PC ((cache_pc)hashlookup_null_handler) |
| static const fragment_t null_fragment = { NULL_TAG, 0, 0, 0, 0, |
| HASHLOOKUP_NULL_START_PC, }; |
| /* an end-of-table sentinel fragment lets us avoid a range check on the fast path */ |
| static const fragment_t sentinel_fragment = { NULL_TAG, 0, 0, 0, 0, |
| HASHLOOKUP_SENTINEL_START_PC, }; |
| |
| /* Shared fragment IBTs: We need to preserve the open addressing traversal |
| * in the hashtable while marking a table entry as unlinked. |
| * A null_fragment won't work since it terminates the traversal, |
| * so we use an unlinked marker. The lookup table entry for |
| * an unlinked entry *always* has its start_pc_fragment set to |
| * an IBL target_delete entry. |
| */ |
| static const fragment_t unlinked_fragment = { FAKE_TAG, }; |
| |
| /* macro used in code dating from the time of deletion markers */ |
| /* Shared fragment IBTs: unlinked_fragment isn't a real fragment either, so |
| * unlinked entries are naturally deleted during a table resize. */ |
| #define REAL_FRAGMENT(fragment) \ |
| ((fragment) != &null_fragment && \ |
| (fragment) != &unlinked_fragment && \ |
| (fragment) != &sentinel_fragment) |
| |
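| /* Retrieve the per_thread_t for a dcontext: the shared instance for |
| * GLOBAL_DCONTEXT (NULL if no shared per_thread_t is in use), otherwise the |
| * thread's own fragment_field. */ |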
| #define GET_PT(dc) ((dc) == GLOBAL_DCONTEXT ? (USE_SHARED_PT() ? shared_pt : NULL) :\ |
| (per_thread_t *) (dc)->fragment_field) |
| |
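| /* A table is considered protected if it needs no lock at all or if its |
| * read-write lock is currently held. */ |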
| #define TABLE_PROTECTED(ptable) \ |
| (!TABLE_NEEDS_LOCK(ptable) || READWRITE_LOCK_HELD(&(ptable)->rwlock)) |
| |
| /* everything except the invisible table is in here */ |
| #define GET_FTABLE_HELPER(pt, flags, otherwise) \ |
| (TEST(FRAG_IS_TRACE, (flags)) ? \ |
| (TEST(FRAG_SHARED, (flags)) ? shared_trace : &pt->trace) : \ |
| (TEST(FRAG_SHARED, (flags)) ? \ |
| (TEST(FRAG_IS_FUTURE, (flags)) ? shared_future : shared_bb) : \ |
| (TEST(FRAG_IS_FUTURE, (flags)) ? &pt->future : \ |
| (otherwise)))) |
| |
| #define GET_FTABLE(pt, flags) GET_FTABLE_HELPER(pt, (flags), &pt->bb) |
| |
| /* indirect branch table per target type (bb vs trace) and indirect branch type */ |
| #define GET_IBT_TABLE(pt, flags, branch_type) \ |
| (TEST(FRAG_IS_TRACE, (flags)) ? \ |
| (DYNAMO_OPTION(shared_trace_ibt_tables) ? \ |
| &shared_pt->trace_ibt[(branch_type)] : \ |
| &(pt)->trace_ibt[(branch_type)]) : \ |
| (DYNAMO_OPTION(shared_bb_ibt_tables) ? \ |
| &shared_pt->bb_ibt[(branch_type)] : \ |
| &(pt)->bb_ibt[(branch_type)])) |
| |
| /********************************** STATICS ***********************************/ |
| static uint fragment_heap_size(uint flags, int direct_exits, int indirect_exits); |
| |
| static void fragment_free_future(dcontext_t *dcontext, future_fragment_t *fut); |
| |
| #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) |
| static void |
| coarse_persisted_fill_ibl(dcontext_t *dcontext, coarse_info_t *info, |
| ibl_branch_type_t branch_type); |
| #endif |
| |
| #ifdef CLIENT_INTERFACE |
| static void |
| process_client_flush_requests(dcontext_t *dcontext, dcontext_t *alloc_dcontext, |
| client_flush_req_t *req, bool flush); |
| #endif |
| |
| #if defined(INTERNAL) || defined(CLIENT_INTERFACE) |
| /* trace logging and synch for shared trace file: */ |
| DECLARE_CXTSWPROT_VAR(static mutex_t tracedump_mutex, INIT_LOCK_FREE(tracedump_mutex)); |
| DECLARE_FREQPROT_VAR(static stats_int_t tcount, 0); /* protected by tracedump_mutex */ |
| static void exit_trace_file(per_thread_t *pt); |
| static void output_trace(dcontext_t *dcontext, per_thread_t *pt, |
| fragment_t *f, stats_int_t deleted_at); |
| static void init_trace_file(per_thread_t *pt); |
| #endif |
| |
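| /* Dump only traces that have not already been output, and only when trace |
| * dumping is enabled. */ |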
| #define SHOULD_OUTPUT_FRAGMENT(flags) \ |
| (TEST(FRAG_IS_TRACE, (flags)) && \ |
| !TEST(FRAG_TRACE_OUTPUT, (flags)) && \ |
| TRACEDUMP_ENABLED()) |
| |
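| /* Flags for the fake fragment_t wrappers used to represent coarse-grain |
| * fragments. */ |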
| #define FRAGMENT_COARSE_WRAPPER_FLAGS \ |
| FRAG_FAKE | FRAG_SHARED | FRAG_COARSE_GRAIN | \ |
| FRAG_LINKED_OUTGOING | FRAG_LINKED_INCOMING |
| |
| /* We use temporary fragment_t + linkstub_t structs to more easily |
| * use existing code when emitting coarse-grain fragments. |
| * Only bbs with a single indirect exit, or with one or two direct exits, can be coarse-grain. |
| * The bb_building_lock protects use of this. |
| */ |
| DECLARE_FREQPROT_VAR( |
| static struct { |
| fragment_t f; |
| union { |
| struct { |
| direct_linkstub_t dir_exit_1; |
| direct_linkstub_t dir_exit_2; |
| } dir_exits; |
| indirect_linkstub_t ind_exit; |
| } exits; |
| } coarse_emit_fragment, {{0}}); |
| |
| #ifdef SHARING_STUDY |
| /*************************************************************************** |
| * fragment_t sharing study |
| * Only used with -fragment_sharing_study |
| * When the option is off we go ahead and waste the 4 static vars |
| * below so we don't have to have a define and separate build. |
| */ |
| typedef struct _thread_list_t { |
| uint thread_num; |
| uint count; |
| struct _thread_list_t *next; |
| } thread_list_t; |
| |
| typedef struct _shared_entry_t { |
| app_pc tag; |
| uint num_threads; |
| thread_list_t *threads; |
| uint heap_size; |
| uint cache_size; |
| struct _shared_entry_t *next; |
| } shared_entry_t; |
| # define SHARED_HASH_BITS 16 |
| static shared_entry_t ** shared_blocks; |
| DECLARE_CXTSWPROT_VAR(static mutex_t shared_blocks_lock, |
| INIT_LOCK_FREE(shared_blocks_lock)); |
| static shared_entry_t ** shared_traces; |
| DECLARE_CXTSWPROT_VAR(static mutex_t shared_traces_lock, |
| INIT_LOCK_FREE(shared_traces_lock)); |
| |
| /* assumes caller holds table's lock! */ |
| static shared_entry_t * |
| shared_block_lookup(shared_entry_t **table, fragment_t *f) |
| { |
| shared_entry_t *e; |
| uint hindex; |
| |
| hindex = HASH_FUNC_BITS((ptr_uint_t)f->tag, SHARED_HASH_BITS); |
| /* using collision chains */ |
| for (e = table[hindex]; e != NULL; e = e->next) { |
| if (e->tag == f->tag) { |
| return e; |
| } |
| } |
| return NULL; |
| } |
| |
| static void |
| reset_shared_block_table(shared_entry_t **table, mutex_t *lock) |
| { |
| shared_entry_t *e, *nxte; |
| uint i; |
| uint size = HASHTABLE_SIZE(SHARED_HASH_BITS); |
| mutex_lock(lock); |
| for (i = 0; i < size; i++) { |
| for (e = table[i]; e != NULL; e = nxte) { |
| thread_list_t *tl = e->threads; |
| thread_list_t *tlnxt; |
| nxte = e->next; |
| while (tl != NULL) { |
| tlnxt = tl->next; |
| global_heap_free(tl, sizeof(thread_list_t) HEAPACCT(ACCT_OTHER)); |
| tl = tlnxt; |
| } |
| global_heap_free(e, sizeof(shared_entry_t) HEAPACCT(ACCT_OTHER)); |
| } |
| } |
| global_heap_free(table, size*sizeof(shared_entry_t*) HEAPACCT(ACCT_OTHER)); |
| mutex_unlock(lock); |
| } |
| |
| static void |
| add_shared_block(shared_entry_t **table, mutex_t *lock, fragment_t *f) |
| { |
| shared_entry_t *e; |
| uint hindex; |
| int num_direct = 0, num_indirect = 0; |
| linkstub_t *l = FRAGMENT_EXIT_STUBS(f); |
| /* use num to avoid thread_id_t recycling problems */ |
| uint tnum = get_thread_num(get_thread_id()); |
| |
| mutex_lock(lock); |
| e = shared_block_lookup(table, f); |
| if (e != NULL) { |
| thread_list_t *tl = e->threads; |
| for (; tl != NULL; tl = tl->next) { |
| if (tl->thread_num == tnum) { |
| tl->count++; |
| LOG(GLOBAL, LOG_ALL, 2, |
| "add_shared_block: tag "PFX", but re-add #%d for thread #%d\n", |
| e->tag, tl->count, tnum); |
| mutex_unlock(lock); |
| return; |
| } |
| } |
| tl = global_heap_alloc(sizeof(thread_list_t) HEAPACCT(ACCT_OTHER)); |
| tl->thread_num = tnum; |
| tl->count = 1; |
| tl->next = e->threads; |
| e->threads = tl; |
| e->num_threads++; |
| LOG(GLOBAL, LOG_ALL, 2, "add_shared_block: tag "PFX" thread #%d => %d threads\n", |
| e->tag, tnum, e->num_threads); |
| mutex_unlock(lock); |
| return; |
| } |
| |
| /* get num stubs to find heap size */ |
| for (; l != NULL; l = LINKSTUB_NEXT_EXIT(l)) { |
| if (LINKSTUB_DIRECT(l->flags)) |
| num_direct++; |
| else { |
| ASSERT(LINKSTUB_INDIRECT(l->flags)); |
| num_indirect++; |
| } |
| } |
| |
| /* add new entry to the table */ |
| e = (shared_entry_t *) global_heap_alloc(sizeof(shared_entry_t) HEAPACCT(ACCT_OTHER)); |
| e->tag = f->tag; |
| e->num_threads = 1; |
| e->heap_size = fragment_heap_size(f->flags, num_direct, num_indirect); |
| e->cache_size = (f->size + f->fcache_extra); |
| e->threads = global_heap_alloc(sizeof(thread_list_t) HEAPACCT(ACCT_OTHER)); |
| e->threads->thread_num = tnum; |
| e->threads->count = 1; |
| e->threads->next = NULL; |
| LOG(GLOBAL, LOG_ALL, 2, "add_shared_block: tag "PFX", heap %d, cache %d, thread #%d\n", |
| e->tag, e->heap_size, e->cache_size, e->threads->thread_num); |
| |
| hindex = HASH_FUNC_BITS((ptr_uint_t)f->tag, SHARED_HASH_BITS); |
| e->next = table[hindex]; |
| table[hindex] = e; |
| mutex_unlock(lock); |
| } |
| |
| static void |
| print_shared_table_stats(shared_entry_t **table, mutex_t *lock, const char *name) |
| { |
| uint i; |
| shared_entry_t *e; |
| uint size = HASHTABLE_SIZE(SHARED_HASH_BITS); |
| uint tot = 0, shared_tot = 0, shared = 0, heap = 0, cache = 0, |
| creation_count = 0; |
| |
| mutex_lock(lock); |
| for (i = 0; i < size; i++) { |
| for (e = table[i]; e != NULL; e = e->next) { |
| thread_list_t *tl = e->threads; |
| tot++; |
| shared_tot += e->num_threads; |
| for (; tl != NULL; tl = tl->next) |
| creation_count += tl->count; |
| if (e->num_threads > 1) { |
| shared++; |
| /* assume similar size for each thread -- cache padding |
| * only real difference |
| */ |
| heap += (e->heap_size * e->num_threads); |
| cache += (e->cache_size * e->num_threads); |
| } |
| } |
| } |
| mutex_unlock(lock); |
| LOG(GLOBAL, LOG_ALL, 1, "Shared %s statistics:\n", name); |
| LOG(GLOBAL, LOG_ALL, 1, "\ttotal blocks: %10d\n", tot); |
| LOG(GLOBAL, LOG_ALL, 1, "\tcreation count: %10d\n", creation_count); |
| LOG(GLOBAL, LOG_ALL, 1, "\tshared count: %10d\n", shared_tot); |
| LOG(GLOBAL, LOG_ALL, 1, "\tshared blocks: %10d\n", shared); |
| LOG(GLOBAL, LOG_ALL, 1, "\tshared heap: %10d\n", heap); |
| LOG(GLOBAL, LOG_ALL, 1, "\tshared cache: %10d\n", cache); |
| } |
| |
| void |
| print_shared_stats() |
| { |
| print_shared_table_stats(shared_blocks, &shared_blocks_lock, "basic block"); |
| print_shared_table_stats(shared_traces, &shared_traces_lock, "trace"); |
| } |
| #endif /* SHARING_STUDY ***************************************************/ |
| |
| |
| #ifdef FRAGMENT_SIZES_STUDY /*****************************************/ |
| #include <math.h> |
| /* don't bother to synchronize these */ |
| static int bb_sizes[200000]; |
| static int trace_sizes[40000]; |
| static int num_bb = 0; |
| static int num_traces = 0; |
| |
| void |
| record_fragment_size(int size, bool is_trace) |
| { |
| if (is_trace) { |
| ASSERT(num_traces < 40000); |
| trace_sizes[num_traces] = size; |
| num_traces++; |
| } else { |
| ASSERT(num_bb < 200000); |
| bb_sizes[num_bb] = size; |
| num_bb++; |
| } |
| } |
| |
| void |
| print_size_results() |
| { |
| LOG(GLOBAL, LOG_ALL, 1, "Basic block sizes (bytes):\n"); |
| print_statistics(bb_sizes, num_bb); |
| LOG(GLOBAL, LOG_ALL, 1, "Trace sizes (bytes):\n"); |
| print_statistics(trace_sizes, num_traces); |
| } |
| #endif /* FRAGMENT_SIZES_STUDY */ /*****************************************/ |
| |
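| /* IBL-targeted tables are accounted to ACCT_IBLTABLE; all other fragment |
| * tables to ACCT_FRAG_TABLE. */ |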
| #define FRAGTABLE_WHICH_HEAP(flags) \ |
| (TESTALL(FRAG_TABLE_INCLUSIVE_HIERARCHY | FRAG_TABLE_IBL_TARGETED, \ |
| (flags)) ? ACCT_IBLTABLE : ACCT_FRAG_TABLE) |
| |
| #ifdef HASHTABLE_STATISTICS |
| # define UNPROT_STAT(stats) unprot_stats->stats |
| /* FIXME: either put in nonpersistent heap as appropriate, or |
| * preserve across resets |
| */ |
| # define ALLOC_UNPROT_STATS(dcontext, table) do { \ |
| (table)->unprot_stats = \ |
| HEAP_TYPE_ALLOC((dcontext), unprot_ht_statistics_t, \ |
| FRAGTABLE_WHICH_HEAP((table)->table_flags), \ |
| UNPROTECTED); \ |
| memset((table)->unprot_stats, 0, sizeof(unprot_ht_statistics_t)); \ |
| } while (0) |
| # define DEALLOC_UNPROT_STATS(dcontext, table) \ |
| HEAP_TYPE_FREE((dcontext), (table)->unprot_stats, unprot_ht_statistics_t, \ |
| FRAGTABLE_WHICH_HEAP((table)->table_flags), UNPROTECTED) |
| # define CHECK_UNPROT_STATS(table) ASSERT(table.unprot_stats != NULL) |
| |
| static void |
| check_stay_on_trace_stats_overflow(dcontext_t *dcontext, ibl_branch_type_t branch_type) |
| { |
| per_thread_t *pt = (per_thread_t *) dcontext->fragment_field; |
| hashtable_statistics_t *lookup_stats = &pt->trace_ibt[branch_type].unprot_stats-> |
| trace_ibl_stats[branch_type]; |
| if (lookup_stats->ib_stay_on_trace_stat < lookup_stats->ib_stay_on_trace_stat_last) { |
| lookup_stats->ib_stay_on_trace_stat_ovfl++; |
| } |
| lookup_stats->ib_stay_on_trace_stat_last = lookup_stats->ib_stay_on_trace_stat; |
| /* FIXME: ib_trace_last_ibl_exit should have an overflow check as well */ |
| } |
| #endif /* HASHTABLE_STATISTICS */ |
| |
| /* init/update the tls slots storing this table's mask and lookup base |
| * N.B.: for thread-shared the caller must call for each thread |
| */ |
| /* currently we don't support a mixture */ |
| static inline void |
| update_lookuptable_tls(dcontext_t *dcontext, ibl_table_t *table) |
| { |
| /* use dcontext->local_state, rather than get_local_state(), to support |
| * being called from other threads! |
| */ |
| local_state_extended_t *state = |
| (local_state_extended_t *) dcontext->local_state; |
| |
| ASSERT(state != NULL); |
| ASSERT(DYNAMO_OPTION(ibl_table_in_tls)); |
| /* We must hold at least the read lock here, else we could grab |
| * an inconsistent mask/lookuptable pair if another thread is in the middle |
| * of resizing the table (case 10405). |
| */ |
| ASSERT_TABLE_SYNCHRONIZED(table, READWRITE); |
| /* case 10296: for shared tables we must update the table |
| * before the mask, as the ibl lookup code accesses the mask first, |
| * and old mask + new table is ok since it will de-ref within the |
| * new table (we never shrink tables) and be a miss, whereas |
| * new mask + old table can de-ref beyond the end of the table, |
| * crashing or worse. |
| */ |
| state->table_space.table[table->branch_type].lookuptable = |
| table->table; |
| state->table_space.table[table->branch_type].hash_mask = |
| table->hash_mask; |
| } |
| |
| #ifdef DEBUG |
| static const char *ibl_bb_table_type_names[IBL_BRANCH_TYPE_END] = |
| {"ret_bb", "indcall_bb", "indjmp_bb"}; |
| static const char *ibl_trace_table_type_names[IBL_BRANCH_TYPE_END] = |
| {"ret_trace", "indcall_trace", "indjmp_trace"}; |
| #endif |
| |
| #ifdef DEBUG |
| static inline void |
| dump_lookuptable_tls(dcontext_t *dcontext) |
| { |
| /* use dcontext->local_state, rather than get_local_state(), to support |
| * being called from other threads! |
| */ |
| if (DYNAMO_OPTION(ibl_table_in_tls)) { |
| |
| local_state_extended_t *state = |
| (local_state_extended_t *) dcontext->local_state; |
| ibl_branch_type_t branch_type; |
| |
| ASSERT(state != NULL); |
| for (branch_type = IBL_BRANCH_TYPE_START; |
| branch_type < IBL_BRANCH_TYPE_END; branch_type++) { |
| LOG(THREAD, LOG_FRAGMENT, 1, |
| "\t Table %s, table "PFX", mask "PFX"\n", |
| !SHARED_BB_ONLY_IB_TARGETS() ? |
| ibl_trace_table_type_names[branch_type] : |
| ibl_bb_table_type_names[branch_type], |
| state->table_space.table[branch_type].lookuptable, |
| state->table_space.table[branch_type].hash_mask); |
| } |
| } |
| } |
| #endif |
| |
| /******************************************************************************* |
| * IBL HASHTABLE INSTANTIATION |
| */ |
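| /* Build a fragment_entry_t initializer from a fragment_t, pairing the app |
| * tag with the cache entry point. */ |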
| #define FRAGENTRY_FROM_FRAGMENT(f) { (f)->tag, (f)->start_pc } |
| |
| /* macros w/ name and types are duplicated in fragment.h -- keep in sync */ |
| #define NAME_KEY ibl |
| #define ENTRY_TYPE fragment_entry_t |
| /* not defining HASHTABLE_USE_LOOKUPTABLE */ |
| /* compiler won't let me use null_fragment.tag here */ |
| static const fragment_entry_t fe_empty = { NULL_TAG, HASHLOOKUP_NULL_START_PC }; |
| static const fragment_entry_t fe_sentinel = { NULL_TAG, HASHLOOKUP_SENTINEL_START_PC }; |
| #define ENTRY_TAG(fe) ((ptr_uint_t)(fe).tag_fragment) |
| #define ENTRY_EMPTY (fe_empty) |
| #define ENTRY_SENTINEL (fe_sentinel) |
| #define IBL_ENTRY_IS_EMPTY(fe) \ |
| ((fe).tag_fragment == fe_empty.tag_fragment && \ |
| (fe).start_pc_fragment == fe_empty.start_pc_fragment) |
| #define IBL_ENTRY_IS_INVALID(fe) ((fe).tag_fragment == FAKE_TAG) |
| #define IBL_ENTRY_IS_SENTINEL(fe) \ |
| ((fe).tag_fragment == fe_sentinel.tag_fragment && \ |
| (fe).start_pc_fragment == fe_sentinel.start_pc_fragment) |
| #define ENTRY_IS_EMPTY(fe) IBL_ENTRY_IS_EMPTY(fe) |
| #define ENTRY_IS_SENTINEL(fe) IBL_ENTRY_IS_SENTINEL(fe) |
| #define ENTRY_IS_INVALID(fe) IBL_ENTRY_IS_INVALID(fe) |
| #define IBL_ENTRIES_ARE_EQUAL(fe1,fe2) ((fe1).tag_fragment == (fe2).tag_fragment) |
| #define ENTRIES_ARE_EQUAL(table,fe1,fe2) IBL_ENTRIES_ARE_EQUAL(fe1,fe2) |
| #define HASHTABLE_WHICH_HEAP(flags) FRAGTABLE_WHICH_HEAP(flags) |
| #define HTLOCK_RANK table_rwlock |
| #define HASHTABLE_ENTRY_STATS 1 |
| |
| #include "hashtablex.h" |
| /* all defines are undef-ed at end of hashtablex.h */ |
| |
| /* required routines for hashtable interface that we don't need for this instance */ |
| |
| static void |
| hashtable_ibl_free_entry(dcontext_t *dcontext, ibl_table_t *table, |
| fragment_entry_t entry) |
| { |
| /* nothing to do, data is inlined */ |
| } |
| |
| /******************************************************************************* |
| * FRAGMENT HASHTABLE INSTANTIATION |
| */ |
| |
| /* macros w/ name and types are duplicated in fragment.h -- keep in sync */ |
| #define NAME_KEY fragment |
| #define ENTRY_TYPE fragment_t * |
| /* not defining HASHTABLE_USE_LOOKUPTABLE */ |
| |
| #define ENTRY_TAG(f) ((ptr_uint_t)(f)->tag) |
| /* instead of setting to 0, point at null_fragment */ |
| #define ENTRY_EMPTY ((fragment_t *)&null_fragment) |
| #define ENTRY_SENTINEL ((fragment_t *)&sentinel_fragment) |
| #define ENTRY_IS_EMPTY(f) ((f) == (fragment_t *)&null_fragment) |
| #define ENTRY_IS_SENTINEL(f) ((f) == (fragment_t *)&sentinel_fragment) |
| #define ENTRY_IS_INVALID(f) ((f) == (fragment_t *)&unlinked_fragment) |
| #define ENTRIES_ARE_EQUAL(t,f,g) ((f) == (g)) |
| #define HASHTABLE_WHICH_HEAP(flags) FRAGTABLE_WHICH_HEAP(flags) |
| #define HTLOCK_RANK table_rwlock |
| |
| #include "hashtablex.h" |
| /* all defines are undef-ed at end of hashtablex.h */ |
| |
| static void |
| hashtable_fragment_resized_custom(dcontext_t *dcontext, fragment_table_t *table, |
| uint old_capacity, fragment_t **old_table, |
| fragment_t **old_table_unaligned, |
| uint old_ref_count, uint old_table_flags) |
| { |
| /* nothing */ |
| } |
| |
| static void |
| hashtable_fragment_init_internal_custom(dcontext_t *dcontext, fragment_table_t *table) |
| { |
| /* nothing */ |
| } |
| |
| #ifdef DEBUG |
| static void |
| hashtable_fragment_study_custom(dcontext_t *dcontext, fragment_table_t *table, |
| uint entries_inc/*amnt table->entries was pre-inced*/) |
| { |
| /* nothing */ |
| } |
| #endif |
| |
| /* callers should use either hashtable_ibl_preinit or hashtable_resize instead */ |
| static void |
| hashtable_ibl_init_internal_custom(dcontext_t *dcontext, ibl_table_t *table) |
| { |
| ASSERT(null_fragment.tag == NULL_TAG); |
| ASSERT(null_fragment.start_pc == HASHLOOKUP_NULL_START_PC); |
| ASSERT(FAKE_TAG != NULL_TAG); |
| |
| ASSERT(sentinel_fragment.tag == NULL_TAG); |
| ASSERT(sentinel_fragment.start_pc == HASHLOOKUP_SENTINEL_START_PC); |
| ASSERT(HASHLOOKUP_SENTINEL_START_PC != HASHLOOKUP_NULL_START_PC); |
| |
| ASSERT(TEST(FRAG_TABLE_IBL_TARGETED, table->table_flags)); |
| ASSERT(TEST(FRAG_TABLE_INCLUSIVE_HIERARCHY, table->table_flags)); |
| |
| /* every time we resize a table we reset the flush threshold, |
| * since it is cleared in place after one flush |
| */ |
| table->groom_factor_percent = |
| TEST(FRAG_TABLE_TRACE, table->table_flags) ? |
| DYNAMO_OPTION(trace_ibt_groom) : DYNAMO_OPTION(bb_ibt_groom); |
| table->max_capacity_bits = |
| TEST(FRAG_TABLE_TRACE, table->table_flags) ? |
| DYNAMO_OPTION(private_trace_ibl_targets_max) : |
| DYNAMO_OPTION(private_bb_ibl_targets_max); |
| |
| #ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| if (table->unprot_stats == NULL) { |
| /* first time, not a resize */ |
| ALLOC_UNPROT_STATS(dcontext, table); |
| } /* else, keep original */ |
| } |
| #endif /* HASHTABLE_STATISTICS */ |
| |
| if (SHARED_IB_TARGETS() && |
| !TEST(FRAG_TABLE_SHARED, table->table_flags)) { |
| /* currently we don't support a mixture */ |
| ASSERT(TEST(FRAG_TABLE_TARGET_SHARED, table->table_flags)); |
| ASSERT(TEST(FRAG_TABLE_IBL_TARGETED, table->table_flags)); |
| ASSERT(table->branch_type != IBL_NONE); |
| /* Only data for one set of tables is stored in TLS -- for the trace |
| * tables in the default config OR the BB tables in shared BBs |
| * only mode. |
| */ |
| if ((TEST(FRAG_TABLE_TRACE, table->table_flags) || |
| SHARED_BB_ONLY_IB_TARGETS()) && |
| DYNAMO_OPTION(ibl_table_in_tls)) |
| update_lookuptable_tls(dcontext, table); |
| } |
| } |
| |
| /* We need our own routines to init/free our added fields */ |
| static void |
| hashtable_ibl_myinit(dcontext_t *dcontext, ibl_table_t *table, uint bits, |
| uint load_factor_percent, hash_function_t func, |
| uint hash_offset, ibl_branch_type_t branch_type, |
| bool use_lookup, uint table_flags _IF_DEBUG(const char *table_name)) |
| { |
| uint flags = table_flags; |
| ASSERT(dcontext != GLOBAL_DCONTEXT || TEST(FRAG_TABLE_SHARED, flags)); |
| /* flags shared by all ibl tables */ |
| flags |= FRAG_TABLE_INCLUSIVE_HIERARCHY; |
| flags |= FRAG_TABLE_IBL_TARGETED; |
| flags |= HASHTABLE_ALIGN_TABLE; |
| /* use entry stats with all our ibl-targeted tables */ |
| flags |= HASHTABLE_USE_ENTRY_STATS; |
| #ifdef HASHTABLE_STATISTICS |
| /* indicate this is first time, not a resize */ |
| table->unprot_stats = NULL; |
| #endif |
| table->branch_type = branch_type; |
| hashtable_ibl_init(dcontext, table, bits, load_factor_percent, |
| func, hash_offset, flags _IF_DEBUG(table_name)); |
| |
| /* PR 305731: rather than having a start_pc of 0, which causes an |
| * app targeting 0 to crash at 0, we point at a handler that sends |
| * the app to an ibl miss via target_delete, which restores |
| * registers saved in the found path. |
| */ |
| if (dcontext != GLOBAL_DCONTEXT && hashlookup_null_target == NULL) { |
| ASSERT(!dynamo_initialized); |
| hashlookup_null_target = get_target_delete_entry_pc(dcontext, table); |
| #if !defined(X64) && defined(LINUX) |
| /* see comments in x86.asm: we patch to avoid text relocations */ |
| byte *pc = (byte *) hashlookup_null_handler; |
| byte *page_start = (byte *) PAGE_START(pc); |
| byte *page_end = (byte *) ALIGN_FORWARD(pc + JMP_LONG_LENGTH, PAGE_SIZE); |
| make_writable(page_start, page_end - page_start); |
| insert_relative_target(pc + 1, hashlookup_null_target, NOT_HOT_PATCHABLE); |
| make_unwritable(page_start, page_end - page_start); |
| #endif |
| } |
| } |
| |
| static void |
| hashtable_ibl_myfree(dcontext_t *dcontext, ibl_table_t *table) |
| { |
| #ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| ASSERT(TEST(FRAG_TABLE_IBL_TARGETED, table->table_flags)); |
| DEALLOC_UNPROT_STATS(dcontext, table); |
| } |
| #endif /* HASHTABLE_STATISTICS */ |
| hashtable_ibl_free(dcontext, table); |
| } |
| |
| static void |
| hashtable_fragment_free_entry(dcontext_t *dcontext, fragment_table_t *table, |
| fragment_t *f) |
| { |
| if (TEST(FRAG_TABLE_INCLUSIVE_HIERARCHY, table->table_flags)) { |
| ASSERT_NOT_REACHED(); /* case 7691 */ |
| } else { |
| if (TEST(FRAG_IS_FUTURE, f->flags)) |
| fragment_free_future(dcontext, (future_fragment_t *)f); |
| else |
| fragment_free(dcontext, f); |
| } |
| } |
| |
| static inline bool |
| fragment_add_to_hashtable(dcontext_t *dcontext, fragment_t *e, fragment_table_t *table) |
| { |
| /* When using shared IBT tables w/trace building and BB2BB IBL, there is a |
| * race between adding a BB target to a table and having it marked by |
| * another thread as a trace head. The race exists because the two functions |
| * do not use a common lock. |
| * The race does NOT cause a correctness problem since a) the marking thread |
| * removes the trace head from the table and b) any subsequent add attempt |
| * is caught in add_ibl_target(). The table lock is used during add and |
| * remove operations and FRAG_IS_TRACE_HEAD is checked while holding |
| * the lock. So although a trace head may be present in a table temporarily -- |
| * it's being marked while an add operation that has passed the frag flags |
| * check is in progress -- it will be subsequently removed by the marking |
| * thread. |
| * However, the existence of the race does mean that |
| * we cannot ASSERT(!(FRAG_IS_TRACE_HEAD,...)) at arbitrary spots along the |
| * add_ibl_target() path since such an assert could fire due to the race. |
| * What is likely a safe point to assert is when there is only a single |
| * thread in the process. |
| */ |
| DOCHECK(1, { |
| if (TEST(FRAG_TABLE_IBL_TARGETED, table->table_flags) && |
| get_num_threads() == 1) |
| ASSERT(!TEST(FRAG_IS_TRACE_HEAD, e->flags)); |
| }); |
| |
| return hashtable_fragment_add(dcontext, e, table); |
| } |
| |
| /* updates all fragments in a given fragment table which may |
| * have IBL routine heads inlined in the indirect exit stubs |
| * |
| * FIXME: [perf] should add a filter of which branch types need updating if |
| * updating all is a noticeable performance hit. |
| * |
| * FIXME: [perf] Also it may be better to traverse all fragments in an fcache |
| * unit instead of entries in a half-empty hashtable |
| */ |
| static void |
| update_indirect_exit_stubs_from_table(dcontext_t *dcontext, |
| fragment_table_t *ftable) |
| { |
| fragment_t *f; |
| linkstub_t *l; |
| uint i; |
| |
| for (i = 0; i < ftable->capacity; i++) { |
| f = ftable->table[i]; |
| if (!REAL_FRAGMENT(f)) |
| continue; |
| for (l = FRAGMENT_EXIT_STUBS(f); l != NULL; l = LINKSTUB_NEXT_EXIT(l)) { |
| if (LINKSTUB_INDIRECT(l->flags)) { |
| /* FIXME: should add a filter of which branch types need updating */ |
| update_indirect_exit_stub(dcontext, f, l); |
| LOG(THREAD, LOG_FRAGMENT, 5, |
| "\tIBL target table resizing: updating F%d\n", f->id); |
| STATS_INC(num_ibl_stub_resize_updates); |
| } |
| } |
| } |
| } |
| |
| static void |
| safely_nullify_tables(dcontext_t *dcontext, ibl_table_t *new_table, |
| fragment_entry_t *table, uint capacity) |
| { |
| uint i; |
| cache_pc target_delete = get_target_delete_entry_pc(dcontext, new_table); |
| |
| ASSERT(target_delete != NULL); |
| ASSERT_TABLE_SYNCHRONIZED(new_table, WRITE); |
| for (i = 0; i < capacity; i++) { |
| if (IBL_ENTRY_IS_SENTINEL(table[i])) { |
| ASSERT(i == capacity - 1); |
| continue; |
| } |
| /* We need these writes to be atomic, so check that they're aligned. */ |
| ASSERT(ALIGNED(&table[i].tag_fragment, 4)); |
| ASSERT(ALIGNED(&table[i].start_pc_fragment, 4)); |
| /* We update the tag first so that a thread that's skipping |
| * along a chain will exit ASAP. Breaking the chain is ok since we're |
| * nullifying the entire table. |
| */ |
| table[i].tag_fragment = fe_empty.tag_fragment; |
| /* We set the payload to target_delete to induce a cache exit. |
| * |
| * The target_delete path leads to a loss of information -- we can't |
| * tell what the src fragment was (the one that transitioned to the |
| * IBL code) and this in principle could weaken our RCT checks (see case |
| * 5085). In practical terms, RCT checks are unaffected since they |
| * are not employed on in-cache transitions such as an IBL hit. |
| * (All transitions to target_delete are a race along the hit path.) |
| * If we still want to preserve the src info, we can leave the payload |
| * as-is, possibly pointing to a cache address. The effect is that |
| * any thread accessing the old table on the IBL hit path will not exit |
| * the cache as early. (We should leave the fragment_t* value in the |
| * table untouched also so that the fragment_table_t is in a consistent |
| * state.) |
| */ |
| table[i].start_pc_fragment = target_delete; |
| } |
| STATS_INC(num_shared_ibt_table_flushes); |
| } |
| |
| /* Add an item to the dead tables list */ |
| static inline void |
| add_to_dead_table_list(dcontext_t *alloc_dc, ibl_table_t *ftable, |
| uint old_capacity, |
| fragment_entry_t *old_table_unaligned, uint old_ref_count, |
| uint old_table_flags) |
| { |
| dead_fragment_table_t *item =(dead_fragment_table_t*) |
| heap_alloc(GLOBAL_DCONTEXT, sizeof(dead_fragment_table_t) |
| HEAPACCT(ACCT_IBLTABLE)); |
| |
| LOG(GLOBAL, LOG_FRAGMENT, 2, |
| "add_to_dead_table_list %s "PFX" capacity %d\n", |
| ftable->name, old_table_unaligned, old_capacity); |
| ASSERT(old_ref_count >= 1); /* someone other than the caller must be holding a reference */ |
| /* write lock must be held so that ref_count is copied accurately */ |
| ASSERT_TABLE_SYNCHRONIZED(ftable, WRITE); |
| item->capacity = old_capacity; |
| item->table_unaligned = old_table_unaligned; |
| item->table_flags = old_table_flags; |
| item->ref_count = old_ref_count; |
| item->next = NULL; |
| /* Add to the end of list. We use a FIFO because generally we'll be |
| * decrementing ref-counts for older tables before we do so for |
| * younger tables. A FIFO will yield faster searches than, say, a |
| * stack. |
| */ |
| mutex_lock(&dead_tables_lock); |
| if (dead_lists->dead_tables == NULL) { |
| ASSERT(dead_lists->dead_tables_tail == NULL); |
| dead_lists->dead_tables = item; |
| } |
| else { |
| ASSERT(dead_lists->dead_tables_tail != NULL); |
| ASSERT(dead_lists->dead_tables_tail->next == NULL); |
| dead_lists->dead_tables_tail->next = item; |
| } |
| dead_lists->dead_tables_tail = item; |
| mutex_unlock(&dead_tables_lock); |
| STATS_ADD_PEAK(num_dead_shared_ibt_tables, 1); |
| STATS_INC(num_total_dead_shared_ibt_tables); |
| } |
| |
| /* forward decl */ |
| static inline void |
| update_private_ptr_to_shared_ibt_table(dcontext_t *dcontext, |
| ibl_branch_type_t branch_type, bool trace, |
| bool adjust_old_ref_count, bool lock_table); |
| static void |
| hashtable_ibl_resized_custom(dcontext_t *dcontext, ibl_table_t *table, |
| uint old_capacity, fragment_entry_t *old_table, |
| fragment_entry_t *old_table_unaligned, |
| uint old_ref_count, uint old_table_flags) |
| { |
| dcontext_t *alloc_dc = FRAGMENT_TABLE_ALLOC_DC(dcontext, table->table_flags); |
| per_thread_t *pt = GET_PT(dcontext); |
| bool shared_ibt_table = |
| TESTALL(FRAG_TABLE_TARGET_SHARED | FRAG_TABLE_SHARED, table->table_flags); |
| ASSERT(TEST(FRAG_TABLE_IBL_TARGETED, table->table_flags)); |
| |
| /* If we change an ibl-targeted table, must patch up every |
| * inlined indirect exit stub that targets it. |
| * For our per-type ibl tables however we don't bother updating |
| * fragments _targeted_ by the resized table, instead we need to |
| * update all fragments that may be a source of an inlined IBL. |
| */ |
| |
| /* private inlined IBL heads targeting this table need to be updated */ |
| if (DYNAMO_OPTION(inline_trace_ibl) && PRIVATE_TRACES_ENABLED()) { |
| /* We'll get here on any trace table resize, though we |
| * need to patch only when the trace_ibt tables are resized. |
| */ |
| /* We assume we don't inline IBL lookup targeting tables of basic blocks |
| * and so shouldn't need to do this for now. */ |
| ASSERT(dcontext != GLOBAL_DCONTEXT && pt != NULL); /* private traces */ |
| if (TESTALL(FRAG_TABLE_INCLUSIVE_HIERARCHY | FRAG_TABLE_TRACE, |
| table->table_flags)) { |
| /* need to update all traces that could be targeting the |
| * currently resized table */ |
| LOG(THREAD, LOG_FRAGMENT, 2, |
| "\tIBL target table resizing: updating all private trace fragments\n"); |
| update_indirect_exit_stubs_from_table(dcontext, &pt->trace); |
| } |
| } |
| |
| /* if we change the trace table (or an IBL target trace |
| * table), must patch up every inlined indirect exit stub |
| * in all bb fragments in case the inlined target is the |
| * resized table |
| */ |
| if (DYNAMO_OPTION(inline_bb_ibl)) { |
| LOG(THREAD, LOG_FRAGMENT, 3, |
| "\tIBL target table resizing: updating bb fragments\n"); |
| update_indirect_exit_stubs_from_table(dcontext, &pt->bb); |
| } |
| |
| /* don't need to update any inlined lookups in shared fragments */ |
| |
| if (shared_ibt_table) { |
| if (old_ref_count > 0) { |
| /* The old table should be nullified ASAP. Since threads update |
| * their table pointers on-demand only when they exit the cache |
| * after a failed IBL lookup, they could have IBL targets for |
| * stale entries. This would likely occur only when there's an |
| * app race but in the future could occur due to cache |
| * management. |
| */ |
| safely_nullify_tables(dcontext, table, old_table, old_capacity); |
| add_to_dead_table_list(alloc_dc, table, old_capacity, |
| old_table_unaligned, |
| old_ref_count, table->table_flags); |
| } |
| /* Update the resizing thread's private ptr. */ |
| update_private_ptr_to_shared_ibt_table(dcontext, table->branch_type, |
| TEST(FRAG_TABLE_TRACE, |
| table->table_flags), |
| false, /* no adjust |
| * old ref-count */ |
| false /* already hold lock */); |
| ASSERT(table->ref_count == 1); |
| } |
| |
| /* CHECK: is it safe to update the table without holding the lock? */ |
| /* Using the table flags to drive the update of generated code may |
| * err on the side of caution, but it's the best way to guarantee |
| * that all of the necessary code is updated. |
| * We may perform extra unnecessary updates when a table that's |
| * accessed off of the dcontext/per_thread_t is grown, but that doesn't |
| * cause correctness problems and likely doesn't hurt performance. |
| */ |
| STATS_INC(num_ibt_table_resizes); |
| update_generated_hashtable_access(dcontext); |
| } |
| |
| #ifdef DEBUG |
| static void |
| hashtable_ibl_study_custom(dcontext_t *dcontext, ibl_table_t *table, |
| uint entries_inc/*amnt table->entries was pre-inced*/) |
| { |
| # ifdef HASHTABLE_STATISTICS |
| /* For trace table(s) only, use stats from emitted ibl routines */ |
| if (TEST(FRAG_TABLE_IBL_TARGETED, table->table_flags) && |
| INTERNAL_OPTION(hashtable_ibl_stats)) { |
| per_thread_t *pt = GET_PT(dcontext); |
| ibl_branch_type_t branch_type; |
| |
| for (branch_type = IBL_BRANCH_TYPE_START; |
| branch_type < IBL_BRANCH_TYPE_END; branch_type++) { |
| /* This is convoluted since given a table we have to |
| * recover its branch type. |
| * FIXME: should simplify these assumptions one day |
| */ |
| /* Current table should be targeted only by one of the IBL routines */ |
| if (!((!DYNAMO_OPTION(disable_traces) && |
| table == &pt->trace_ibt[branch_type]) || |
| (DYNAMO_OPTION(bb_ibl_targets) && |
| table == &pt->bb_ibt[branch_type]))) |
| continue; |
| /* stats for lookup routines from bbs and traces targeting the current table */ |
| print_hashtable_stats(dcontext, entries_inc == 0 ? "Total" : "Current", |
| table->name, |
| "trace ibl ", get_branch_type_name(branch_type), |
| &table->UNPROT_STAT(trace_ibl_stats[branch_type])); |
| print_hashtable_stats(dcontext, entries_inc == 0 ? "Total" : "Current", |
| table->name, |
| "bb ibl ", |
| get_branch_type_name(branch_type), |
| &table->UNPROT_STAT(bb_ibl_stats[branch_type])); |
| } |
| } |
| # endif /* HASHTABLE_STATISTICS */ |
| } |
| #endif /* DEBUG */ |
| |
| #if defined(DEBUG) || defined(CLIENT_INTERFACE) |
| /* filter specifies flags for fragments which are OK to be freed */ |
| /* NOTE - if this routine is ever used for non-DEBUG purposes be aware that |
| * because of case 7697 we don't unlink when we free the hashtable elements. |
| * As such, if we aren't also freeing all fragments that could possibly link |
| * to fragments in this table at the same time (synchronously) we'll have |
| * problems (for ex. a trace-only reset would need to unlink incoming, or |
| * allowing private->shared linking would need to unlink outgoing). |
| */ |
| static void |
| hashtable_fragment_reset(dcontext_t *dcontext, fragment_table_t *table) |
| { |
| int i; |
| fragment_t *f; |
| |
| /* case 7691: we now use separate ibl table types */ |
| ASSERT(!TEST(FRAG_TABLE_INCLUSIVE_HIERARCHY, table->table_flags)); |
| LOG(THREAD, LOG_FRAGMENT, 2, "hashtable_fragment_reset\n"); |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_fragment_load_statistics(dcontext, table); |
| }); |
| if (TEST(FRAG_TABLE_SHARED, table->table_flags) && |
| TEST(FRAG_TABLE_IBL_TARGETED, table->table_flags)) { |
| DOLOG(4, LOG_FRAGMENT, { |
| hashtable_fragment_dump_table(dcontext, table); |
| }); |
| } |
| DODEBUG({ |
| hashtable_fragment_study(dcontext, table, 0/*table consistent*/); |
| /* ensure write lock is held if the table is shared, unless exiting |
| * or resetting (N.B.: if change reset model to not suspend all in-DR |
| * threads, will have to change this and handle rank order issues) |
| */ |
| if (!dynamo_exited && !dynamo_resetting) |
| ASSERT_TABLE_SYNCHRONIZED(table, WRITE); |
| }); |
| /* Go in reverse order (for efficiency) since we use |
| * hashtable_fragment_remove_helper to keep all entries reachable, which is |
| * required for dynamo_resetting where we unlink fragments here and need to |
| * be able to perform lookups. |
| */ |
| i = table->capacity - 1 - 1 /* sentinel */; |
| while (i >= 0) { |
| f = table->table[i]; |
| if (f == &null_fragment) { |
| i--; |
| } else { /* i stays put */ |
| /* The shared BB table is reset at process reset or shutdown, so |
| * trace_abort() has already been called by (or for) every thread. |
| * If shared traces is true, by this point none of the shared BBs |
| * should have FRAG_TRACE_BUILDING set since the flag is cleared |
| * by trace_abort(). Of course, the flag shouldn't be present |
| * if shared traces is false so we don't need to conditionalize |
| * the assert. |
| */ |
| ASSERT(!TEST(FRAG_TRACE_BUILDING, f->flags)); |
| hashtable_fragment_remove_helper(table, i, &table->table[i]); |
| if (!REAL_FRAGMENT(f)) |
| continue; |
| /* make sure no other hashtable has shared fragments in it; |
| * this routine is called on a shared table, but only after dynamo_exited. |
| * The per-thread IBL tables contain pointers to shared fragments |
| * and are OK. |
| */ |
| ASSERT(dynamo_exited || !TEST(FRAG_SHARED, f->flags) || dynamo_resetting); |
| |
| # if defined(SIDELINE) && defined(PROFILE_LINKCOUNT) |
| if ((f->flags & FRAG_DO_NOT_SIDELINE) != 0) { |
| /* print out total count of exit counters */ |
| LOG(THREAD, LOG_SIDELINE, 2, "\tSidelined trace F%d total times executed: " |
| LINKCOUNT_FORMAT_STRING "\n", f->id, get_total_linkcount(f)); |
| } |
| # endif |
| if (TEST(FRAG_IS_FUTURE, f->flags)) { |
| DODEBUG({ ((future_fragment_t *)f)->incoming_stubs = NULL; }); |
| fragment_free_future(dcontext, (future_fragment_t *)f); |
| } else { |
| DOSTATS({ |
| if (dynamo_resetting) |
| STATS_INC(num_fragments_deleted_reset); |
| else |
| STATS_INC(num_fragments_deleted_exit); |
| }); |
| /* Xref 7697 - unlinking the fragments here can screw up the |
| * future table as we are walking in hash order, so we don't |
| * unlink. See note at top of routine for issues with not |
| * unlinking here if this code is ever used in non debug |
| * builds. */ |
| fragment_delete(dcontext, f, |
| FRAGDEL_NO_HTABLE | FRAGDEL_NO_UNLINK | |
| FRAGDEL_NEED_CHLINK_LOCK | |
| (dynamo_resetting ? 0 : FRAGDEL_NO_OUTPUT)); |
| } |
| } |
| } |
| table->entries = 0; |
| table->unlinked_entries = 0; |
| } |
| #endif /* DEBUG || CLIENT_INTERFACE */ |
| |
| /* |
| *******************************************************************************/ |
| |
| |
| #if defined(RETURN_AFTER_CALL) || defined (RCT_IND_BRANCH) |
| /******************************************************************************* |
| * APP_PC HASHTABLE INSTANTIATION |
| */ |
| /* FIXME: RCT tables no longer use future_fragment_t and can be moved out of fragment.c */ |
| |
| /* The ENTRY_* defines are undef-ed at end of hashtablex.h so we make our own. |
| * Would be nice to re-use ENTRY_IS_EMPTY, etc., though w/ multiple htables |
| * in same file can't realistically get away w/o custom defines like these: |
| */ |
| #define APP_PC_EMPTY (NULL) |
| /* assume 1 is always invalid address */ |
| #define APP_PC_SENTINEL ((app_pc)PTR_UINT_1) |
| #define APP_PC_ENTRY_IS_EMPTY(pc) ((pc) == APP_PC_EMPTY) |
| #define APP_PC_ENTRY_IS_SENTINEL(pc) ((pc) == APP_PC_SENTINEL) |
| #define APP_PC_ENTRY_IS_REAL(pc) (!APP_PC_ENTRY_IS_EMPTY(pc) && \ |
| !APP_PC_ENTRY_IS_SENTINEL(pc)) |
| /* 2 macros w/ name and types are duplicated in fragment.h -- keep in sync */ |
| #define NAME_KEY app_pc |
| #define ENTRY_TYPE app_pc |
| /* not defining HASHTABLE_USE_LOOKUPTABLE */ |
| #define ENTRY_TAG(f) ((ptr_uint_t)(f)) |
| #define ENTRY_EMPTY APP_PC_EMPTY |
| #define ENTRY_SENTINEL APP_PC_SENTINEL |
| #define ENTRY_IS_EMPTY(f) APP_PC_ENTRY_IS_EMPTY(f) |
| #define ENTRY_IS_SENTINEL(f) APP_PC_ENTRY_IS_SENTINEL(f) |
| #define ENTRY_IS_INVALID(f) (false) /* no invalid entries */ |
| #define ENTRIES_ARE_EQUAL(t,f,g) ((f) == (g)) |
| #define HASHTABLE_WHICH_HEAP(flags) (ACCT_AFTER_CALL) |
| #define HTLOCK_RANK app_pc_table_rwlock |
| #define HASHTABLE_SUPPORT_PERSISTENCE 1 |
| |
| #include "hashtablex.h" |
| /* all defines are undef-ed at end of hashtablex.h */ |
| |
| /* required routines for hashtable interface that we don't need for this instance */ |
| |
| static void |
| hashtable_app_pc_init_internal_custom(dcontext_t *dcontext, app_pc_table_t *htable) |
| { /* nothing */ |
| } |
| |
| static void |
| hashtable_app_pc_resized_custom(dcontext_t *dcontext, app_pc_table_t *htable, |
| uint old_capacity, app_pc *old_table, |
| app_pc *old_table_unaligned, |
| uint old_ref_count, uint old_table_flags) |
| { /* nothing */ |
| } |
| |
| # ifdef DEBUG |
| static void |
| hashtable_app_pc_study_custom(dcontext_t *dcontext, app_pc_table_t *htable, |
| uint entries_inc/*amnt table->entries was pre-inced*/) |
| { /* nothing */ |
| } |
| # endif |
| |
| static void |
| hashtable_app_pc_free_entry(dcontext_t *dcontext, app_pc_table_t *htable, |
| app_pc entry) |
| { |
| /* nothing to do, data is inlined */ |
| } |
| |
| #endif /* defined(RETURN_AFTER_CALL) || defined (RCT_IND_BRANCH) */ |
| /*******************************************************************************/ |
| |
| |
| bool |
| fragment_initialized(dcontext_t *dcontext) |
| { |
| return (dcontext != GLOBAL_DCONTEXT && dcontext->fragment_field != NULL); |
| } |
| |
| /* thread-shared initialization that should be repeated after a reset */ |
| void |
| fragment_reset_init(void) |
| { |
| /* case 7966: don't initialize at all for hotp_only & thin_client */ |
| if (RUNNING_WITHOUT_CODE_CACHE()) |
| return; |
| |
| mutex_lock(&shared_cache_flush_lock); |
| /* ASSUMPTION: a reset frees all deletions that use flushtimes, so we can |
| * reset the global flushtime here |
| */ |
| flushtime_global = 0; |
| mutex_unlock(&shared_cache_flush_lock); |
| |
| if (SHARED_FRAGMENTS_ENABLED()) { |
| if (DYNAMO_OPTION(shared_bbs)) { |
| hashtable_fragment_init(GLOBAL_DCONTEXT, shared_bb, |
| INIT_HTABLE_SIZE_SHARED_BB, |
| INTERNAL_OPTION(shared_bb_load), |
| (hash_function_t)INTERNAL_OPTION(alt_hash_func), |
| 0 /* hash_mask_offset */, |
| FRAG_TABLE_SHARED | FRAG_TABLE_TARGET_SHARED |
| _IF_DEBUG("shared_bb")); |
| } |
| if (DYNAMO_OPTION(shared_traces)) { |
| hashtable_fragment_init(GLOBAL_DCONTEXT, shared_trace, |
| INIT_HTABLE_SIZE_SHARED_TRACE, |
| INTERNAL_OPTION(shared_trace_load), |
| (hash_function_t)INTERNAL_OPTION(alt_hash_func), |
| 0 /* hash_mask_offset */, |
| FRAG_TABLE_SHARED | FRAG_TABLE_TARGET_SHARED |
| _IF_DEBUG("shared_trace")); |
| } |
| /* init routine will work for future_fragment_t* same as for fragment_t* */ |
| hashtable_fragment_init(GLOBAL_DCONTEXT, shared_future, |
| INIT_HTABLE_SIZE_SHARED_FUTURE, |
| INTERNAL_OPTION(shared_future_load), |
| (hash_function_t)INTERNAL_OPTION(alt_hash_func), |
| 0 /* hash_mask_offset */, |
| FRAG_TABLE_SHARED | FRAG_TABLE_TARGET_SHARED |
| _IF_DEBUG("shared_future")); |
| } |
| |
| if (SHARED_IBT_TABLES_ENABLED()) { |
| |
| ibl_branch_type_t branch_type; |
| |
| ASSERT(USE_SHARED_PT()); |
| |
| for (branch_type = IBL_BRANCH_TYPE_START; |
| branch_type < IBL_BRANCH_TYPE_END; branch_type++) { |
| if (DYNAMO_OPTION(shared_trace_ibt_tables)) { |
| hashtable_ibl_myinit(GLOBAL_DCONTEXT, &shared_pt->trace_ibt[branch_type], |
| DYNAMO_OPTION(shared_ibt_table_trace_init), |
| DYNAMO_OPTION(shared_ibt_table_trace_load), |
| HASH_FUNCTION_NONE, |
| HASHTABLE_IBL_OFFSET(branch_type), |
| branch_type, |
| false, /* no lookup table */ |
| FRAG_TABLE_SHARED | |
| FRAG_TABLE_TARGET_SHARED | |
| FRAG_TABLE_TRACE |
| _IF_DEBUG(ibl_trace_table_type_names[branch_type])); |
| #ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| CHECK_UNPROT_STATS(&shared_pt->trace_ibt[branch_type]); |
| /* for compatibility using an entry in the per-branch type stats */ |
| INIT_HASHTABLE_STATS(shared_pt->trace_ibt[branch_type]. |
| UNPROT_STAT(trace_ibl_stats[branch_type])); |
| } else { |
| shared_pt->trace_ibt[branch_type].unprot_stats = NULL; |
| } |
| #endif /* HASHTABLE_STATISTICS */ |
| } |
| |
| if (DYNAMO_OPTION(shared_bb_ibt_tables)) { |
| hashtable_ibl_myinit(GLOBAL_DCONTEXT, &shared_pt->bb_ibt[branch_type], |
| DYNAMO_OPTION(shared_ibt_table_bb_init), |
| DYNAMO_OPTION(shared_ibt_table_bb_load), |
| HASH_FUNCTION_NONE, |
| HASHTABLE_IBL_OFFSET(branch_type), |
| branch_type, |
| false, /* no lookup table */ |
| FRAG_TABLE_SHARED | |
| FRAG_TABLE_TARGET_SHARED |
| _IF_DEBUG(ibl_bb_table_type_names[branch_type])); |
| /* mark as inclusive table for bb's - we in fact currently |
| * keep only frags that are not FRAG_IS_TRACE_HEAD */ |
| #ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| /* for compatibility using an entry in the per-branch type stats */ |
| CHECK_UNPROT_STATS(&shared_pt->bb_ibt[branch_type]); |
| /* FIXME: we don't expect trace_ibl_stats yet */ |
| INIT_HASHTABLE_STATS(shared_pt->bb_ibt[branch_type]. |
| UNPROT_STAT(bb_ibl_stats[branch_type])); |
| } else { |
| shared_pt->bb_ibt[branch_type].unprot_stats = NULL; |
| } |
| #endif /* HASHTABLE_STATISTICS */ |
| } |
| } |
| } |
| |
| #ifdef SHARING_STUDY |
| if (INTERNAL_OPTION(fragment_sharing_study)) { |
| uint size = HASHTABLE_SIZE(SHARED_HASH_BITS) * sizeof(shared_entry_t*); |
| shared_blocks = (shared_entry_t**) global_heap_alloc(size HEAPACCT(ACCT_OTHER)); |
| memset(shared_blocks, 0, size); |
| shared_traces = (shared_entry_t**) global_heap_alloc(size HEAPACCT(ACCT_OTHER)); |
| memset(shared_traces, 0, size); |
| } |
| #endif |
| } |
| |
| /* thread-shared initialization */ |
| void |
| fragment_init() |
| { |
| /* case 7966: don't initialize at all for hotp_only & thin_client |
| * FIXME: could set initial sizes to 0 for all configurations, instead |
| */ |
| if (RUNNING_WITHOUT_CODE_CACHE()) |
| return; |
| |
| /* make sure fields are at same place */ |
| ASSERT(offsetof(fragment_t, flags) == offsetof(future_fragment_t, flags)); |
| ASSERT(offsetof(fragment_t, tag) == offsetof(future_fragment_t, tag)); |
| |
| /* ensure we can read this w/o a lock: no cache line crossing, please */ |
| ASSERT(ALIGNED(&flushtime_global, 4)); |
| |
| if (SHARED_FRAGMENTS_ENABLED()) { |
| /* tables are persistent across resets, only on heap for selfprot (case 7957) */ |
| if (DYNAMO_OPTION(shared_bbs)) { |
| shared_bb = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, fragment_table_t, |
| ACCT_FRAG_TABLE, PROTECTED); |
| } |
| if (DYNAMO_OPTION(shared_traces)) { |
| shared_trace = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, fragment_table_t, |
| ACCT_FRAG_TABLE, PROTECTED); |
| } |
| shared_future = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, fragment_table_t, |
| ACCT_FRAG_TABLE, PROTECTED); |
| } |
| |
| if (USE_SHARED_PT()) |
| shared_pt = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, per_thread_t, ACCT_OTHER, PROTECTED); |
| |
| if (SHARED_IBT_TABLES_ENABLED()) { |
| dead_lists = |
| HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, dead_table_lists_t, ACCT_OTHER, PROTECTED); |
| memset(dead_lists, 0, sizeof(*dead_lists)); |
| } |
| |
| fragment_reset_init(); |
| |
| #if defined(INTERNAL) || defined(CLIENT_INTERFACE) |
| if (TRACEDUMP_ENABLED() && DYNAMO_OPTION(shared_traces)) { |
| ASSERT(USE_SHARED_PT()); |
| shared_pt->tracefile = open_log_file("traces-shared", NULL, 0); |
| ASSERT(shared_pt->tracefile != INVALID_FILE); |
| init_trace_file(shared_pt); |
| } |
| #endif |
| } |
| |
| /* Free all thread-shared state not critical to forward progress; |
| * fragment_reset_init() will be called before continuing. |
| */ |
| void |
| fragment_reset_free(void) |
| { |
| /* case 7966: don't initialize at all for hotp_only & thin_client */ |
| if (RUNNING_WITHOUT_CODE_CACHE()) |
| return; |
| |
| /* We must study the ibl tables before the trace/bb tables so that we're |
| * not looking at freed entries |
| */ |
| if (SHARED_IBT_TABLES_ENABLED()) { |
| |
| ibl_branch_type_t branch_type; |
| dead_fragment_table_t *current, *next; |
| DEBUG_DECLARE(int table_count = 0;) |
| DEBUG_DECLARE(stats_int_t dead_tables = GLOBAL_STAT(num_dead_shared_ibt_tables);) |
| |
| for (branch_type = IBL_BRANCH_TYPE_START; |
| branch_type < IBL_BRANCH_TYPE_END; branch_type++) { |
| if (DYNAMO_OPTION(shared_trace_ibt_tables)) { |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_ibl_load_statistics(GLOBAL_DCONTEXT, |
| &shared_pt->trace_ibt[branch_type]); |
| }); |
| hashtable_ibl_myfree(GLOBAL_DCONTEXT, |
| &shared_pt->trace_ibt[branch_type]); |
| } |
| if (DYNAMO_OPTION(shared_bb_ibt_tables)) { |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_ibl_load_statistics(GLOBAL_DCONTEXT, |
| &shared_pt->bb_ibt[branch_type]); |
| }); |
| hashtable_ibl_myfree(GLOBAL_DCONTEXT, |
| &shared_pt->bb_ibt[branch_type]); |
| } |
| } |
| |
| /* Delete dead tables. */ |
| /* grab lock for consistency, although we expect a single thread */ |
| mutex_lock(&dead_tables_lock); |
| current = dead_lists->dead_tables; |
| while (current != NULL) { |
| DODEBUG({table_count++;}); |
| next = current->next; |
| LOG(GLOBAL, LOG_FRAGMENT, 2, |
| "fragment_reset_free: dead table "PFX" cap %d, freeing\n", |
| current->table_unaligned, current->capacity); |
| hashtable_ibl_free_table(GLOBAL_DCONTEXT, current->table_unaligned, |
| current->table_flags, current->capacity); |
| heap_free(GLOBAL_DCONTEXT, current, sizeof(dead_fragment_table_t) |
| HEAPACCT(ACCT_IBLTABLE)); |
| STATS_DEC(num_dead_shared_ibt_tables); |
| STATS_INC(num_dead_shared_ibt_tables_freed); |
| current = next; |
| DODEBUG({ |
| if (dynamo_exited) |
| STATS_INC(num_dead_shared_ibt_tables_freed_at_exit); |
| }); |
| } |
| dead_lists->dead_tables = dead_lists->dead_tables_tail = NULL; |
| ASSERT(table_count == dead_tables); |
| mutex_unlock(&dead_tables_lock); |
| } |
| |
| /* FIXME: Take in a flag "permanent" that controls whether exiting or |
| * resetting. If resetting only, do not free unprot stats and entry stats |
| * (they're already in persistent heap, but we explicitly free them). |
| * This will be easy w/ unprot but will take work for entry stats |
| * since they resize themselves. |
| * Or, move them both to a new unprot and nonpersistent heap so we can |
| * actually free the memory back to the os, if we don't care to keep |
| * the stats across the reset. |
| */ |
| /* N.B.: to avoid rank order issues w/ shared_vm_areas lock being acquired |
| * after table_rwlock we do NOT grab the write lock before calling |
| * reset on the shared tables! We assume that reset involves suspending |
| * all other threads in DR and there will be no races. If the reset model |
| * changes, the lock order will have to be addressed. |
| */ |
| if (SHARED_FRAGMENTS_ENABLED()) { |
| /* clean up pending delayed deletion, if any */ |
| vm_area_check_shared_pending(GLOBAL_DCONTEXT/*== safe to free all*/, NULL); |
| |
| if (DYNAMO_OPTION(coarse_units)) { |
| /* We need to free coarse units earlier than vm_areas_exit() so we |
| * call it here. Must call before we free fine fragments so coarse |
| * can clean up incoming pointers. |
| */ |
| vm_area_coarse_units_reset_free(); |
| } |
| |
| #if defined(DEBUG) || defined(CLIENT_INTERFACE) |
| /* We need this for CLIENT_INTERFACE so that clients get fragment-deleted events. */ |
| # if !defined(DEBUG) && defined(CLIENT_INTERFACE) |
| if (dr_fragment_deleted_hook_exists()) { |
| # endif |
| if (DYNAMO_OPTION(shared_bbs)) |
| hashtable_fragment_reset(GLOBAL_DCONTEXT, shared_bb); |
| if (DYNAMO_OPTION(shared_traces)) |
| hashtable_fragment_reset(GLOBAL_DCONTEXT, shared_trace); |
| DODEBUG({hashtable_fragment_reset(GLOBAL_DCONTEXT, shared_future);}); |
| # if !defined(DEBUG) && defined(CLIENT_INTERFACE) |
| } |
| # endif |
| #endif |
| |
| if (DYNAMO_OPTION(shared_bbs)) |
| hashtable_fragment_free(GLOBAL_DCONTEXT, shared_bb); |
| if (DYNAMO_OPTION(shared_traces)) |
| hashtable_fragment_free(GLOBAL_DCONTEXT, shared_trace); |
| hashtable_fragment_free(GLOBAL_DCONTEXT, shared_future); |
| /* Do NOT free RAC table as its state cannot be rebuilt. |
| * We also do not free other RCT tables to avoid the time to rebuild them. |
| */ |
| } |
| |
| #ifdef SHARING_STUDY |
| if (INTERNAL_OPTION(fragment_sharing_study)) { |
| print_shared_stats(); |
| reset_shared_block_table(shared_blocks, &shared_blocks_lock); |
| reset_shared_block_table(shared_traces, &shared_traces_lock); |
| } |
| #endif |
| } |
| |
| /* free all state */ |
| void |
| fragment_exit() |
| { |
| /* case 7966: nothing was initialized for hotp_only & thin_client, so skip |
| * straight to the cleanup label below. |
| * FIXME: could set initial sizes to 0 for all configurations, instead |
| */ |
| if (RUNNING_WITHOUT_CODE_CACHE()) |
| goto cleanup; |
| |
| #if defined(INTERNAL) || defined(CLIENT_INTERFACE) |
| if (TRACEDUMP_ENABLED() && DYNAMO_OPTION(shared_traces)) { |
| /* write out all traces prior to deleting any, so links print nicely */ |
| uint i; |
| fragment_t *f; |
| /* change_linking_lock is required for output_trace(), though there |
| * won't be any races at this point of exiting. |
| */ |
| acquire_recursive_lock(&change_linking_lock); |
| TABLE_RWLOCK(shared_trace, read, lock); |
| for (i = 0; i < shared_trace->capacity; i++) { |
| f = shared_trace->table[i]; |
| if (!REAL_FRAGMENT(f)) |
| continue; |
| if (SHOULD_OUTPUT_FRAGMENT(f->flags)) |
| output_trace(GLOBAL_DCONTEXT, shared_pt, f, -1); |
| } |
| TABLE_RWLOCK(shared_trace, read, unlock); |
| release_recursive_lock(&change_linking_lock); |
| exit_trace_file(shared_pt); |
| } |
| #endif |
| |
| #ifdef FRAGMENT_SIZES_STUDY |
| DOLOG(1, (LOG_FRAGMENT|LOG_STATS), { |
| print_size_results(); |
| }); |
| #endif |
| |
| fragment_reset_free(); |
| |
| #ifdef RETURN_AFTER_CALL |
| if (dynamo_options.ret_after_call && rac_non_module_table.live_table != NULL) { |
| DODEBUG({ |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_app_pc_load_statistics(GLOBAL_DCONTEXT, |
| rac_non_module_table.live_table); |
| }); |
| hashtable_app_pc_study(GLOBAL_DCONTEXT, rac_non_module_table.live_table, |
| 0/*table consistent*/); |
| }); |
| hashtable_app_pc_free(GLOBAL_DCONTEXT, rac_non_module_table.live_table); |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, rac_non_module_table.live_table, |
| app_pc_table_t, ACCT_AFTER_CALL, PROTECTED); |
| rac_non_module_table.live_table = NULL; |
| } |
| ASSERT(rac_non_module_table.persisted_table == NULL); |
| DELETE_LOCK(after_call_lock); |
| #endif |
| |
| #if defined(RCT_IND_BRANCH) && defined(UNIX) |
| /* we do not free these tables in fragment_reset_free() b/c we |
| * would just have to build them all back up again in order to |
| * continue execution |
| */ |
| if ((TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_call)) || |
| TEST(OPTION_ENABLED, DYNAMO_OPTION(rct_ind_jump))) && |
| rct_global_table.live_table != NULL) { |
| DODEBUG({ |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_app_pc_load_statistics(GLOBAL_DCONTEXT, |
| rct_global_table.live_table); |
| }); |
| hashtable_app_pc_study(GLOBAL_DCONTEXT, rct_global_table.live_table, |
| 0/*table consistent*/); |
| }); |
| hashtable_app_pc_free(GLOBAL_DCONTEXT, rct_global_table.live_table); |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, rct_global_table.live_table, app_pc_table_t, |
| ACCT_AFTER_CALL, PROTECTED); |
| rct_global_table.live_table = NULL; |
| } else |
| ASSERT(rct_global_table.live_table == NULL); |
| ASSERT(rct_global_table.persisted_table == NULL); |
| #endif /* RCT_IND_BRANCH */ |
| |
| if (SHARED_FRAGMENTS_ENABLED()) { |
| /* tables are persistent across resets, only on heap for selfprot (case 7957) */ |
| if (DYNAMO_OPTION(shared_bbs)) { |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, shared_bb, fragment_table_t, |
| ACCT_FRAG_TABLE, PROTECTED); |
| shared_bb = NULL; |
| } else |
| ASSERT(shared_bb == NULL); |
| if (DYNAMO_OPTION(shared_traces)) { |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, shared_trace, fragment_table_t, |
| ACCT_FRAG_TABLE, PROTECTED); |
| shared_trace = NULL; |
| } else |
| ASSERT(shared_trace == NULL); |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, shared_future, fragment_table_t, |
| ACCT_FRAG_TABLE, PROTECTED); |
| shared_future = NULL; |
| } |
| |
| if (SHARED_IBT_TABLES_ENABLED()) { |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, dead_lists, dead_table_lists_t, |
| ACCT_OTHER, PROTECTED); |
| dead_lists = NULL; |
| } else |
| ASSERT(dead_lists == NULL); |
| |
| if (USE_SHARED_PT()) { |
| ASSERT(shared_pt != NULL); |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, shared_pt, per_thread_t, ACCT_OTHER, PROTECTED); |
| shared_pt = NULL; |
| } else |
| ASSERT(shared_pt == NULL); |
| |
| if (SHARED_IBT_TABLES_ENABLED()) |
| DELETE_LOCK(dead_tables_lock); |
| #ifdef SHARING_STUDY |
| if (INTERNAL_OPTION(fragment_sharing_study)) { |
| DELETE_LOCK(shared_blocks_lock); |
| DELETE_LOCK(shared_traces_lock); |
| } |
| #endif |
| cleanup: |
| /* FIXME: we shouldn't need these locks anyway for hotp_only & thin_client */ |
| #if defined(INTERNAL) || defined(CLIENT_INTERFACE) |
| DELETE_LOCK(tracedump_mutex); |
| #endif |
| #ifdef CLIENT_INTERFACE |
| process_client_flush_requests(NULL, GLOBAL_DCONTEXT, client_flush_requests, |
| false /* no flush */); |
| DELETE_LOCK(client_flush_request_lock); |
| #endif |
| DELETE_LOCK(shared_cache_flush_lock); |
| } |
| |
| /* Decrement the ref-count for any reference to the table that the |
| * per_thread_t contains. If could_be_live is true, write locks are |
| * acquired on the currently live tables while they are searched. */ |
| /* NOTE: Can't inline in release build -- too many call sites? */ |
| static /* inline */ void |
| dec_table_ref_count(dcontext_t *dcontext, ibl_table_t *table, bool could_be_live) |
| { |
| ibl_table_t *live_table = NULL; |
| ibl_branch_type_t branch_type; |
| |
| /* Search live tables. A live table's ref-count is decremented |
| * during a thread exit. */ |
| /* FIXME If the table is more likely to be dead, we can reverse the order |
| * and search dead tables first. */ |
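| /* Nothing to do unless ref-counting of shared IBT tables is enabled. */ |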
| if (!DYNAMO_OPTION(ref_count_shared_ibt_tables)) |
| return; |
| ASSERT(TESTALL(FRAG_TABLE_SHARED | FRAG_TABLE_IBL_TARGETED, |
| table->table_flags)); |
| if (could_be_live) { |
| for (branch_type = IBL_BRANCH_TYPE_START; |
| branch_type < IBL_BRANCH_TYPE_END; branch_type++) { |
| /* We match based on lookup table addresses. We need to lock the table |
| * during the compare and hold the lock during the ref-count dec to |
| * prevent a race with it being moved to the dead list. |
| */ |
| ibl_table_t *sh_table_ptr = TEST(FRAG_TABLE_TRACE, table->table_flags) ? |
| &shared_pt->trace_ibt[branch_type] : &shared_pt->bb_ibt[branch_type]; |
| TABLE_RWLOCK(sh_table_ptr, write, lock); |
| if (table->table == sh_table_ptr->table) { |
| live_table = sh_table_ptr; |
| break; |
| } |
| TABLE_RWLOCK(sh_table_ptr, write, unlock); |
| } |
| } |
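| /* If we matched a live table above we still hold its write lock here; |
| * it is released only after the ref-count decrement below. |
| */ |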
| if (live_table != NULL) { |
| /* During shutdown, the ref-count can reach 0. The table is freed |
| * in the fragment_exit() path. */ |
| ASSERT(live_table->ref_count >= 1); |
| live_table->ref_count--; |
| TABLE_RWLOCK(live_table, write, unlock); |
| } |
| else { /* Search the dead tables list. */ |
| dead_fragment_table_t *current = dead_lists->dead_tables; |
| dead_fragment_table_t *prev = NULL; |
| |
| ASSERT(dead_lists->dead_tables != NULL); |
| ASSERT(dead_lists->dead_tables_tail != NULL); |
| /* We expect to be removing from the head of the list but due to |
| * races could be removing from the middle, i.e., if a preceding |
| * entry is about to be removed by another thread but the |
| * dead_tables_lock hasn't been acquired yet by that thread. |
| */ |
| mutex_lock(&dead_tables_lock); |
| for (current = dead_lists->dead_tables; current != NULL; |
| prev = current, current = current->next) { |
| if (current->table_unaligned == table->table_unaligned) { |
| ASSERT_CURIOSITY(current->ref_count >= 1); |
| current->ref_count--; |
| if (current->ref_count == 0) { |
| LOG(GLOBAL, LOG_FRAGMENT, 2, |
| "dec_table_ref_count: table "PFX" cap %d at ref 0, freeing\n", |
| current->table_unaligned, current->capacity); |
| /* Unlink this table from the list. */ |
| if (prev != NULL) |
| prev->next = current->next; |
| if (current == dead_lists->dead_tables) { |
| /* remove from the front */ |
| ASSERT(prev == NULL); |
| dead_lists->dead_tables = current->next; |
| } |
| if (current == dead_lists->dead_tables_tail) |
| dead_lists->dead_tables_tail = prev; |
| hashtable_ibl_free_table(GLOBAL_DCONTEXT, |
| current->table_unaligned, |
| current->table_flags, |
| current->capacity); |
| heap_free(GLOBAL_DCONTEXT, current, sizeof(dead_fragment_table_t) |
| HEAPACCT(ACCT_IBLTABLE)); |
| STATS_DEC(num_dead_shared_ibt_tables); |
| STATS_INC(num_dead_shared_ibt_tables_freed); |
| } |
| break; |
| } |
| } |
| mutex_unlock(&dead_tables_lock); |
| ASSERT(current != NULL); |
| } |
| } |
| |
| /* Decrement the ref-count for every shared IBT table that the |
| * per_thread_t has a reference to. */ |
| static void |
| dec_all_table_ref_counts(dcontext_t *dcontext, per_thread_t *pt) |
| { |
| /* We can also decrement ref-count for dead shared tables here. */ |
| if (SHARED_IBT_TABLES_ENABLED()) { |
| ibl_branch_type_t branch_type; |
| for (branch_type = IBL_BRANCH_TYPE_START; |
| branch_type < IBL_BRANCH_TYPE_END; branch_type++) { |
| if (DYNAMO_OPTION(shared_trace_ibt_tables)) { |
| ASSERT(pt->trace_ibt[branch_type].table != NULL); |
| dec_table_ref_count(dcontext, &pt->trace_ibt[branch_type], |
| true/*check live*/); |
| } |
| if (DYNAMO_OPTION(shared_bb_ibt_tables)) { |
| ASSERT(pt->bb_ibt[branch_type].table != NULL); |
| dec_table_ref_count(dcontext, &pt->bb_ibt[branch_type], |
| true/*check live*/); |
| } |
| } |
| } |
| } |
| |
| /* re-initializes non-persistent memory */ |
| void |
| fragment_thread_reset_init(dcontext_t *dcontext) |
| { |
| per_thread_t *pt; |
| ibl_branch_type_t branch_type; |
| |
| /* case 7966: don't initialize at all for hotp_only & thin_client */ |
| if (RUNNING_WITHOUT_CODE_CACHE()) |
| return; |
| |
| pt = (per_thread_t *) dcontext->fragment_field; |
| |
| /* important to init w/ cur timestamp to avoid this thread dec-ing ref |
| * count when it wasn't included in ref count init value! |
| * assumption: don't need lock to read flushtime_global atomically. |
| * when resetting, though, thread free & re-init is done before global free, |
| * so we have to explicitly set to 0 for that case. |
| */ |
| pt->flushtime_last_update = (dynamo_resetting) ? 0 : flushtime_global; |
| |
| /* set initial hashtable sizes */ |
| hashtable_fragment_init(dcontext, &pt->bb, INIT_HTABLE_SIZE_BB, |
| INTERNAL_OPTION(private_bb_load), |
| (hash_function_t)INTERNAL_OPTION(alt_hash_func), |
| 0, 0 _IF_DEBUG("bblock")); |
| |
| /* init routine will work for future_fragment_t* same as for fragment_t* */ |
| hashtable_fragment_init(dcontext, &pt->future, INIT_HTABLE_SIZE_FUTURE, |
| INTERNAL_OPTION(private_future_load), |
| (hash_function_t)INTERNAL_OPTION(alt_hash_func), |
| 0 /* hash_mask_offset */, 0 |
| _IF_DEBUG("future")); |
| |
| /* The trace table is no longer used by the IBL routines and therefore |
| * doesn't need a lookup table; we can also use the alternative hash |
| * functions and a higher load. |
| */ |
| if (PRIVATE_TRACES_ENABLED()) { |
| hashtable_fragment_init(dcontext, &pt->trace, INIT_HTABLE_SIZE_TRACE, |
| INTERNAL_OPTION(private_trace_load), |
| (hash_function_t)INTERNAL_OPTION(alt_hash_func), |
| 0 /* hash_mask_offset */, |
| FRAG_TABLE_TRACE |
| _IF_DEBUG("trace")); |
| } |
| |
| /* We'll now have more control over hashtables based on branch |
| * type. The most important of all is of course the return |
| * target table. These tables should be populated only when |
| * we know that the entry is a valid target, a trace is |
| * created, and it is indeed targeted by an IBL. |
| */ |
| /* These tables are targeted by both bb and trace routines */ |
| for (branch_type = IBL_BRANCH_TYPE_START; |
| branch_type < IBL_BRANCH_TYPE_END; branch_type++) { |
| if (!DYNAMO_OPTION(disable_traces) |
| /* If no traces and no bb ibl targets we point ibl at |
| * an empty trace table */ |
| || !DYNAMO_OPTION(bb_ibl_targets)) { |
| if (!DYNAMO_OPTION(shared_trace_ibt_tables)) { |
| hashtable_ibl_myinit(dcontext, &pt->trace_ibt[branch_type], |
| DYNAMO_OPTION(private_trace_ibl_targets_init), |
| DYNAMO_OPTION(private_ibl_targets_load), |
| HASH_FUNCTION_NONE, |
| HASHTABLE_IBL_OFFSET(branch_type), |
| branch_type, |
| false, /* no lookup table */ |
| (DYNAMO_OPTION(shared_traces) ? |
| FRAG_TABLE_TARGET_SHARED : 0) | |
| FRAG_TABLE_TRACE |
| _IF_DEBUG(ibl_trace_table_type_names[branch_type])); |
| #ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| CHECK_UNPROT_STATS(pt->trace_ibt[branch_type]); |
| /* for compatibility using an entry in the per-branch type stats */ |
| INIT_HASHTABLE_STATS(pt->trace_ibt[branch_type]. |
| UNPROT_STAT(trace_ibl_stats[branch_type])); |
| } else { |
| pt->trace_ibt[branch_type].unprot_stats = NULL; |
| } |
| #endif /* HASHTABLE_STATISTICS */ |
| } |
| else { |
| /* ensure table from last time (if we had a reset) not still there */ |
| memset(&pt->trace_ibt[branch_type], 0, sizeof(pt->trace_ibt[branch_type])); |
| update_private_ptr_to_shared_ibt_table(dcontext, branch_type, |
| true, /* trace = yes */ |
| false, /* no adjust old |
| * ref-count */ |
| true /* lock */); |
| #ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| ALLOC_UNPROT_STATS(dcontext, &pt->trace_ibt[branch_type]); |
| CHECK_UNPROT_STATS(pt->trace_ibt[branch_type]); |
| INIT_HASHTABLE_STATS(pt->trace_ibt[branch_type]. |
| UNPROT_STAT(trace_ibl_stats[branch_type])); |
| } else { |
| pt->trace_ibt[branch_type].unprot_stats = NULL; |
| } |
| #endif |
| } |
| } |
| |
| /* When targeting BBs, the source is currently assumed to be only a |
| * bb, since a trace going to a bb for the first time should mark it |
| * as a trace head. Therefore these tables are currently only |
| * targeted by bb IBL routines. It will be possible later to |
| * deal with trace heads and allow a trace to target a BB with |
| * the intent of modifying its THCI. |
| * |
| * (FIXME: having another table for THCI IBLs seems better than |
| * adding a counter (starting at -1) to all blocks and |
| * trapping when 0 for marking a trace head and again at 50 |
| * for creating a trace. And that is all of course after proving |
| * that doing it in DR has significant impact.) |
| * |
| * Note that private bb2bb transitions are not captured when |
| * we run with -shared_bbs. |
| */ |
| |
| /* These tables should be populated only when we know that the |
| * entry is a valid target, and it is indeed targeted by an |
| * IBL. They have to be per-type so that our security |
| * policies are properly checked. |
| */ |
| if (DYNAMO_OPTION(bb_ibl_targets)) { |
| if (!DYNAMO_OPTION(shared_bb_ibt_tables)) { |
| hashtable_ibl_myinit(dcontext, &pt->bb_ibt[branch_type], |
| DYNAMO_OPTION(private_bb_ibl_targets_init), |
| DYNAMO_OPTION(private_bb_ibl_targets_load), |
| HASH_FUNCTION_NONE, |
| HASHTABLE_IBL_OFFSET(branch_type), |
| branch_type, |
| false, /* no lookup table */ |
| (DYNAMO_OPTION(shared_bbs) ? |
| FRAG_TABLE_TARGET_SHARED : 0) |
| _IF_DEBUG(ibl_bb_table_type_names[branch_type])); |
| /* mark as inclusive table for bb's - we in fact currently |
| * keep only frags that are not FRAG_IS_TRACE_HEAD */ |
| #ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| /* for compatibility using an entry in the per-branch type stats */ |
| CHECK_UNPROT_STATS(pt->bb_ibt[branch_type]); |
| /* FIXME: we don't expect trace_ibl_stats yet */ |
| INIT_HASHTABLE_STATS(pt->bb_ibt[branch_type]. |
| UNPROT_STAT(bb_ibl_stats[branch_type])); |
| } else { |
| pt->bb_ibt[branch_type].unprot_stats = NULL; |
| } |
| #endif /* HASHTABLE_STATISTICS */ |
| } |
| else { |
| /* ensure table from last time (if we had a reset) not still there */ |
| memset(&pt->bb_ibt[branch_type], 0, sizeof(pt->bb_ibt[branch_type])); |
| update_private_ptr_to_shared_ibt_table(dcontext, branch_type, |
| false, /* trace = no */ |
| false, /* no adjust old |
| * ref-count */ |
| true /* lock */); |
| #ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| ALLOC_UNPROT_STATS(dcontext, &pt->bb_ibt[branch_type]); |
| CHECK_UNPROT_STATS(pt->bb_ibt[branch_type]); |
| INIT_HASHTABLE_STATS(pt->bb_ibt[branch_type]. |
| UNPROT_STAT(bb_ibl_stats[branch_type])); |
| } else { |
| pt->bb_ibt[branch_type].unprot_stats = NULL; |
| } |
| #endif |
| } |
| } |
| } |
| ASSERT(IBL_BRANCH_TYPE_END == 3); |
| |
| update_generated_hashtable_access(dcontext); |
| } |
| |
| void |
| fragment_thread_init(dcontext_t *dcontext) |
| { |
| /* we allocate per_thread_t in the global heap solely for self-protection, |
| * even when turned off, since even with a lot of threads this isn't a lot of |
| * pressure on the global heap |
| */ |
| per_thread_t *pt; |
| |
| /* case 7966: don't initialize un-needed data for hotp_only & thin_client. |
| * FIXME: could set htable initial sizes to 0 for all configurations, instead. |
| * per_thread_t is pretty big, so we avoid it, though it costs us checks for |
| * hotp_only in the islinking-related routines. |
| */ |
| if (RUNNING_WITHOUT_CODE_CACHE()) |
| return; |
| |
| pt = (per_thread_t *) global_heap_alloc(sizeof(per_thread_t) HEAPACCT(ACCT_OTHER)); |
| dcontext->fragment_field = (void *) pt; |
| |
| fragment_thread_reset_init(dcontext); |
| |
| #if defined(INTERNAL) || defined(CLIENT_INTERFACE) |
| if (TRACEDUMP_ENABLED() && PRIVATE_TRACES_ENABLED()) { |
| pt->tracefile = open_log_file("traces", NULL, 0); |
| ASSERT(pt->tracefile != INVALID_FILE); |
| init_trace_file(pt); |
| } |
| #endif |
| #if defined(CLIENT_INTERFACE) && defined(CLIENT_SIDELINE) |
| ASSIGN_INIT_LOCK_FREE(pt->fragment_delete_mutex, fragment_delete_mutex); |
| #endif |
| |
| pt->could_be_linking = false; |
| pt->wait_for_unlink = false; |
| pt->about_to_exit = false; |
| pt->flush_queue_nonempty = false; |
| pt->waiting_for_unlink = create_event(); |
| pt->finished_with_unlink = create_event(); |
| ASSIGN_INIT_LOCK_FREE(pt->linking_lock, linking_lock); |
| pt->finished_all_unlink = create_event(); |
| pt->soon_to_be_linking = false; |
| pt->at_syscall_at_flush = false; |
| |
| #ifdef PROFILE_LINKCOUNT |
| pt->tracedump_num_below_threshold = 0; |
| pt->tracedump_count_below_threshold = (linkcount_type_t) 0; |
| #endif |
| } |
| |
| static bool |
| check_flush_queue(dcontext_t *dcontext, fragment_t *was_I_flushed); |
| |
| /* frees all non-persistent memory */ |
| void |
| fragment_thread_reset_free(dcontext_t *dcontext) |
| { |
| per_thread_t *pt = (per_thread_t *) dcontext->fragment_field; |
| DEBUG_DECLARE(ibl_branch_type_t branch_type;) |
| |
| /* case 7966: nothing was initialized for hotp_only & thin_client, so there is nothing to free */ |
| if (RUNNING_WITHOUT_CODE_CACHE()) |
| return; |
| |
| /* Dec ref count on any shared tables that are pointed to. */ |
| dec_all_table_ref_counts(dcontext, pt); |
| |
| #ifdef DEBUG |
| /* for non-debug builds we take the fast exit path and don't free local heap */ |
| SELF_PROTECT_CACHE(dcontext, NULL, WRITABLE); |
| |
| /* we remove flushed fragments from the htable, and they can be |
| * flushed after enter_threadexit() due to os_thread_stack_exit(), |
| * so we need to check the flush queue here |
| */ |
| mutex_lock(&pt->linking_lock); |
| check_flush_queue(dcontext, NULL); |
| mutex_unlock(&pt->linking_lock); |
| |
| /* For consistency we remove entries from the IBL target |
| * tables before we remove them from the trace table. However, |
| * we cannot free any fragments here, since all of them will still |
| * be present in the trace table. |
| */ |
| for (branch_type = IBL_BRANCH_TYPE_START; |
| branch_type < IBL_BRANCH_TYPE_END; branch_type++) { |
| if (!DYNAMO_OPTION(disable_traces) |
| /* If no traces and no bb ibl targets we point ibl at |
| * an empty trace table */ |
| || !DYNAMO_OPTION(bb_ibl_targets)) { |
| if (!DYNAMO_OPTION(shared_trace_ibt_tables)) { |
| DOLOG(2, LOG_FRAGMENT, { |
| hashtable_ibl_dump_table(dcontext, &pt->trace_ibt[branch_type]); |
| }); |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_ibl_load_statistics(dcontext, &pt->trace_ibt[branch_type]); |
| }); |
| hashtable_ibl_myfree(dcontext, &pt->trace_ibt[branch_type]); |
| } else { |
| # ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| print_hashtable_stats(dcontext, "Total", |
| shared_pt->trace_ibt[branch_type].name, |
| "trace ibl ", |
| get_branch_type_name(branch_type), |
| &pt->trace_ibt[branch_type]. |
| UNPROT_STAT(trace_ibl_stats[branch_type])); |
| DEALLOC_UNPROT_STATS(dcontext, &pt->trace_ibt[branch_type]); |
| } |
| # endif |
| memset(&pt->trace_ibt[branch_type], 0, |
| sizeof(pt->trace_ibt[branch_type])); |
| } |
| } |
| if (DYNAMO_OPTION(bb_ibl_targets)) { |
| if (!DYNAMO_OPTION(shared_bb_ibt_tables)) { |
| DOLOG(2, LOG_FRAGMENT, { |
| hashtable_ibl_dump_table(dcontext, &pt->bb_ibt[branch_type]); |
| }); |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_ibl_load_statistics(dcontext, &pt->bb_ibt[branch_type]); |
| }); |
| hashtable_ibl_myfree(dcontext, &pt->bb_ibt[branch_type]); |
| } else { |
| # ifdef HASHTABLE_STATISTICS |
| if (INTERNAL_OPTION(hashtable_ibl_stats)) { |
| print_hashtable_stats(dcontext, "Total", |
| shared_pt->bb_ibt[branch_type].name, |
| "bb ibl ", |
| get_branch_type_name(branch_type), |
| &pt->bb_ibt[branch_type]. |
| UNPROT_STAT(bb_ibl_stats[branch_type])); |
| DEALLOC_UNPROT_STATS(dcontext, &pt->bb_ibt[branch_type]); |
| } |
| # endif |
| memset(&pt->bb_ibt[branch_type], 0, sizeof(pt->bb_ibt[branch_type])); |
| } |
| } |
| } |
| |
| /* case 7653: we can't free the main tables prior to freeing the contents |
| * of all of them, as link freeing involves looking up in the other tables. |
| */ |
| if (PRIVATE_TRACES_ENABLED()) { |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_fragment_load_statistics(dcontext, &pt->trace); |
| }); |
| hashtable_fragment_reset(dcontext, &pt->trace); |
| } |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_fragment_load_statistics(dcontext, &pt->bb); |
| }); |
| hashtable_fragment_reset(dcontext, &pt->bb); |
| DOLOG(1, LOG_FRAGMENT|LOG_STATS, { |
| hashtable_fragment_load_statistics(dcontext, &pt->future); |
| }); |
| hashtable_fragment_reset(dcontext, &pt->future); |
| |
| if (PRIVATE_TRACES_ENABLED()) |
| hashtable_fragment_free(dcontext, &pt->trace); |
| hashtable_fragment_free(dcontext, &pt->bb); |
| hashtable_fragment_free(dcontext, &pt->future); |
| |
| SELF_PROTECT_CACHE(dcontext, NULL, READONLY); |
| |
| #else |
| /* Case 10807: Clients need to be informed of fragment deletions |
| * so we'll reset the relevant hash tables for CI release builds. |
| */ |
| # ifdef CLIENT_INTERFACE |
| if (PRIVATE_TRACES_ENABLED()) |
| hashtable_fragment_reset(dcontext, &pt->trace); |
| hashtable_fragment_reset(dcontext, &pt->bb); |
| # endif |
| |
| #endif /* !DEBUG */ |
| } |
| |
| /* atexit cleanup */ |
| void |
| fragment_thread_exit(dcontext_t *dcontext) |
| { |
| per_thread_t *pt = (per_thread_t *) dcontext->fragment_field; |
| |
| /* case 7966: nothing was initialized for hotp_only & thin_client, so there is nothing to free */ |
| if (RUNNING_WITHOUT_CODE_CACHE()) |
| return; |
| |
| #if defined(INTERNAL) || defined(CLIENT_INTERFACE) |
| if (TRACEDUMP_ENABLED() && PRIVATE_TRACES_ENABLED()) { |
| /* write out all traces prior to deleting any, so links print nicely */ |
| uint i; |
| fragment_t *f; |
| for (i = 0; i < pt->trace.capacity; i++) { |
| f = pt->trace.table[i]; |
| if (!REAL_FRAGMENT(f)) |
| continue; |
| if (SHOULD_OUTPUT_FRAGMENT(f->flags)) |
| output_trace(dcontext, pt, f, -1); |
| } |
| exit_trace_file(pt); |
| } |
| #endif |
| |
| fragment_thread_reset_free(dcontext); |
| |
| /* events are global */ |
| destroy_event(pt->waiting_for_unlink); |
| destroy_event(pt->finished_with_unlink); |
| destroy_event(pt->finished_all_unlink); |
| DELETE_LOCK(pt->linking_lock); |
| |
| #if defined(CLIENT_INTERFACE) && defined(CLIENT_SIDELINE) |
| DELETE_LOCK(pt->fragment_delete_mutex); |
| #endif |
| |
| global_heap_free(pt, sizeof(per_thread_t) HEAPACCT(ACCT_OTHER)); |
| dcontext->fragment_field = NULL; |
| } |
| |
| #ifdef UNIX |
| void |
| fragment_fork_init(dcontext_t *dcontext) |
| { |
| /* FIXME: what about global file? */ |
| # if defined(INTERNAL) || defined(CLIENT_INTERFACE) |
| per_thread_t *pt = (per_thread_t *) dcontext->fragment_field; |
| if (TRACEDUMP_ENABLED() && PRIVATE_TRACES_ENABLED()) { |
| /* new log dir has already been created, so just open a new log file */ |
| pt->tracefile = open_log_file("traces", NULL, 0); |
| ASSERT(pt->tracefile != INVALID_FILE); |
| init_trace_file(pt); |
| } |
| # endif |
| } |
| #endif |
| |
| /* fragment_t heap layout looks like this: |
| * |
| * fragment_t/trace_t |
| * translation_info_t*, if necessary |
| * array composed of different sizes of linkstub_t subclasses: |
| * direct_linkstub_t |
| * cbr_fallthrough_linkstub_t |
| * indirect_linkstub_t |
| * post_linkstub_t, if necessary |
| */ |
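| /* Illustrative sketch (mirroring the walk in fragment_free() later in this |
| * file): the embedded linkstub_t array is traversed via |
| * |
| *   linkstub_t *l; |
| *   for (l = FRAGMENT_EXIT_STUBS(f); l != NULL; l = LINKSTUB_NEXT_EXIT(l)) { |
| *       if (LINKSTUB_DIRECT(l->flags)) |
| *           ... handle a direct exit ... |
| *       else |
| *           ... handle an indirect exit ... |
| *   } |
| */ |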
| static uint |
| fragment_heap_size(uint flags, int direct_exits, int indirect_exits) |
| { |
| uint total_sz; |
| ASSERT((direct_exits + indirect_exits > 0) || TEST(FRAG_COARSE_GRAIN, flags)); |
| total_sz = FRAGMENT_STRUCT_SIZE(flags) + |
| linkstubs_heap_size(flags, direct_exits, indirect_exits); |
| /* we rely on a small heap size for our ushort offset at the end */ |
| ASSERT(total_sz <= USHRT_MAX); |
| return total_sz; |
| } |
| |
| /* Allocates memory for a fragment_t and linkstubs and initializes them, but |
| * does not do any fcache-related initialization. |
| */ |
| static fragment_t * |
| fragment_create_heap(dcontext_t *dcontext, |
| int direct_exits, int indirect_exits, uint flags) |
| { |
| dcontext_t *alloc_dc = FRAGMENT_ALLOC_DC(dcontext, flags); |
| uint heapsz = fragment_heap_size(flags, direct_exits, indirect_exits); |
| /* linkstubs are in an array immediately after the fragment_t/trace_t struct */ |
| fragment_t *f = (fragment_t *) |
| nonpersistent_heap_alloc(alloc_dc, heapsz |
| HEAPACCT(TEST(FRAG_IS_TRACE, flags) ? |
| ACCT_TRACE : ACCT_FRAGMENT)); |
| LOG(THREAD, LOG_FRAGMENT, 5, |
| "fragment heap size for flags 0x%08x, exits %d %d, is %d => "PFX"\n", |
| flags, direct_exits, indirect_exits, heapsz, f); |
| |
| return f; |
| } |
| |
| static void |
| fragment_init_heap(fragment_t *f, app_pc tag, int direct_exits, int indirect_exits, |
| uint flags) |
| { |
| ASSERT(f != NULL); |
| f->flags = flags; /* MUST set before calling fcache_add_fragment or |
| * FRAGMENT_EXIT_STUBS */ |
| f->tag = tag; |
| /* Let fragment_create() fill in; other users are building fake fragments */ |
| DODEBUG({ f->id = -1; }); |
| f->next_vmarea = NULL; /* must be set by caller */ |
| f->prev_vmarea = NULL; /* must be set by caller */ |
| f->also.also_vmarea = NULL; /* must be set by caller */ |
| |
| linkstubs_init(FRAGMENT_EXIT_STUBS(f), direct_exits, indirect_exits, f); |
| |
| /* initialize non-ibt entry to top of fragment (caller responsible for |
| * setting up prefix) |
| */ |
| f->prefix_size = 0; |
| #ifdef FRAGMENT_SIZES_STUDY |
| record_fragment_size(f->size, (flags & FRAG_IS_TRACE) != 0); |
| #endif |
| |
| f->in_xlate.incoming_stubs = NULL; |
| #ifdef CUSTOM_TRACES_RET_REMOVAL |
| f->num_calls = 0; |
| f->num_rets = 0; |
| #endif |
| |
| /* trace-only fields */ |
| if (TEST(FRAG_IS_TRACE, flags)) { |
| trace_only_t *t = TRACE_FIELDS(f); |
| t->bbs = NULL; |
| /* real num_bbs won't be set until after the trace is emitted, |
| * but we need a non-zero value for linkstub_fragment() |
| */ |
| t->num_bbs = 1; |
| #ifdef PROFILE_RDTSC |
| t->count = 0UL; |
| t->total_time = (uint64) 0; |
| #endif |
| #ifdef SIDELINE_COUNT_STUDY |
| t->count_old_pre = (linkcount_type_t) 0; |
| t->count_old_post = (linkcount_type_t) 0; |
| #endif |
| } |
| } |
| |
| /* Create a new fragment_t with empty prefix and return it. |
| * The fragment_t is allocated on the global or local heap, depending on the flags, |
| * unless FRAG_COARSE_GRAIN is set, in which case the fragment_t is a unique |
| * temporary struct that is NOT heap allocated and is only safe to use |
| * so long as the bb_building_lock is held! |
| */ |
| fragment_t * |
| fragment_create(dcontext_t *dcontext, app_pc tag, int body_size, |
| int direct_exits, int indirect_exits, int exits_size, uint flags) |
| { |
| fragment_t *f; |
| DEBUG_DECLARE(stats_int_t next_id;) |
| DOSTATS({ |
| /* should watch this stat and, if it gets too high, revisit |
| * which fragments need the post-linkstub offset |
| */ |
| if (linkstub_frag_offs_at_end(flags, direct_exits, indirect_exits)) |
| STATS_INC(num_fragment_post_linkstub); |
| }); |
| |
| /* ensure no races during a reset */ |
| ASSERT(!dynamo_resetting); |
| |
| if (TEST(FRAG_COARSE_GRAIN, flags)) { |
| ASSERT(DYNAMO_OPTION(coarse_units)); |
| ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); |
| ASSERT(!TEST(FRAG_IS_TRACE, flags)); |
| ASSERT(TEST(FRAG_SHARED, flags)); |
| ASSERT(fragment_prefix_size(flags) == 0); |
| ASSERT((direct_exits == 0 && indirect_exits == 1) || |
| (indirect_exits == 0 && (direct_exits == 1 || direct_exits == 2))); |
| /* FIXME: eliminate this temp fragment and linkstubs and |
| * have custom emit and link code that does not require such data |
| * structures? It would certainly be faster code. |
| * But would still want to record each exit's target in a convenient |
| * data structure, for later linking, unless we try to link in |
| * the same pass in which we emit indirect stubs. |
| * We could also use fragment_create() and free the resulting struct |
| * somewhere and switch to a wrapper at that point. |
| */ |
| memset(&coarse_emit_fragment, 0, sizeof(coarse_emit_fragment)); |
| f = (fragment_t *) &coarse_emit_fragment; |
| /* We do not mark as FRAG_FAKE since this is pretty much a real |
| * fragment_t, and we do want to walk its linkstub_t structs, which |
| * are present. |
| */ |
| } else { |
| f = fragment_create_heap(dcontext, direct_exits, indirect_exits, flags); |
| } |
| |
| fragment_init_heap(f, tag, direct_exits, indirect_exits, flags); |
| |
| /* To make debugging easier we assign coarse-grain ids in the same namespace |
| * as fine-grain fragments, though we won't remember them at all. |
| */ |
| STATS_INC_ASSIGN(num_fragments, next_id); |
| IF_X64(ASSERT_TRUNCATE(f->id, int, next_id)); |
| DOSTATS({ f->id = (int) next_id; }); |
| DO_GLOBAL_STATS({ |
| if (!TEST(FRAG_IS_TRACE, f->flags)) { |
| RSTATS_INC(num_bbs); |
| IF_X64(if (FRAG_IS_32(f->flags)) STATS_INC(num_32bit_bbs);) |
| } |
| }); |
| DOSTATS({ |
| /* avoid double-counting for adaptive working set */ |
| if (!fragment_lookup_deleted(dcontext, tag) && !TEST(FRAG_COARSE_GRAIN, flags)) |
| STATS_INC(num_unique_fragments); |
| }); |
| /* FIXME: make fragment count a release-build stat so we can do this in |
| * release builds |
| */ |
| DOSTATS({ |
| if (stats != NULL && |
| (uint) GLOBAL_STAT(num_fragments) == INTERNAL_OPTION(reset_at_fragment_count)) { |
| schedule_reset(RESET_ALL); |
| } |
| }); |
| |
| /* size is a ushort. |
| * Our offsets are ushorts as well: they assume that body_size (rather |
| * than the total size) is small enough to fit. |
| */ |
| #ifdef CLIENT_INTERFACE |
| if (body_size + exits_size + fragment_prefix_size(flags) > MAX_FRAGMENT_SIZE) { |
| FATAL_USAGE_ERROR(INSTRUMENTATION_TOO_LARGE, 2, |
| get_application_name(), get_application_pid()); |
| } |
| #endif |
| ASSERT(body_size + exits_size + fragment_prefix_size(flags) <= MAX_FRAGMENT_SIZE); |
| /* currently MAX_FRAGMENT_SIZE is USHRT_MAX, but future proofing */ |
| ASSERT_TRUNCATE(f->size, ushort, |
| (body_size + exits_size + fragment_prefix_size(flags))); |
| f->size = (ushort) (body_size + exits_size + fragment_prefix_size(flags)); |
| |
| /* fcache_add will fill in start_pc, next_fcache, |
| * prev_fcache, and fcache_extra |
| */ |
| fcache_add_fragment(dcontext, f); |
| |
| /* after fcache_add_fragment so we can call get_fragment_coarse_info */ |
| DOSTATS({ |
| if (TEST(FRAG_SHARED, flags)) { |
| STATS_INC(num_shared_fragments); |
| if (TEST(FRAG_IS_TRACE, flags)) |
| STATS_INC(num_shared_traces); |
| else if (TEST(FRAG_COARSE_GRAIN, flags)) { |
| coarse_info_t *info = get_fragment_coarse_info(f); |
| if (get_executable_area_coarse_info(f->tag) != info) |
| STATS_INC(num_coarse_secondary); |
| STATS_INC(num_coarse_fragments); |
| } else |
| STATS_INC(num_shared_bbs); |
| } else { |
| STATS_INC(num_private_fragments); |
| if (TEST(FRAG_IS_TRACE, flags)) |
| STATS_INC(num_private_traces); |
| else |
| STATS_INC(num_private_bbs); |
| } |
| }); |
| |
| /* wait until the fragment is completely initialized before dumping any stats */ |
| DOLOG(1, LOG_FRAGMENT|LOG_VMAREAS, { |
| if (INTERNAL_OPTION(global_stats_interval) && |
| (f->id % INTERNAL_OPTION(global_stats_interval) == 0)) { |
| LOG(GLOBAL, LOG_FRAGMENT, 1, "Created %d fragments\n", f->id); |
| dump_global_stats(false); |
| } |
| if (INTERNAL_OPTION(thread_stats_interval) && |
| INTERNAL_OPTION(thread_stats)) { |
| /* FIXME: why do we need a new dcontext? */ |
| dcontext_t *dcontext = get_thread_private_dcontext(); |
| if (THREAD_STATS_ON(dcontext) && |
| THREAD_STAT(dcontext, num_fragments) % INTERNAL_OPTION(thread_stats_interval) == 0) { |
| dump_thread_stats(dcontext, false); |
| } |
| } |
| }); |
| |
| #ifdef WINDOWS |
| DOLOG(1, LOG_FRAGMENT|LOG_VMAREAS, { |
| if (f->id % 50000 == 0) { |
| LOG(GLOBAL, LOG_VMAREAS, 1, |
| "50K fragment check point: here are the loaded modules:\n"); |
| print_modules(GLOBAL, DUMP_NOT_XML); |
| LOG(GLOBAL, LOG_VMAREAS, 1, |
| "50K fragment check point: here are the executable areas:\n"); |
| print_executable_areas(GLOBAL); |
| } |
| }); |
| #endif |
| |
| return f; |
| } |
| |
| /* Creates a new fragment_t+linkstubs from the passed-in fragment and |
| * fills in linkstub_t and fragment_t fields, copying the fcache-related fields |
| * from the passed-in fragment (so be careful how the fields are used). |
| * Meant to be used to create a full fragment from a coarse-grain fragment. |
| * Caller is responsible for freeing via fragment_free() w/ the same dcontext |
| * passed in here. |
| */ |
| fragment_t * |
| fragment_recreate_with_linkstubs(dcontext_t *dcontext, fragment_t *f_src) |
| { |
| uint num_dir, num_indir; |
| uint size; |
| fragment_t *f_tgt; |
| instrlist_t *ilist; |
| linkstub_t *l; |
| cache_pc body_end_pc; |
| /* Not FAKE since has linkstubs, but still fake in a sense since no fcache |
| * slot -- need to mark that? |
| */ |
| uint flags = (f_src->flags & ~FRAG_FAKE); |
| ASSERT_CURIOSITY(TEST(FRAG_COARSE_GRAIN, f_src->flags)); /* only use so far */ |
| /* FIXME case 9325: build from tag here? Need to exactly re-mangle + re-instrument. |
| * We use _exact to get any elided final jmp not counted in size |
| */ |
| ilist = decode_fragment_exact(dcontext, f_src, NULL, NULL, f_src->flags, |
| &num_dir, &num_indir); |
| f_tgt = fragment_create_heap(dcontext, num_dir, num_indir, flags); |
| fragment_init_heap(f_tgt, f_src->tag, num_dir, num_indir, flags); |
| |
| f_tgt->start_pc = f_src->start_pc; |
| /* Can't call this until we have start_pc set */ |
| body_end_pc = set_linkstub_fields(dcontext, f_tgt, ilist, num_dir, num_indir, |
| false/*do not emit*/); |
| /* Calculate total size */ |
| IF_X64(ASSERT_TRUNCATE(size, uint, (body_end_pc - f_tgt->start_pc))); |
| size = (uint) (body_end_pc - f_tgt->start_pc); |
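| /* Add the size of each local exit stub so the total matches the original |
| * fragment's size field (checked in the assert below). |
| */ |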
| for (l = FRAGMENT_EXIT_STUBS(f_tgt); l != NULL; l = LINKSTUB_NEXT_EXIT(l)) { |
| if (!EXIT_HAS_LOCAL_STUB(l->flags, f_tgt->flags)) |
| continue; /* it's kept elsewhere */ |
| size += linkstub_size(dcontext, f_tgt, l); |
| #ifdef CUSTOM_EXIT_STUBS |
| size += l->fixed_stub_offset; |
| #endif |
| } |
| ASSERT_TRUNCATE(f_tgt->size, ushort, size); |
| f_tgt->size = (ushort) size; |
| ASSERT(TEST(FRAG_FAKE, f_src->flags) || size == f_src->size); |
| ASSERT_TRUNCATE(f_tgt->prefix_size, byte, fragment_prefix_size(f_src->flags)); |
| f_tgt->prefix_size = (byte) fragment_prefix_size(f_src->flags); |
| ASSERT(TEST(FRAG_FAKE, f_src->flags) || f_src->prefix_size == f_tgt->prefix_size); |
| f_tgt->fcache_extra = f_src->fcache_extra; |
| |
| instrlist_clear_and_destroy(dcontext, ilist); |
| |
| return f_tgt; |
| } |
| |
| /* Frees the storage associated with f. |
| * Callers should use fragment_delete() instead of this routine, unless they |
| * obtained their fragment_t from fragment_recreate_with_linkstubs(). |
| */ |
| void |
| fragment_free(dcontext_t *dcontext, fragment_t *f) |
| { |
| dcontext_t *alloc_dc = FRAGMENT_ALLOC_DC(dcontext, f->flags); |
| uint heapsz; |
| int direct_exits = 0; |
| int indirect_exits = 0; |
| linkstub_t *l = FRAGMENT_EXIT_STUBS(f); |
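| /* Re-derive the exit counts so the size we free matches the size computed |
| * by fragment_heap_size() when the fragment was allocated. |
| */ |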
| for (; l != NULL; l = LINKSTUB_NEXT_EXIT(l)) { |
| if (LINKSTUB_DIRECT(l->flags)) |
| direct_exits++; |
| else { |
| ASSERT(LINKSTUB_INDIRECT(l->flags)); |
| indirect_exits++; |
| } |
| } |
| heapsz = fragment_heap_size(f->flags, direct_exits, indirect_exits); |
| |
| STATS_INC(num_fragments_deleted); |
| |
| if (HAS_STORED_TRANSLATION_INFO(f)) { |
| ASSERT(FRAGMENT_TRANSLATION_INFO(f) != NULL); |
| translation_info_free(dcontext, FRAGMENT_TRANSLATION_INFO(f)); |
| } else |
| ASSERT(FRAGMENT_TRANSLATION_INFO(f) == NULL); |
| |
| /* N.B.: monitor_remove_fragment() was called in fragment_delete, |
| * which is assumed to have been called prior to fragment_free |
| */ |
| |
| linkstub_free_exitstubs(dcontext, f); |
| |
| if ((f->flags & FRAG_IS_TRACE) != 0) { |
| trace_only_t *t = TRACE_FIELDS(f); |
| if (t->bbs != NULL) { |
| nonpersistent_heap_free(alloc_dc, t->bbs, t->num_bbs*sizeof(trace_bb_info_t) |
| HEAPACCT(ACCT_TRACE)); |
| } |
| nonpersistent_heap_free(alloc_dc, f, heapsz HEAPACCT(ACCT_TRACE)); |
| } |
| else { |
| nonpersistent_heap_free(alloc_dc, f, heapsz HEAPACCT(ACCT_FRAGMENT)); |
| } |
| } |
| |
| /* Returns the end of the fragment body + any local stubs (excluding selfmod copy) */ |
| cache_pc |
| fragment_stubs_end_pc(fragment_t *f) |
| { |
| if (TEST(FRAG_SELFMOD_SANDBOXED, f->flags)) |
| return FRAGMENT_SELFMOD_COPY_PC(f); |
| else |
| return f->start_pc + f->size; |
| } |
| |
| /* Returns the end of the fragment body (excluding exit stubs and selfmod copy) */ |
| cache_pc |
| fragment_body_end_pc(dcontext_t *dcontext, fragment_t *f) |
| { |
| linkstub_t *l; |
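| /* The body ends where the first local exit stub begins. */ |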
| for (l = FRAGMENT_EXIT_STUBS(f); l; l = LINKSTUB_NEXT_EXIT(l)) { |
| if (EXIT_HAS_LOCAL_STUB(l->flags, f->flags)) { |
| return EXIT_STUB_PC(dcontext, f, l); |
| } |
| } |
| /* must be no stubs after fragment body */ |
| return fragment_stubs_end_pc(f); |
| } |
| |
| #ifdef PROFILE_LINKCOUNT |
| linkcount_type_t |
| get_total_linkcount(fragment_t *f) |
| { |
| /* return total count of exit counters */ |
| linkstub_t *l; |
| linkcount_type_t total = (linkcount_type_t) 0; |
| for (l = FRAGMENT_EXIT_STUBS(f); l != NULL; l = LINKSTUB_NEXT_EXIT(l)) { |
| total += l->count; |
| } |
| return total; |
| } |
| #endif |
| |
| #if defined(CLIENT_INTERFACE) && defined(CLIENT_SIDELINE) |
| /* synchronization routines needed by sideline threads so that fragments |
| * they are referencing do not get deleted out from under them */ |
| |
| void |
| fragment_get_fragment_delete_mutex(dcontext_t *dcontext) |
| { |
| if (dynamo_exited || dcontext == GLOBAL_DCONTEXT) |
| return; |
| mutex_lock(&(((per_thread_t *) dcontext->fragment_field)->fragment_delete_mutex)); |
| } |
| |
| void |
| fragment_release_fragment_delete_mutex(dcontext_t *dcontext) |
| { |
| if (dynamo_exited || dcontext == GLOBAL_DCONTEXT) |
| return; |
| mutex_unlock(&(((per_thread_t *) dcontext->fragment_field)->fragment_delete_mutex)); |
| } |
| #endif |
| |
| /* cleaner to have own flags since there are no negative versions |
| * of FRAG_SHARED and FRAG_IS_TRACE for distinguishing from "don't care" |
| */ |
| enum { |
| LOOKUP_TRACE = 0x001, |
| LOOKUP_BB = 0x002, |
| LOOKUP_PRIVATE = 0x004, |
| LOOKUP_SHARED = 0x008, |
| }; |
| |
| /* A lookup constrained by bb/trace and/or shared/private */ |
| static inline fragment_t * |
| fragment_lookup_type(dcontext_t *dcontext, app_pc tag, uint lookup_flags) |
| { |
| fragment_t *f; |
| |
| LOG(THREAD, LOG_MONITOR, 6, "fragment_lookup_type "PFX" 0x%x\n", |
| tag, lookup_flags); |
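| /* Lookup order: private traces, then private bbs, then shared traces, |
| * then shared bbs; the first match wins. |
| */ |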
| if (dcontext != GLOBAL_DCONTEXT && TEST(LOOKUP_PRIVATE, lookup_flags)) { |
| /* FIXME: add a hashtablex.h wrapper that checks #entries and |
| * grabs lock for us for all lookups? |
| */ |
| /* look at private tables */ |
| per_thread_t *pt = (per_thread_t *) dcontext->fragment_field; |
| /* case 147: traces take precedence over bbs */ |
| if (PRIVATE_TRACES_ENABLED() && TEST(LOOKUP_TRACE, lookup_flags)) { |
| /* now try trace table */ |
| f = hashtable_fragment_lookup(dcontext, (ptr_uint_t)tag, &pt->trace); |
| if (f->tag != NULL) { |
| ASSERT(f->tag == tag); |
| DOLOG(2, LOG_FRAGMENT, { |
| if (DYNAMO_OPTION(shared_traces)) { |
| /* ensure private trace never shadows shared trace */ |
| fragment_t *sf; |
| read_lock(&shared_trace->rwlock); |
| sf = hashtable_fragment_lookup(dcontext, (ptr_uint_t)tag, |
| shared_trace); |
| read_unlock(&shared_trace->rwlock); |
| ASSERT(sf->tag == NULL); |
| } |
| }); |
| ASSERT(!TESTANY(FRAG_FAKE|FRAG_COARSE_GRAIN, f->flags)); |
| return f; |
| } |
| } |
| if (TEST(LOOKUP_BB, lookup_flags) && pt->bb.entries > 0) { |
| /* basic block table last */ |
| f = hashtable_fragment_lookup(dcontext, (ptr_uint_t)tag, &pt->bb); |
| if (f->tag != NULL) { |
| ASSERT(f->tag == tag); |
| DOLOG(2, LOG_FRAGMENT, { |
| if (DYNAMO_OPTION(shared_bbs)) { |
| /* ensure private bb never shadows shared bb, except for |
| * temp privates for trace building |
| */ |
| fragment_t *sf; |
| read_lock(&shared_bb->rwlock); |
| sf = hashtable_fragment_lookup(dcontext, (ptr_uint_t)tag, |
| shared_bb); |
| read_unlock(&shared_bb->rwlock); |
| ASSERT(sf->tag == NULL || TEST(FRAG_TEMP_PRIVATE, f->flags)); |
| } |
| }); |
| ASSERT(!TESTANY(FRAG_FAKE|FRAG_COARSE_GRAIN, f->flags)); |
| return f; |
| } |
| } |
| } |
| |
| if (TEST(LOOKUP_SHARED, lookup_flags)) { |
| if (DYNAMO_OPTION(shared_traces) && TEST(LOOKUP_TRACE, lookup_flags)) { |
| /* MUST look at shared trace table before shared bb table, |
| * since a shared trace can shadow a shared trace head |
| */ |
| read_lock(&shared_trace->rwlock); |
| f = hashtable_fragment_lookup(dcontext, (ptr_uint_t)tag, shared_trace); |
| read_unlock(&shared_trace->rwlock); |
| if (f->tag != NULL) { |
| ASSERT(f->tag == tag); |
| ASSERT(!TESTANY(FRAG_FAKE|FRAG_COARSE_GRAIN, f->flags)); |
| return f; |
| } |
| } |
| |
| if (DYNAMO_OPTION(shared_bbs) && TEST(LOOKUP_BB, lookup_flags)) { |
| /* MUST look at private trace table before shared bb table, |
| * since a private trace can shadow a shared trace head |
| */ |
| read_lock(&shared_bb->rwlock); |
| f = hashtable_fragment_lookup(dcontext, (ptr_uint_t)tag, shared_bb); |
| read_unlock(&shared_bb->rwlock); |
| if (f->tag != NULL) { |
| ASSERT(f->tag == tag); |
| ASSERT(!TESTANY(FRAG_FAKE|FRAG_COARSE_GRAIN, f->flags)); |
| return f; |
| } |
| } |
| } |
| |
| return NULL; |
| } |
| |
| /* lookup a fragment tag */ |
| fragment_t * |
| fragment_lookup(dcontext_t *dcontext, app_pc tag) |
| { |
| return fragment_lookup_type(dcontext, tag, |
| LOOKUP_TRACE|LOOKUP_BB|LOOKUP_PRIVATE|LOOKUP_SHARED); |
| } |
| |
| /* lookup a fragment tag, but only look in trace tables |
| * N.B.: because of shadowing this may not return what fragment_lookup() returns! |
| */ |
| fragment_t * |
| fragment_lookup_trace(dcontext_t *dcontext, app_pc tag) |
| { |
| return fragment_lookup_type(dcontext, tag, LOOKUP_TRACE|LOOKUP_PRIVATE|LOOKUP_SHARED); |
| } |
| |
| /* lookup a fragment tag, but only look in bb tables |
| * N.B.: because of shadowing this may not return what fragment_lookup() returns! |
| */ |
| fragment_t * |
| fragment_lookup_bb(dcontext_t *dcontext, app_pc tag) |
| { |
| return fragment_lookup_type(dcontext, tag, LOOKUP_BB|LOOKUP_PRIVATE|LOOKUP_SHARED); |
| } |
| |
| /* lookup a fragment tag, but only look in shared bb table |
| * N.B.: because of shadowing this may not return what fragment_lookup() returns! |
| */ |
| fragment_t * |
| fragment_lookup_shared_bb(dcontext_t *dcontext, app_pc tag) |
| { |
| return fragment_lookup_type(dcontext, tag, LOOKUP_BB|LOOKUP_SHARED); |
| } |
| |
| /* lookup a fragment tag, but only look in tables that are the same shared-ness |
| * as flags. |
| * N.B.: because of shadowing this may not return what fragment_lookup() returns! |
| */ |
| fragment_t * |
| fragment_lookup_same_sharing(dcontext_t *dcontext, app_pc tag, uint flags) |
| { |
| return fragment_lookup_type(dcontext, tag, LOOKUP_TRACE|LOOKUP_BB| |
| (TEST(FRAG_SHARED, flags) ? |
| LOOKUP_SHARED : LOOKUP_PRIVATE)); |
| } |
| |
| #ifdef DEBUG /* currently only used for debugging */ |
| static fragment_t * |
| hashtable_pclookup(dcontext_t *dcontext, fragment_table_t *table, cache_pc pc) |
| { |
| uint i; |
| fragment_t *f; |
| ASSERT_TABLE_SYNCHRONIZED(table, READWRITE); /* lookup requires read (or write) lock */ |
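| /* Linear walk over every bucket: pc can fall anywhere inside a fragment |
| * body, so we cannot find it by hashing on the tag. |
| */ |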
| for (i = 0; i < table->capacity; i++) { |
| f = table->table[i]; |
| if (!REAL_FRAGMENT(f)) |
| continue; |
| if (pc >= f->start_pc && pc < (f->start_pc + f->size)) { |
| return f; |
| } |
| } |
| return NULL; |
| } |
| |
| /* lookup a fragment pc in the fcache by walking all hashtables. |
| * we have more efficient methods (fcache_fragment_pclookup) so this is only |
| * used for debugging. |
| */ |
| fragment_t * |
| fragment_pclookup_by_htable(dcontext_t *dcontext, cache_pc pc, fragment_t *wrapper) |
| { |
| /* if every fragment is guaranteed to end in 1+ stubs (which |
| * is not true for DYNAMO_OPTION(separate_private_stubs)) we can |
| * simply decode forward until we hit the stub and recover |
| * the linkstub_t* from there -- much more efficient than walking |
| * all the hashtables, plus nicely handles invisible & removed frags! |
| * FIXME: measure perf hit of pclookup, implement this decode strategy. |
| * also we can miss invisible or removed fragments (case 122) so we |
| * may want this regardless of performance -- see also FIXME below. |
| */ |
| fragment_t *f; |
| per_thread_t *pt = NULL; |
| if (dcontext != GLOBAL_DCONTEXT) { |
| pt = (per_thread_t *) dcontext->fragment_field; |
| /* look at private traces first */ |
| if (PRIVATE_TRACES_ENABLED()) { |
| f = hashtable_pclookup(dcontext, &pt->trace, pc); |
| if (f != NULL) |
| return f; |
| } |
| } |
| if (DYNAMO_OPTION(shared_traces)) { |
| /* then shared traces */ |
| read_lock(&shared_trace->rwlock); |
| f = hashtable_pclookup(dcontext, shared_trace, pc); |
| read_unlock(&shared_trace->rwlock); |
| if (f != NULL) |
| return f; |
| } |
| if (DYNAMO_OPTION(shared_bbs)) { |
| /* then shared basic blocks */ |
| read_lock(&shared_bb->rwlock); |
| f = hashtable_pclookup(dcontext, shared_bb, pc); |
| read_unlock(&shared_bb->rwlock); |
| if (f != NULL) |
| return f; |
| } |
| if (dcontext != GLOBAL_DCONTEXT) { |
| /* now private basic blocks */ |
| f = hashtable_pclookup(dcontext, &pt->bb, pc); |
| if (f != NULL) |
| return f; |
| } |
| if (DYNAMO_OPTION(coarse_units)) { |
| coarse_info_t *info = get_executable_area_coarse_info(pc); |
| while (info != NULL) { /* loop over primary and secondary unit */ |
| cache_pc body; |
| app_pc tag = fragment_coarse_pclookup(dcontext, info, pc, &body); |
| if (tag != NULL) { |
| ASSERT(wrapper != NULL); |
| fragment_coarse_wrapper(wrapper, tag, body); |
| return wrapper; |
| } |
| ASSERT(info->frozen || info->non_frozen == NULL); |
| info = info->non_frozen; |
| ASSERT(info == NULL || !info->frozen); |
| } |
| } |
| /* FIXME: shared fragment may have been removed from hashtable but |
| * still be in cache, and e.g. handle_modified_code still needs to know about it -- |
| * should walk deletion vector |
| */ |
| return NULL; |
| } |
| #endif /* DEBUG */ |
| |
| /* lookup a fragment pc in the fcache */ |
| fragment_t * |
| fragment_pclookup(dcontext_t *dcontext, cache_pc pc, fragment_t *wrapper) |
| { |
| /* Rather than walk every single hashtable, including the invisible table, |
| * and the pending-deletion list (case 3567), we find the fcache unit |
| * and walk it. |
| * An even more efficient alternative would be to decode backward, but |
| * that's not doable in general. |
| * |
| * If every fragment is guaranteed to end in 1+ stubs (which |
| * is not true for DYNAMO_OPTION(separate_{private,shared}_stubs)) we can |
| * simply decode forward until we hit the stub and recover |
| * the linkstub_t* from there. |
| * Or we can decode until we hit a jmp and if it's to a linked fragment_t, |
| * search its incoming list. |
| * Stub decoding is complicated by CLIENT_INTERFACE custom stubs and by |
| * PROFILE_LINKCOUNT stub variations. |
| */ |
| return fcache_fragment_pclookup(dcontext, pc, wrapper); |
| } |
| |
| /* Performs a pclookup and if the result is a coarse-grain fragment, allocates |
| * a new fragment_t+linkstubs. |
| * Returns in alloc whether the returned fragment_t was allocated and needs to be |
| * freed by the caller via fragment_free(). |
| * If no result is found, alloc is set to false. |
| * FIXME: use FRAG_RECREATED flag to indicate allocated instead? |
| */ |
| fragment_t * |
| fragment_pclookup_with_linkstubs(dcontext_t *dcontext, cache_pc pc, |
| /*OUT*/bool *alloc) |
| { |
| fragment_t wrapper; |
| fragment_t *f = fragment_pclookup(dcontext, pc, &wrapper); |
| ASSERT(alloc != NULL); |
| if (f != NULL && TEST(FRAG_COARSE_GRAIN, f->flags)) { |
| ASSERT(f == &wrapper); |
| f = fragment_recreate_with_linkstubs(dcontext, f); |
| *alloc = true; |
| } else |
| *alloc = false; |
| return f; |
| } |
| |
| /* add f to the ftable */ |
| void |
| fragment_add(dcontext_t *dcontext, fragment_t *f) |
| { |
| per_thread_t *pt = (per_thread_t *) dcontext->fragment_field; |
| fragment_table_t *table = GET_FTABLE(pt, f->flags); |
| bool resized; |
| /* no future frags! */ |
| ASSERT(!TEST(FRAG_IS_FUTURE, f->flags)); |
| |
| DOCHECK(1, { |
| fragment_t *existing = fragment_lookup(dcontext, f->tag); |
| ASSERT(existing == NULL || |
| IF_CUSTOM_TRACES(/* we create and persist shadowed trace heads */ |
| (TEST(FRAG_IS_TRACE_HEAD, f->flags) || |
| TEST(FRAG_IS_TRACE_HEAD, existing->flags)) ||) |
| /* private trace or temp can shadow shared bb */ |
| (TESTANY(FRAG_IS_TRACE | FRAG_TEMP_PRIVATE, f->flags) && |
| TEST(FRAG_SHARED, f->flags) != TEST(FRAG_SHARED, existing->flags)) || |
| /* shared trace can shadow shared trace head, even with |
| * -remove_shared_trace_heads */ |
| (TESTALL(FRAG_IS_TRACE | FRAG_SHARED, f->flags) && |
| !TEST(FRAG_IS_TRACE, existing->flags) && |
| TESTALL(FRAG_SHARED | FRAG_IS_TRACE_HEAD, existing->flags))); |
| }); |
| |
| /* We'd like the shared fragment table synch to be independent of the |
| * bb building synch (which may become more fine-grained in the future), |
| * so an add needs to hold the write lock to prevent conflicts with |
| * other adds. |
| * We may be able to have a scheme where study() and remove() are writers |
| * but add() is a reader -- but that's confusing and prone to errors in |
| * the future. |
| * We assume that synchronizing addition of the same tag is done through |
| * other means -- we cannot grab this while performing the lookup |
| * w/o making our read locks check to see if we're the writer, |
| * which is a perf hit. Only the actual hashtable add is a "write". |
| */ |
| TABLE_RWLOCK(table, write, lock); |
| resized = fragment_add_to_hashtable(dcontext, f, table); |
| TABLE_RWLOCK(table, write, unlock); |
| |
| /* After resizing a table that is targeted by inlined IBL heads, the |
| * current fragment will need to be repatched; but we don't have to |
| * update the stubs when using per-type trace tables, since the trace |
| * table itself is not targeted and therefore resizing it doesn't matter. |
| */ |
| |
| #ifdef SHARING_STUDY |
| if (INTERNAL_OPTION(fragment_sharing_study)) { |
| if (TEST(FRAG_IS_TRACE, f->flags)) |
| add_shared_block(shared_traces, &shared_traces_lock, f); |
| else |
| add_shared_block(shared_blocks, &shared_blocks_lock, f); |
| } |
| #endif |
| } |
| |
| /* Many options, use macros in fragment.h for readability |
| * If output: |
| * dumps f to trace file |
| * If remove: |
| * removes f from ftable |
| * If unlink: |
| * if f is linked, unlinks f |
| * removes f from incoming link tables |
| * If fcache: |
| * deletes f from fcache unit |
| */ |
| void |
| fragment_delete(dcontext_t *dcontext, fragment_t *f, uint actions) |
| { |
| #if defined(CLIENT_INTERFACE) && defined(CLIENT_SIDELINE) |
| bool acquired_shared_vm_lock = false; |
|