| /* ********************************************************** |
| * Copyright (c) 2011-2013 Google, Inc. All rights reserved. |
| * Copyright (c) 2005-2010 VMware, Inc. All rights reserved. |
| * **********************************************************/ |
| |
| /* |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are met: |
| * |
| * * Redistributions of source code must retain the above copyright notice, |
| * this list of conditions and the following disclaimer. |
| * |
| * * Redistributions in binary form must reproduce the above copyright notice, |
| * this list of conditions and the following disclaimer in the documentation |
| * and/or other materials provided with the distribution. |
| * |
| * * Neither the name of VMware, Inc. nor the names of its contributors may be |
| * used to endorse or promote products derived from this software without |
| * specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE |
| * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
| * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
| * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
| * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
| * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH |
| * DAMAGE. |
| */ |
| |
| /* Copyright (c) 2005-2007 Determina Corp. */ |
| |
| /* |
| * hotpatch.c - Hot patching mechanism |
| */ |
| |
| #include "globals.h" |
| #include "fragment.h" |
| #include "arch.h" |
| #include "instr.h" |
| #include "instr_create.h" |
| #include "decode.h" |
| #include "instrument.h" |
| #include "hotpatch.h" |
| #include "hotpatch_interface.h" |
| #include "moduledb.h" /* macros for nudge; can be moved with nudge to os.c */ |
| |
| #ifndef WINDOWS |
| # include <string.h> |
| #endif |
| |
| #include <limits.h> /* for ULLONG_MAX */ |
| |
| #include "fcache.h" /* for fcache_reset_all_caches_proactively */ |
| #ifdef GBOP |
| # include "aslr.h" |
| #endif |
| |
| #include "perscache.h" |
| #include "synch.h" |
| |
| /* Note: HOT_PATCHING_INTERFACE refers to the mechanism for injecting code at |
| * arbitrary points in the application text. It was formerly known as |
| * constraint injection. It has nothing to do with the DR mechanism that |
| * allows for dynamically changing existing instructions, as indicated |
| * by INSTR_HOT_PATCHABLE in instr.h |
| */ |
| #ifdef HOT_PATCHING_INTERFACE /* Around the whole file */ |
| /*----------------------------------------------------------------------------*/ |
| /* Local typed constants. */ |
| |
| /* Local untyped constants. */ |
| |
| /* Defined for non-GBOP as well since it is used for -probe_api. */ |
| #define HOTP_ONLY_GBOP_PRECEDENCE 10 |
| |
| /* Limits for vulnerability data provided by the constraint writer. */ |
| enum { |
| MIN_HOTP_INTERFACE_VERSION = HOTP_INTERFACE_VERSION, |
| MAX_HOTP_INTERFACE_VERSION = HOTP_INTERFACE_VERSION, |
| MIN_NUM_VULNERABILITIES = 1, |
| MAX_NUM_VULNERABILITIES = 10000, |
| MIN_VULNERABILITY_ID_LENGTH = 1, |
| MAX_VULNERABILITY_ID_LENGTH = 32, |
| MIN_POLICY_ID_LENGTH = HOTP_POLICY_ID_LENGTH, |
| MAX_POLICY_ID_LENGTH = HOTP_POLICY_ID_LENGTH, |
| MIN_POLICY_VERSION = 1, |
| MAX_POLICY_VERSION = 10000, |
| MIN_NUM_SETS = 1, |
| MAX_NUM_SETS = 10000, |
| MIN_NUM_MODULES = 1, |
| MAX_NUM_MODULES = 10000, |
| |
| /* We don't expect PE files to be more than 1 GB in size. */ |
| MAX_MODULE_SIZE = 1024*1024*1024, |
| |
| /* Can have PEs with time stamp as zero, though fairly unlikely; zero |
| * checksum is more likely, zero file version is somewhat likely and zero |
| * {image,code} size is extremely unlikely. The max values, though, are |
| * unlikely to exist in reality, so we use these limits as the don't-care |
| * values for timestamp, checksum, {image,code} size & file version. |
| */ |
| MIN_PE_TIMESTAMP = 0, |
| MAX_PE_TIMESTAMP = UINT_MAX, |
| PE_TIMESTAMP_IGNORE = UINT_MAX, |
| PE_TIMESTAMP_UNAVAILABLE = PE_TIMESTAMP_IGNORE - 1, |
| |
| MIN_PE_CHECKSUM = 0, |
| MAX_PE_CHECKSUM = UINT_MAX, |
| PE_CHECKSUM_IGNORE = UINT_MAX, |
| PE_CHECKSUM_UNAVAILABLE = PE_CHECKSUM_IGNORE - 1, |
| |
| MIN_PE_IMAGE_SIZE = 0, |
| MAX_PE_IMAGE_SIZE = UINT_MAX, |
| PE_IMAGE_SIZE_IGNORE = UINT_MAX, |
| PE_IMAGE_SIZE_UNAVAILABLE = PE_IMAGE_SIZE_IGNORE - 1, |
| |
| MIN_PE_CODE_SIZE = 0, /* kbdus.dll has only data in it */ |
| MAX_PE_CODE_SIZE = UINT_MAX, |
| PE_CODE_SIZE_IGNORE = UINT_MAX, |
| PE_CODE_SIZE_UNAVAILABLE = PE_CODE_SIZE_IGNORE - 1, |
| |
| MIN_PE_FILE_VERSION = 0, |
| MAX_PE_FILE_VERSION = ULLONG_MAX, |
| PE_FILE_VERSION_IGNORE = IF_WINDOWS_ELSE(MODULE_FILE_VERSION_INVALID, ULLONG_MAX), |
| PE_FILE_VERSION_UNAVAILABLE = PE_FILE_VERSION_IGNORE - 1, |
| |
| MIN_NUM_PATCH_POINT_HASHES = 1, |
| MAX_NUM_PATCH_POINT_HASHES = 10000, |
| MIN_HASH_START_OFFSET = 1, |
| MAX_HASH_START_OFFSET = MAX_MODULE_SIZE, /* Can't exceed module size. */ |
| MIN_HASH_LENGTH = 1, |
| MAX_HASH_LENGTH = MAX_MODULE_SIZE, /* Can't exceed module size. */ |
| MIN_HASH_VALUE = 0, |
| MAX_HASH_VALUE = UINT_MAX, |
| |
| MIN_NUM_PATCH_POINTS = MIN_NUM_PATCH_POINT_HASHES, |
| MAX_NUM_PATCH_POINTS = MAX_NUM_PATCH_POINT_HASHES, |
| MIN_PATCH_OFFSET = 1, |
| MAX_PATCH_OFFSET = MAX_MODULE_SIZE, /* Can't exceed module size. */ |
| MIN_PATCH_PRECEDENCE = 1, |
| MAX_PATCH_PRECEDENCE = 10000, |
| MIN_DETECTOR_OFFSET = 1, |
| /* Hot patch dlls shouldn't be anywhere near 10 MB in size; |
| * this check is just to catch some wrong file being loaded by accident. |
| * Today a typical hot patch is far less than 1k in size, so to hit 10 MB |
| * we would need a minimum of 10000 constraints of 1k each - unlikely. |
| */ |
| MAX_DETECTOR_OFFSET = 10*1024*1024, |
| /* Protectors should exist for all hot patches, even if they do nothing. */ |
| MIN_PROTECTOR_OFFSET = 1, |
| MAX_PROTECTOR_OFFSET = MAX_DETECTOR_OFFSET, |
| /* A zero offset means either that there is no protector or that the |
| * protector requests no control flow change. |
| */ |
| MIN_RETURN_ADDR = 0, |
| /* We don't expect return addresses to cross modules; given that we |
| * don't expect a module to be more than 1 GB in size, the return address |
| * offset shouldn't be more than 1 GB either. |
| */ |
| MAX_RETURN_ADDR = MAX_PATCH_OFFSET, |
| MIN_MODE = HOTP_MODE_OFF, |
| MAX_MODE = HOTP_MODE_PROTECT, |
| |
| /* case 8051: > 256KB per-process means we should start thinking about |
| * sharing. 24-Apr-07: sharing is in plan for 4.3 or 4.4; upping to 384k. |
| * Note: this is used only in debug builds; release builds can handle all |
| * sizes as long as we don't run out of memory. */ |
| MAX_POLICY_FILE_SIZE = 384 * 1024 |
| |
| }; |
| #define PE_NAME_IGNORE "*" /* Can't have strings in the enum. */ |
| #define PE_NAME_UNAVAILABLE '\0' |
| /*----------------------------------------------------------------------------*/ |
| /* Local type definitions. */ |
| |
| /* Module signature is used to uniquely describe a module, in our case, a Win32 |
| * PE module. |
| */ |
| typedef struct { /* xref case 4688 */ |
| const char *pe_name; |
| |
| /* Don't-care values for pe_{checksum,timestamp,{image,code}_size, |
| * file_version} will be their respective MAX values. See enum above. |
| */ |
| uint pe_checksum; |
| uint pe_timestamp; |
| size_t pe_image_size; |
| |
| /* Refers to the sum of the unpadded sizes of all executable sections in the |
| * PE image. The section size used is from get_image_section_unpadded_size() |
| * which equals VirtualSize (unless that is 0 in which case it equals SizeOfRawData). |
| * |
| * As an aside, note that VirtualSize usually has no alignment padding while |
| * SizeOfRawData is typically padded to FileAlignment (the image loader pads |
| * VirtualSize to SectionAlignment), so SizeOfRawData is often larger than |
| * VirtualSize for fully initialized sections. This is the opposite of how it |
| * is in unix/elf, where the raw/file size is usually smaller than the |
| * virtual/mem size because the latter does the alignment; also, unix usually |
| * uses two different mmaps to load the image, as opposed to one on windows. |
| * Though xref case 5355: what is actually accepted (and generated by some |
| * compilers) differs from what is typical/legal per the pe specification. |
| * |
| * Using _code_ rather than _text_ in the name because text usually refers |
| * only to the .text section. |
| */ |
| size_t pe_code_size; |
| |
| /* Found in the resource section, some PE file may not have it, in which |
| * case it will be set to its don't-care value. |
| */ |
| uint64 pe_file_version; |
| } hotp_module_sig_t; |
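| |
| /* A sketch of how the don't-care values above factor into signature |
| * matching (illustrative only; the real matching is done in |
| * hotp_process_image(), and module_name/module_checksum are hypothetical |
| * locals standing in for the loaded module's attributes): |
| * matched = (strcmp(sig->pe_name, PE_NAME_IGNORE) == 0 || |
| * strcmp(sig->pe_name, module_name) == 0) && |
| * (sig->pe_checksum == PE_CHECKSUM_IGNORE || |
| * sig->pe_checksum == module_checksum) && |
| * ... likewise for timestamp, {image,code} size & file version. |
| */ |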
| |
| /* A patch point describes the application address to patch and the addresses |
| * of the hot patch functions that will be used for patching. If a hot patch |
| * (only a protector) intends to change the application's control flow, then |
| * the address to which control should go after the hot patch is executed is |
| * also specified. A precedence attribute defines the order (rank) in which a |
| * particular patch is to be applied if more than one needs to be applied at |
| * the same application offset. All addresses are relative to the base of the |
| * module. |
| */ |
| /* TODO: typedef uint app_rva_t to define offsets; app_pc is actually an address, |
| * not an offset, so don't use it for defining offsets |
| */ |
| /* app_pc is a pointer, not an offset; using it to compute a pointer with a |
| * base address gives a compiler error about adding two pointers. Hence, a new |
| * type to define module offsets. |
| */ |
| typedef struct { |
| app_rva_t offset; /* offset relative to the base of the module where |
| * the patch is to be applied */ |
| |
| /* TODO: clearly split each structure into read only and runtime data |
| * because things are tending to go out of synch again; can create a |
| * parallel tree later on. |
| */ |
| app_rva_t detector_fn; /* Offset of the detector function from the base |
| * of the hot patch dll */ |
| app_rva_t protector_fn; |
| app_rva_t return_addr; |
| |
| /* NYI (was never needed in practice, though at design time I thought this |
| * was needed for supporting multiple patches at the same address); lower |
| * numbers mean higher precedence. |
| */ |
| uint precedence; |
| |
| /*------------------------------------------------------------------------*/ |
| /* The following fields are part of runtime policy/vulnerability data, not |
| * part of vulnerability definitions, i.e., shouldn't be shared across |
| * processes. |
| */ |
| /* TODO: num_injected at the vulnerability level; relevant here? */ |
| |
| /* Buffer to hold the trampoline with which a patch point was hooked in |
| * order to execute a hot patch in hotp_only mode. Should be NULL for |
| * regular hot patching, i.e., with fcache. |
| */ |
| byte *trampoline; |
| |
| /* Pointer to the copy of app code that resides inside the trampoline, |
| * that gets executed at the end of trampoline execution; this is the app |
| * code that existed at the injection point. Used only by hotp_only. |
| */ |
| byte *app_code_copy; |
| |
| /* Pointer to the cti target inside the trampoline (the one that is used to |
| * implement AFTER_INTERCEPT_LET_GO_ALT_DYN) that is used to |
| * change control flow. Used only in hotp_only mode for a patch point |
| * that requests a control flow change, i.e., has non-zero return_addr. |
| */ |
| byte *tramp_exit_tgt; |
| } hotp_patch_point_t; |
| |
| /* Experiments showed that the maximum size of a single interception |
| * trampoline/hook is about 400 to 450 bytes, so 512 should be adequate. |
| */ |
| #define HOTP_ONLY_TRAMPOLINE_SIZE 512 |
| #define HOTP_ONLY_NUM_THREADS_AT_INIT -1 |
| |
| /* A patch region size of 5 is used for hotp_only mode. This is done so |
| * that the same vm_area_t vector (hotp_patch_point_areas) can be used for |
| * patch point overlap checks and address lookup. Note: 5 is the minimum |
| * bytes needed to encode/insert a direct jmp with 32-bit displacement, i.e., |
| * a hook. |
| * For hotp in code cache, all patch regions are points, so patch region size |
| * 1 is used. In this mode it is used only for patch address lookup. |
| * |
| * NOTE: Investigate issues when implementing hotp_only for native_exec dlls as |
| * we would have to have regions with different sizes - might trigger a |
| * few hotp asserts. |
| * |
| * Use -1 as an error catching value if this macro is used without -hot_patching. |
| */ |
| #define HOTP_ONLY_PATCH_REGION_SIZE (5) |
| #define HOTP_CACHE_PATCH_REGION_SIZE (1) |
| #define HOTP_BAD_PATCH_REGION_SIZE (-1) |
| #define HOTP_PATCH_REGION_SIZE \ |
| (DYNAMO_OPTION(hot_patching) ? \ |
| (DYNAMO_OPTION(hotp_only) ? \ |
| HOTP_ONLY_PATCH_REGION_SIZE : \ |
| HOTP_CACHE_PATCH_REGION_SIZE) : \ |
| HOTP_BAD_PATCH_REGION_SIZE) |
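| |
| /* For reference, the 5-byte region size matches the encoding of the direct |
| * jmp with 32-bit displacement mentioned above: opcode 0xe9 followed by a |
| * rel32 operand computed relative to the end of the instruction. A sketch, |
| * with patch_addr and target as hypothetical locals (the real injection |
| * goes through the trampoline machinery later in this file): |
| * *patch_addr = 0xe9; |
| * *(int *)(patch_addr + 1) = (int)(target - (patch_addr + 5)); |
| */ |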
| |
| /* This structure is used to define a hash value for a specified region |
| * around a patch point as decided by the hot patch writer. This hash, which |
| * is provided by the hot patch writer, will be used at run time as part of the |
| * mechanism to identify a given PE module for injecting hot patches. |
| */ |
| typedef struct { |
| /* Offset, relative to the base of the module to be patched, that should be |
| * used as the starting point of the hash computation. |
| */ |
| app_rva_t start; |
| uint len; /* number of bytes to be used for hash computation */ |
| uint hash_value; |
| } hotp_patch_point_hash_t; |
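| |
| /* Match-time use is a comparison of the computed hash against the stored |
| * value; a sketch using hotp_compute_hash(), which is declared later in |
| * this file: |
| * hotp_patch_point_hash_t *h = &module->hashes[i]; |
| * if (hotp_compute_hash(module->base_address, h) != h->hash_value) |
| * ...; (the image bytes differ, so the module isn't a match) |
| */ |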
| |
| typedef struct { |
| hotp_module_sig_t sig; |
| uint num_patch_points; |
| hotp_patch_point_t *patch_points; |
| uint num_patch_point_hashes; |
| hotp_patch_point_hash_t *hashes; |
| |
| /* Data computed at run time; should be zeroed out at read time */ |
| bool matched; /* True if current module is loaded & matched. */ |
| app_pc base_address; |
| } hotp_module_t; |
| |
| typedef struct { |
| uint num_modules; |
| hotp_module_t *modules; |
| } hotp_set_t; |
| |
| /* Note: status and statistics are kept in a separate structure to allow for |
| * easy output, either via a file or via read only memory. |
| * Note: whole struct is runtime data; hence separated out. |
| */ |
| typedef struct { |
| hotp_exec_status_t exec_status; |
| |
| /* Points to the one in hotp_policy_status_t to avoid duplication. */ |
| hotp_inject_status_t *inject_status; |
| |
| /* TODO: num_injected at the vulnerability level */ |
| /* TODO: decide on the size of stats (uint or uint64) before finalizing |
| * interface with jim |
| */ |
| uint64 num_detected; |
| uint64 num_not_detected; |
| uint64 num_detector_error; |
| uint64 num_protected; |
| uint64 num_not_protected; |
| uint64 num_kill_thread; |
| uint64 num_kill_process; |
| uint64 num_raise_exception; |
| uint64 num_change_control_flow; |
| uint64 num_protector_error; |
| uint64 num_aborted; |
| } hotp_vul_info_t; |
| |
| /* The types are defined as unique bit flags because in the future a patch |
| * may belong to more than one type. |
| * For example a hot patch with a symbolic offset may be |
| * HOTP_TYPE_SYMBOLIC_TYPE | HOTP_TYPE_HOT_PATCH, whereas a gbop hook may be |
| * HOTP_TYPE_SYMBOLIC_TYPE | HOTP_TYPE_GBOP_HOOK. |
| */ |
| typedef enum { |
| /* This represents the patches that fix vulnerabilities, as described by |
| * the hot patch injection design. |
| */ |
| HOTP_TYPE_HOT_PATCH = 0x1, |
| |
| /* This represents all gbop hook points. This type is different in that it: |
| * 1. Isn't specified by a config file; well not as of now (FIXME?), |
| * 2. Is specified by gbop_hooks and/or gbop_include_list (FIXME: NYI), |
| * 3. Can't be turned off by the modes file; well, not as of now (FIXME?), |
| * 4. Can be turned off by gbop_exclude_list (FIXME: NYI), |
| * 5. Uses a symbolic name rather than identifying the PE uniquely, |
| * 6. Has a generic detector and protector which are part of the core, and, |
| * 7. Uses the core defaults for events, actions, dumps & forensics |
| * (FIXME: NYI). |
| */ |
| HOTP_TYPE_GBOP_HOOK = 0x2, |
| |
| /* Currently will be exclusive with HOTP_TYPE_{HOT_PATCH,GBOP_HOOK}, |
| * eventually will co-exist. |
| */ |
| HOTP_TYPE_PROBE = 0x4 |
| } hotp_type_t; |
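| |
| /* Since these are bit flags, type checks use the TEST* macros rather than |
| * equality; e.g., the hotp dll loading code below does: |
| * if (TESTANY(HOTP_TYPE_HOT_PATCH | HOTP_TYPE_PROBE, VUL(vul_tab, vul).type)) |
| * ...; (this type has an associated hot patch dll) |
| */ |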
| |
| /* hotp_vul_t defines a vulnerability */ |
| /* the entire expanded structure of hotp_vul_t consists of constant data, |
| * except for a few runtime fields; this is so that policies can be |
| * easily read in from file/memory in a binary format, thus eliminating the |
| * need to do any data formatting/processing inside the core. |
| */ |
| typedef struct { |
| const char *vul_id; |
| |
| /* policy_id is of the format XXXX.XXXX so that it can be used to |
| * generate the corresponding threat_id; so use |
| * char policy_id[MAX_POLICY_ID_LENGTH + 1]; |
| * to be consistent with hotp_policy_status_t. TODO: |
| * not done now because SET_STR doesn't handle arrays. |
| */ |
| const char *policy_id; |
| uint policy_version; |
| const char *hotp_dll; |
| const char *hotp_dll_hash; |
| hotp_policy_mode_t mode; |
| |
| uint num_sets; |
| hotp_set_t *sets; |
| |
| /* Data computed at run time; should be zeroed out at read time */ |
| hotp_vul_info_t *info; |
| app_pc hotp_dll_base; |
| /* TODO: if policy data is going to be shared across multiple processes, |
| * info (i.e., runtime data) can't be part of this; a parallel runtime |
| * structure must be created; not a big issue till hot patches reach |
| * thousands in number |
| */ |
| |
| /* FIXME: right now this isn't specified by the config file because |
| * config files are assumed to define only hotpatches. Also, gbop_hooks |
| * are added to the table by a different routine, so there is no room |
| * for ambiguity. If we decide to use the config file for all, then |
| * this type should come from there - that would involve revving up |
| * the hotp interface, i.e., engine version. |
| * Note: probe types are provided by client libraries directly via |
| * dr_register_probes. |
| */ |
| hotp_type_t type; |
| |
| /* The following fields were introduced for probe api. */ |
| |
| /* Unique ID for each probe; must be unique across different clients in the |
| * same process to prevent one client from controlling another's probes. */ |
| unsigned int id; |
| } hotp_vul_t; |
| |
| /* Maintain a list of vulnerability tables so that they can be freed at exit |
| * time. Nudge for policy reading creates new tables. The old ones should |
| * be left alone so that races between hot patch execution and table freeing |
| * are avoided (case 5521). All such tables are freed during DR exit. |
| * FIXME: Release tables using a ref_count in case there are many & memory usage |
| * is high. It is highly unlikely that a given process will get more |
| * than a couple of policy read nudges during its lifetime. |
| * This is a memory usage issue, not a correctness one; work on it after beta. |
| */ |
| typedef struct hotp_vul_tab_t { |
| hotp_vul_t *vul_tab; |
| uint num_vuls; |
| struct hotp_vul_tab_t *next; |
| } hotp_vul_tab_t; |
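| |
| /* Exit-time cleanup sketch for this chain (illustrative only; freeing of |
| * the list node itself is elided, and hotp_old_vul_tabs is declared in the |
| * local data section below): |
| * hotp_vul_tab_t *tab, *next_tab; |
| * for (tab = hotp_old_vul_tabs; tab != NULL; tab = next_tab) { |
| * next_tab = tab->next; |
| * hotp_free_vul_table(tab->vul_tab, tab->num_vuls); |
| * } |
| */ |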
| |
| /* TODO: for now this just has debug information; later on move all hot patch |
| * related globals into this structure. The debug variables listed below need |
| * to be updated during loader activity, which conflicts with our data segment |
| * protection. |
| */ |
| #ifdef DEBUG |
| typedef struct hotp_globals_t { |
| /* The variables below help catch removing the same patch twice and |
| * injecting it twice, which is ok only for loader safety. Technically |
| * each patch point should have this variable, but given that the loader |
| * loads/relocates one dll at a time, this should be ok. |
| */ |
| bool ldr_safe_hook_removal; /* used only in -hotp_only mode */ |
| bool ldr_safe_hook_injection; /* used only in -hotp_only mode */ |
| } hotp_globals_t; |
| #endif |
| /*----------------------------------------------------------------------------*/ |
| /* Macro definitions. */ |
| |
| /* These macros serve two purposes. Firstly they provide a clean interface |
| * to access the global hotp_vul_table, so that direct use of the global |
| * variable can be avoided. Secondly they improve readability; given that |
| * these structures are nested, accessing a member directly would result in |
| * long lines of code, which aren't very readable. |
| */ |
| /* TODO: Derek feels that these macros obfuscate the code rather than making |
| * them readable, which is opposite to what I thought. Try using local |
| * variables and if that looks good, remove these macros. |
| */ |
| #define VUL(vul_table, i) (vul_table[i]) |
| #define SET(vul_table, v, i) (VUL(vul_table, v).sets[i]) |
| #define MODULE(vul_table, v, s, i) (SET(vul_table, v, s).modules[i]) |
| #define SIG(vul_table, v, s, m) (MODULE(vul_table, v, s, m).sig) |
| #define PPOINT(vul_table, v, s, m, i) (MODULE(vul_table, v, s, m).patch_points[i]) |
| #define PPOINT_HASH(vul_table, v, s, m, i) \ |
| (MODULE(vul_table, v, s, m).hashes[i]) |
| |
| #define NUM_GLOBAL_VULS (hotp_num_vuls) |
| #define GLOBAL_VUL_TABLE (hotp_vul_table) |
| #define GLOBAL_VUL(i) VUL(GLOBAL_VUL_TABLE, i) |
| #define GLOBAL_SET(v, i) SET(GLOBAL_VUL_TABLE, v, i) |
| #define GLOBAL_MODULE(v, s, i) MODULE(GLOBAL_VUL_TABLE, v, s,i) |
| #define GLOBAL_SIG(v, s, m) SIG(GLOBAL_VUL_TABLE, v, s, m) |
| #define GLOBAL_PPOINT(v, s, m, i) PPOINT(GLOBAL_VUL_TABLE, v, s, m, i) |
| #define GLOBAL_HASH(v, s, m, i) PPOINT_HASH(GLOBAL_VUL_TABLE, v, s, m, i) |
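| |
| /* For example, GLOBAL_PPOINT(v, s, m, i).offset expands to |
| * hotp_vul_table[v].sets[s].modules[m].patch_points[i].offset, |
| * the kind of long line the macros are meant to avoid. |
| */ |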
| |
| /* TODO: change it to model ATOMIC_ADD; can't use ATOMIC_ADD directly because |
| * it wants only uint, not uint64, which is what all vulnerability stats |
| * are. Maybe the easy way is to make the vul stats uint, but it isn't |
| * clear whether that would result in overflows fairly quickly, esp. for |
| * long running apps. Either way, make this increment non-racy; the users |
| * of this macro assume atomic increments. |
| */ |
| #define VUL_STAT_INC(x) ((x)++); |
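| /* A minimal sketch of a non-racy replacement, assuming a hypothetical |
| * 64-bit atomic-add primitive (no such primitive is defined in this file; |
| * see the TODO above): |
| * #define VUL_STAT_INC(x) ATOMIC_ADD64(x, 1) |
| */ |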
| |
| #define SET_NUM(var, type, limit_str, input_ptr) \ |
| { \ |
| char *str = hotp_get_next_str(&(input_ptr)); \ |
| const char *hex_fmt, *dec_fmt; \ |
| type temp; \ |
| \ |
| ASSERT(sizeof(type) == sizeof(uint) || sizeof(type) == sizeof(uint64)); \ |
| hex_fmt = (sizeof(type) == sizeof(uint)) ? "0x%x" : "0x" HEX64_FORMAT_STRING; \ |
| dec_fmt = (sizeof(type) == sizeof(uint)) ? "%d" : INT64_FORMAT_STRING; \ |
| \ |
| if (sscanf(str, hex_fmt, &temp) == 1 || sscanf(str, dec_fmt, &temp) == 1) { \ |
| if (temp < (type)(MIN_##limit_str) || \ |
| temp > (type)(MAX_##limit_str)) \ |
| goto error_reading_policy; /* Range error */ \ |
| (var) = temp; \ |
| } \ |
| else \ |
| goto error_reading_policy; /* Parse error. */ \ |
| } |
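| |
| /* Usage sketch (mirroring hotp_read_policy_defs() below), reading a token |
| * and range-checking it against MIN_/MAX_NUM_VULNERABILITIES; note that the |
| * macro jumps to a local error_reading_policy label on failure, so it can |
| * only be used in a function that defines that label: |
| * SET_NUM(num_vuls, uint, NUM_VULNERABILITIES, start); |
| */ |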
| |
| /* FIXME: range check strs for min & max length; null check already done. */ |
| #define SET_STR_DUP(var, input_ptr) \ |
| { \ |
| char *str = hotp_get_next_str(&(input_ptr)); \ |
| \ |
| if (str == NULL) \ |
| goto error_reading_policy; \ |
| (var) = dr_strdup(str HEAPACCT(ACCT_HOT_PATCHING)); \ |
| } |
| |
| #define SET_STR_PTR(var, input_ptr) \ |
| { \ |
| char *str = hotp_get_next_str(&(input_ptr)); \ |
| \ |
| if (str == NULL) \ |
| goto error_reading_policy; \ |
| (var) = str; \ |
| } |
| |
| #define SET_STR(var, input_ptr) SET_STR_DUP(var, input_ptr) |
| |
| #define HOTP_IS_IN_REGION(region_start, region_size, addr) \ |
| (((addr) >= (region_start)) && ((addr) < ((region_start) + (region_size)))) |
| |
| /* This checks addresses. */ |
| #define HOTP_ONLY_IS_IN_TRAMPOLINE(ppoint, addr) \ |
| (((ppoint)->trampoline == NULL || (addr) == NULL) ? false : \ |
| HOTP_IS_IN_REGION((ppoint)->trampoline, HOTP_ONLY_TRAMPOLINE_SIZE, addr)) |
| |
| /* This checks offsets/RVAs. */ |
| #define HOTP_ONLY_IS_IN_PATCH_REGION(ppoint, addr) \ |
| (((ppoint)->offset <= 0 || (addr) <= 0) ? false : \ |
| HOTP_IS_IN_REGION((ppoint)->offset, HOTP_PATCH_REGION_SIZE, addr)) |
| |
| /* TODO: PR 225550 - make this a better function so that each probe is |
| * identified uniquely so as to prevent clients from modifying each others' |
| * probes - make it a function of the client name, probe def & this counter. |
| * Note: probe id is generated outside hotp_vul_table_lock because of having |
| * to load probe/callback dlls without hitting dr hooks, so updates to |
| * probe_id_counter have to be atomic. |
| */ |
| #define GENERATE_PROBE_ID() (atomic_add_exchange_int((int*)&probe_id_counter, 4)) |
| /*----------------------------------------------------------------------------*/ |
| /* Local function prototypes. */ |
| static void hotp_change_control_flow(const hotp_context_t *app_reg_ptr, |
| const app_pc target); |
| |
| static after_intercept_action_t hotp_gateway(const hotp_vul_t *vul_tab, |
| const uint num_vuls, |
| const uint vul_index, |
| const uint set_index, |
| const uint module_index, |
| const uint ppoint_index, |
| hotp_context_t *app_reg_ptr, |
| const bool own_hot_patch_lock); |
| |
| static void hotp_free_vul_table(hotp_vul_t *tab, uint num_vuls_alloc); |
| static hotp_exec_status_t hotp_execute_patch(hotp_func_t hotp_fn_ptr, |
| hotp_context_t *hotp_cxt, |
| hotp_policy_mode_t mode, |
| bool dump_excpt_info, |
| bool dump_error_info); |
| static void hotp_update_vul_stats(const hotp_exec_status_t exec_status, |
| const uint vul_index); |
| static void hotp_event_notify(hotp_exec_status_t exec_status, bool protected, |
| const hotp_offset_match_t *inject_point, |
| const app_pc bad_addr, |
| const hotp_context_t *hotp_cxt); |
| #if defined(DEBUG) && defined(INTERNAL) |
| static void hotp_dump_reg_state(const hotp_context_t *reg_state, |
| const app_pc eip, const uint loglevel); |
| #endif |
| static void hotp_only_inject_patch(const hotp_offset_match_t *ppoint_desc, |
| const thread_record_t **all_threads, |
| const int num_threads); |
| static void hotp_only_remove_patch(const hotp_module_t *module, |
| hotp_patch_point_t *cur_ppoint); |
| after_intercept_action_t hotp_only_gateway(app_state_at_intercept_t *state); |
| static uint hotp_compute_hash(app_pc base, hotp_patch_point_hash_t *hash); |
| #ifdef GBOP |
| static void |
| hotp_only_read_gbop_policy_defs(hotp_vul_t *tab, uint *num_vuls); |
| #endif |
| |
| /* TODO: add function prototypes for all functions in this file */ |
| /*----------------------------------------------------------------------------*/ |
| /* Local data. */ |
| |
| hotp_policy_status_table_t *hotp_policy_status_table; |
| |
| /* FIXME: create hotp_vul_table_t and put these three into it */ |
| static hotp_vul_t *hotp_vul_table; |
| static uint hotp_num_vuls; |
| static hotp_vul_tab_t *hotp_old_vul_tabs; |
| |
| DECLARE_CXTSWPROT_VAR(static read_write_lock_t hotp_vul_table_lock, {{0}}); |
| /* Special heap for hotp_only trampolines; heap is executable. */ |
| static void *hotp_only_tramp_heap; |
| /* Leak to handle case 9593. This should go if we find a cleaner solution. */ |
| #if defined(DEBUG) && defined(HEAP_ACCOUNTING) |
| DECLARE_NEVERPROT_VAR(int hotp_only_tramp_bytes_leaked, 0); |
| #endif |
| /* This is used to cache hotp_only_tramp_heap for handling leak asserts during |
| * detach and to track whether or not any hotp_only patch was removed. Case |
| * 9593 & PR 215520. */ |
| static void *hotp_only_tramp_heap_cache; |
| |
| /* Trampoline area vector; currently used only to identify if a thread is in |
| * the middle of hot patch execution during suspension - for multiprocessor |
| * safe hot patch removal in hotp_only mode. |
| * Kept on the heap for selfprot (case 7957). |
| */ |
| static vm_area_vector_t *hotp_only_tramp_areas; |
| |
| /* This has all the matched patch points, i.e., patch points that have been |
| * determined by hotp_process_image() to be ready to be injected. Only that |
| * function adds or removes from this vector because only that function does |
| * module matching. |
| * The custom data stored is a hotp_offset_match_t structure which describes |
| * the patch point precisely in the GLOBAL_VUL_TABLE. |
| * For hotp_only this refers to all injected patches because they get injected |
| * during match/dll load time. For fcache based hot patches, this may or may |
| * not specify patch injection, but will specify matches. This is because for |
| * hotp_only matching & injection are done in one shot, whereas they are split |
| * for fcache based hot patches. |
| * This vector is not static, it is on the heap because of selfprot; case 8074. |
| * Uses: |
| * 1. for hotp_only to solve the overlapping hashes problem (case 7279). |
| * 2. for offset lookup for both hotp and hotp_only (case 8132). |
| * 3. NYI - all patch removal & injection; perscache stuff (case 10728). |
| */ |
| static vm_area_vector_t *hotp_patch_point_areas; |
| |
| #ifdef DEBUG |
| static hotp_globals_t *hotp_globals; |
| #endif |
| |
| #ifdef CLIENT_INTERFACE |
| /* Global counter used to generate unique ids for probes. This is updated |
| * atomically and isn't guarded by any lock. See GENERATE_PROBE_ID() for |
| * details. |
| */ |
| static unsigned int probe_id_counter; |
| #endif |
| /*----------------------------------------------------------------------------*/ |
| /* Function definitions. */ |
| |
| /* Don't expose the hot patch lock directly outside this module. */ |
| read_write_lock_t * |
| hotp_get_lock(void) |
| { |
| ASSERT(DYNAMO_OPTION(hot_patching)); |
| return &hotp_vul_table_lock; |
| } |
| |
| static inline app_pc |
| hotp_ppoint_addr(const hotp_module_t *module, const hotp_patch_point_t *ppoint) |
| { |
| app_pc ppoint_offset; |
| ASSERT(module != NULL && ppoint != NULL); |
| ASSERT(module->base_address != NULL && ppoint->offset != 0); |
| |
| ppoint_offset = module->base_address + ppoint->offset; |
| |
| /* The patch point should be inside the code section of a loaded module. */ |
| ASSERT(is_in_code_section(module->base_address, ppoint_offset, NULL, NULL)); |
| |
| return ppoint_offset; |
| } |
| |
| static void |
| hotp_ppoint_areas_add(hotp_offset_match_t *ppoint_desc) |
| { |
| hotp_module_t *module; |
| hotp_patch_point_t *ppoint; |
| hotp_offset_match_t *copy; |
| app_pc ppoint_start, ppoint_end; |
| |
| ASSERT(ppoint_desc != NULL); |
| ASSERT(GLOBAL_VUL_TABLE != NULL && hotp_patch_point_areas != NULL); |
| ASSERT_OWN_READWRITE_LOCK(true, &hotp_vul_table_lock); |
| |
| module = &GLOBAL_MODULE(ppoint_desc->vul_index, ppoint_desc->set_index, |
| ppoint_desc->module_index); |
| ppoint = &module->patch_points[ppoint_desc->ppoint_index]; |
| |
| /* Shouldn't be adding to hotp_patch_point_areas if the module hasn't been |
| * matched. |
| */ |
| ASSERT(module->matched); |
| ppoint_start = hotp_ppoint_addr(module, ppoint); |
| ppoint_end = ppoint_start + HOTP_PATCH_REGION_SIZE; |
| |
| /* Each matched (or injected) patch point should be added only |
| * once and removed only once, so before adding, make sure that it |
| * is not already in there. |
| */ |
| ASSERT(!vmvector_overlap(hotp_patch_point_areas, ppoint_start, ppoint_end)); |
| |
| copy = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, hotp_offset_match_t, |
| ACCT_HOT_PATCHING, PROTECTED); |
| *copy = *ppoint_desc; |
| vmvector_add(hotp_patch_point_areas, ppoint_start, ppoint_end, (void *)copy); |
| } |
| |
| static void |
| hotp_ppoint_areas_remove(app_pc pc) |
| { |
| hotp_offset_match_t *ppoint_desc; |
| DEBUG_DECLARE(bool ok;) |
| |
| ASSERT(pc != NULL); |
| ASSERT(GLOBAL_VUL_TABLE != NULL && hotp_patch_point_areas != NULL); |
| ASSERT_OWN_READWRITE_LOCK(true, &hotp_vul_table_lock); |
| |
| ppoint_desc = (hotp_offset_match_t *) |
| vmvector_lookup(hotp_patch_point_areas, pc); |
| |
| DOCHECK(1, { |
| hotp_module_t *module; |
| hotp_patch_point_t *ppoint; |
| |
| /* Shouldn't be trying to remove something that wasn't added. */ |
| ASSERT(ppoint_desc != NULL); |
| |
| /* Verify that the ppoint_desc in the vmvector corresponds to pc. */ |
| module = &GLOBAL_MODULE(ppoint_desc->vul_index, ppoint_desc->set_index, |
| ppoint_desc->module_index); |
| ppoint = &module->patch_points[ppoint_desc->ppoint_index]; |
| ASSERT(pc == hotp_ppoint_addr(module, ppoint)); |
| }); |
| |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ppoint_desc, hotp_offset_match_t, |
| ACCT_HOT_PATCHING, PROTECTED); |
| |
| DEBUG_DECLARE(ok = ) |
| vmvector_remove(hotp_patch_point_areas, pc, |
| pc + HOTP_PATCH_REGION_SIZE); |
| ASSERT(ok); |
| } |
| |
| static void |
| hotp_ppoint_areas_release(void) |
| { |
| app_pc vm_start, vm_end; |
| hotp_offset_match_t *ppoint_desc; |
| vmvector_iterator_t iterator; |
| |
| ASSERT_OWN_READWRITE_LOCK(true, &hotp_vul_table_lock); |
| |
| /* Release all patch point descriptors. */ |
| vmvector_iterator_start(hotp_patch_point_areas, &iterator); |
| while (vmvector_iterator_hasnext(&iterator)) { |
| ppoint_desc = vmvector_iterator_next(&iterator, &vm_start, &vm_end); |
| ASSERT(ppoint_desc != NULL); |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, ppoint_desc, hotp_offset_match_t, |
| ACCT_HOT_PATCHING, PROTECTED); |
| } |
| vmvector_iterator_stop(&iterator); |
| |
| /* Remove all vm_areas in the vmvector. */ |
| vmvector_remove(hotp_patch_point_areas, UNIVERSAL_REGION_BASE, UNIVERSAL_REGION_END); |
| ASSERT(vmvector_empty(hotp_patch_point_areas)); |
| } |
| |
| /* Used to read in vulnerability definitions from file */ |
| static char* |
| hotp_get_next_str(char **start) |
| { |
| char *end, *temp; |
| bool dos_line_terminator = false; |
| |
| /* Validate the argument before dereferencing it. */ |
| if (start == NULL || *start == NULL) |
| return NULL; |
| |
| end = *start; |
| temp = *start; |
| |
| while (*end != '\n' && *end != '\r' && *end != '\0') |
| end++; |
| |
| if (*end != '\0') { |
| if (*end == '\r') { |
| if (*(end + 1) == '\n') |
| dos_line_terminator = true; |
| else |
| SYSLOG_INTERNAL_WARNING("Mac OS style line separator!"); |
| } |
| |
| *end++ = '\0'; |
| if (dos_line_terminator) |
| end++; |
| } |
| *start = end; |
| return temp; |
| } |
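| |
| /* Usage sketch: the parsing code below walks a policy buffer one line at a |
| * time (process() is a hypothetical consumer): |
| * char *cursor = buf, *line; |
| * while ((line = hotp_get_next_str(&cursor)) != NULL && *line != '\0') |
| * process(line); |
| * Note that the buffer is modified in place: line terminators are |
| * overwritten with '\0'. |
| */ |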
| |
| /* Used to read either the policy file or the modes file. */ |
| enum { |
| POLICY_FILE = 1, |
| MODES_FILE |
| }; |
| |
| static char * |
| hotp_read_data_file(uint type, size_t *buf_len /* OUT */) |
| { |
| int retval; |
| char file[MAXIMUM_PATH]; |
| |
| ASSERT(type == POLICY_FILE || type == MODES_FILE); |
| ASSERT(buf_len != NULL); |
| |
| *buf_len = 0; |
| |
| retval = get_parameter(type == POLICY_FILE ? |
| PARAM_STR(DYNAMORIO_VAR_HOT_PATCH_POLICIES) : |
| PARAM_STR(DYNAMORIO_VAR_HOT_PATCH_MODES), |
| file, BUFFER_SIZE_ELEMENTS(file)); |
| if (IS_GET_PARAMETER_FAILURE(retval)) { |
| SYSLOG_INTERNAL_WARNING("Can't find %s definition directory name.", |
| (type == POLICY_FILE) ? "policy" : "mode"); |
| return NULL; |
| } |
| |
| /* The {defs,modes} file is |
| * $DYNAMORIO_HOT_PATCH_POLICIES/<engine>/HOTP_{POLICIES,MODES}_FILENAME |
| */ |
| CAT_AND_TERMINATE(file, "\\" STRINGIFY(HOTP_INTERFACE_VERSION) "\\"); |
| CAT_AND_TERMINATE(file, type == POLICY_FILE ? HOTP_POLICIES_FILENAME : |
| HOTP_MODES_FILENAME); |
| |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "Hot patch %s definition file: %s\n", |
| (type == POLICY_FILE) ? "policy" : "mode", file); |
| |
| return read_entire_file(file, buf_len HEAPACCT(ACCT_HOT_PATCHING)); |
| } |
| |
| /* On a successful read, this should return a valid pointer to a vulnerability |
| * table and modify the size argument passed to it. If it fails, it should |
| * dump a log event, return NULL & not modify the size. |
| * |
| * The caller should release the old table & make the return value the new |
| * table; the reason for doing this table swap outside this function is to |
| * allow (future work) identification of vulnerabilities that have actually |
| * changed; from this set of changed vulnerabilities, identify those that have |
| * been injected and flush only those (an optimization issue). |
| * |
| * Policy file format: (indentations don't appear in the actual file, they exist |
| * here to illustrate the format & to show where data can repeat; also, the |
| * format is close to binary as it stands now) |
| * All integers/hex_numbers are 32-bits unless explicitly stated otherwise. |
| |
| <engine_version-str> |
| <num_vulnerabilities-decimal_integer> |
| <vulnerability_id-str> |
| <policy_id-str> |
| <version-decimal_integer> |
| <hotpatch_dll-str> |
| <hotpatch_dll_hash-str> |
| <num_sets-decimal_integer> |
| <num_modules-decimal_integer> |
| <pe_name-str> |
| <pe_timestamp-hex_number> |
| <pe_checksum-hex_number> |
| <pe_image_size-hex_number> |
| <pe_code_size-hex_number> |
| <pe_file_version-hex_number-64_bits> |
| <num_hashes-decimal_integer> |
| <start-hex_number> |
| <length-hex_number> |
| <hash-decimal_integer> |
| <num_patch_points-decimal_integer> |
| <offset-hex_number> |
| <precedence-decimal_integer> |
| <detector_offset-hex_number> |
| <protector_offset-hex_number> |
| <return_addr-hex_number> |
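| |
| For illustration, a minimal hypothetical file with one vulnerability, one |
| set, one module, one hash and one patch point (every concrete value below |
| is made up) could read: |
| |
| <engine_version> |
| 1 |
| VUL-0001 |
| HOT1.0001 |
| 1 |
| fix.dll |
| <hotp_dll_hash> |
| 1 |
| 1 |
| app.dll |
| 0x412f8c10 |
| 0x000a4b2e |
| 0x00100000 |
| 0x00080000 |
| 0x0001000000000000 |
| 1 |
| 0x1a2b0 |
| 0x10 |
| 12345 |
| 1 |
| 0x1a2b4 |
| 1 |
| 0x400 |
| 0x480 |
| 0x0 |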
| |
| * TODO: all unused fields, i.e., runtime fields in the data structures should |
| * be set to NULL/0 to avoid any assumption violations down stream. |
| * TODO: after reading in the vulnerability data, that region should be write |
| * protected |
| */ |
| static hotp_vul_t * |
| hotp_read_policy_defs(uint *num_vuls_read) |
| { |
| hotp_vul_t *tab = NULL; |
| uint hotp_interface_version; |
| uint vul = 0, set, module, hash, ppoint; |
| uint num_vuls = 0, num_vuls_alloc = 0; |
| char *buf = NULL; /* TODO: for now only; will go after file mapping */ |
| size_t buf_len = 0; |
| char *start = NULL; |
| DEBUG_DECLARE(bool started_parsing = false;) |
| |
| /* Read the config file only if -liveshields is turned on. If it isn't |
| * turned on, read gbop hooks if -gbop is specified. |
| */ |
| if (!DYNAMO_OPTION(liveshields)) { |
| #ifdef GBOP |
| if (DYNAMO_OPTION(gbop)) |
| goto read_gbop_only; |
| #endif |
| return NULL; |
| } |
| |
| buf = hotp_read_data_file(POLICY_FILE, &buf_len); |
| if (buf == NULL) { |
| ASSERT(buf_len == 0); |
| goto error_reading_policy; |
| } else { |
| ASSERT(buf_len > 0); |
| ASSERT_CURIOSITY(buf_len < MAX_POLICY_FILE_SIZE); |
| } |
| |
| start = buf; |
| DEBUG_DECLARE(started_parsing = true;) |
| SET_NUM(hotp_interface_version, uint, HOTP_INTERFACE_VERSION, start); |
| SET_NUM(num_vuls, uint, NUM_VULNERABILITIES, start); |
| #ifdef GBOP |
| if (DYNAMO_OPTION(gbop)) |
| num_vuls_alloc = gbop_get_num_hooks(); |
| #endif |
| num_vuls_alloc += num_vuls; |
| ASSERT(num_vuls_alloc > 0 && num_vuls_alloc <= MAX_NUM_VULNERABILITIES); |
| |
| /* Zero out all dynamically allocated hotpatch table structures to avoid |
| * leaks when there is a parse error. See case 8272, 9045. |
| */ |
| tab = HEAP_ARRAY_ALLOC_MEMSET(GLOBAL_DCONTEXT, hotp_vul_t, num_vuls_alloc, |
| ACCT_HOT_PATCHING, PROTECTED, 0); |
| |
| for (vul = 0; vul < num_vuls; vul++) { |
| /* FIXME: bounds checking; length should be > 2 && < 32; not null */ |
| SET_STR(VUL(tab, vul).vul_id, start); |
| SET_STR(VUL(tab, vul).policy_id, start); |
| SET_NUM(VUL(tab, vul).policy_version, uint, POLICY_VERSION, start); |
| |
| /* FIXME: strdup strings because the buffer/mapped file will be deleted |
| * after processing; don't use strdup though! |
| * This works right now only until the next time a policy file is read |
| * into buf[]; if that read fails the old data will be corrupt! |
| * Remember, if not strdup'ed, all strings are in writable memory. |
| */ |
| SET_STR(VUL(tab, vul).hotp_dll, start); |
| SET_STR(VUL(tab, vul).hotp_dll_hash, start); |
| SET_NUM(VUL(tab, vul).num_sets, uint, NUM_SETS, start); |
| |
| /* Initialize all runtime values in the structure. */ |
| VUL(tab, vul).mode = HOTP_MODE_OFF; /* Fix for case 5326. */ |
| VUL(tab, vul).type = HOTP_TYPE_HOT_PATCH; |
| |
| VUL(tab, vul).sets = HEAP_ARRAY_ALLOC_MEMSET(GLOBAL_DCONTEXT, |
| hotp_set_t, VUL(tab, vul).num_sets, |
| ACCT_HOT_PATCHING, PROTECTED, 0); |
| VUL(tab, vul).info = HEAP_ARRAY_ALLOC_MEMSET(GLOBAL_DCONTEXT, |
| hotp_vul_info_t, 1, ACCT_HOT_PATCHING, |
| PROTECTED, 0); |
| |
| for (set = 0; set < VUL(tab, vul).num_sets; set++) { |
| SET_NUM(SET(tab, vul, set).num_modules, uint, NUM_MODULES, start); |
| SET(tab, vul, set).modules = HEAP_ARRAY_ALLOC_MEMSET(GLOBAL_DCONTEXT, |
| hotp_module_t, |
| SET(tab, vul, set).num_modules, |
| ACCT_HOT_PATCHING, PROTECTED, 0); |
| for (module = 0; module < SET(tab, vul, set).num_modules; |
| module++) { |
| SET_STR(SIG(tab, vul, set, module).pe_name, start); |
| SET_NUM(SIG(tab, vul, set, module).pe_timestamp, uint, |
| PE_TIMESTAMP, start); |
| SET_NUM(SIG(tab, vul, set, module).pe_checksum, uint, |
| PE_CHECKSUM, start); |
| SET_NUM(SIG(tab, vul, set, module).pe_image_size, uint, |
| PE_IMAGE_SIZE, start); |
| SET_NUM(SIG(tab, vul, set, module).pe_code_size, uint, |
| PE_CODE_SIZE, start); |
| SET_NUM(SIG(tab, vul, set, module).pe_file_version, uint64, |
| PE_FILE_VERSION, start); |
| |
| /* Initialize all runtime values in the structure. */ |
| MODULE(tab, vul, set, module).matched = false; |
| MODULE(tab, vul, set, module).base_address = NULL; |
| |
| SET_NUM(MODULE(tab, vul, set, module).num_patch_point_hashes, |
| uint, NUM_PATCH_POINT_HASHES, start); |
| MODULE(tab, vul, set, module).hashes = |
| HEAP_ARRAY_ALLOC_MEMSET(GLOBAL_DCONTEXT, |
| hotp_patch_point_hash_t, |
| MODULE(tab, vul, set, module).num_patch_point_hashes, |
| ACCT_HOT_PATCHING, PROTECTED, 0); |
| |
| for (hash = 0; |
| hash < MODULE(tab, vul, set, module).num_patch_point_hashes; |
| hash++) { |
| SET_NUM(PPOINT_HASH(tab, vul, set, module, hash).start, |
| app_rva_t, HASH_START_OFFSET, start); |
| SET_NUM(PPOINT_HASH(tab, vul, set, module, hash).len, |
| uint, HASH_LENGTH, start); |
| SET_NUM(PPOINT_HASH(tab, vul, set, module, hash).hash_value, |
| uint, HASH_VALUE, start); |
| } |
| |
| SET_NUM(MODULE(tab, vul, set, module).num_patch_points, uint, |
| NUM_PATCH_POINTS, start); |
| MODULE(tab, vul, set, module).patch_points = |
| HEAP_ARRAY_ALLOC_MEMSET(GLOBAL_DCONTEXT, hotp_patch_point_t, |
| MODULE(tab, vul, set, module).num_patch_points, |
| ACCT_HOT_PATCHING, PROTECTED, 0); |
| |
| for (ppoint = 0; |
| ppoint < MODULE(tab, vul, set, module).num_patch_points; |
| ppoint++) { |
| SET_NUM(PPOINT(tab, vul, set, module, ppoint).offset, |
| app_rva_t, PATCH_OFFSET, start); |
| SET_NUM(PPOINT(tab, vul, set, module, ppoint).precedence, |
| uint, PATCH_PRECEDENCE, start); |
| SET_NUM(PPOINT(tab, vul, set, module, ppoint).detector_fn, |
| app_rva_t, DETECTOR_OFFSET, start); |
| |
| /* Both protector and return address can be NULL */ |
| SET_NUM(PPOINT(tab, vul, set, module, ppoint).protector_fn, |
| app_rva_t, PROTECTOR_OFFSET, start); |
| SET_NUM(PPOINT(tab, vul, set, module, ppoint).return_addr, |
| app_rva_t, RETURN_ADDR, start); |
| PPOINT(tab, vul, set, module, ppoint).trampoline = NULL; |
| PPOINT(tab, vul, set, module, ppoint).app_code_copy = NULL; |
| PPOINT(tab, vul, set, module, ppoint).tramp_exit_tgt = NULL; |
| } |
| } |
| } |
| } |
| |
| #ifdef GBOP |
| if (DYNAMO_OPTION(gbop)) { |
| hotp_only_read_gbop_policy_defs(tab, &num_vuls /* IN OUT arg */); |
| ASSERT(num_vuls_alloc == num_vuls); |
| } |
| #endif |
| *num_vuls_read = num_vuls; |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, |
| "read %d vulnerability definitions\n", num_vuls); |
| heap_free (GLOBAL_DCONTEXT, buf, buf_len HEAPACCT(ACCT_HOT_PATCHING)); |
| return tab; |
| |
| error_reading_policy: |
| /* TODO: log error, free stuff, set tab to null, leave size intact & exit |
| * for now just assert to make sure that bugs don't escape. |
| */ |
| /* TODO: provide line #, not offset; alex couldn't use the offset */ |
| SYSLOG_INTERNAL_WARNING("Error reading or parsing hot patch definitions"); |
| /* Need this curiosity to make qa notice; the warning is handy for |
| * development testing only. No hot patching on Linux, so don't assert. |
| * FIXME: Convert to assert after case 9066 has been fixed & tested. |
| * Note: Warn for a missing file, but assert for a parsing error; the latter |
| * is a bug, the former may just be a hotpatch-less installation - mostly coredev. |
| */ |
| IF_WINDOWS(ASSERT_CURIOSITY(!started_parsing && |
| "Error parsing hot patch definitions");) |
| *num_vuls_read = 0; |
| if (tab != NULL) { |
| ASSERT(num_vuls_alloc > 0); |
| /* If gbop is on, then during a parse error num_vuls (parsed) must be |
| * less than num_vuls_alloc because if the table has been allocated, space |
| * has been allocated for the gbop entries as well, which wouldn't have been |
| * read on a parse error. They are read after this point; see below. |
| */ |
| IF_GBOP(ASSERT(!DYNAMO_OPTION(gbop) || num_vuls < num_vuls_alloc);) |
| /* On error free the whole table, not just what was read; case 9044. */ |
| hotp_free_vul_table(tab, num_vuls_alloc); |
| tab = NULL; |
| } |
| |
| /* buf can be allocated even if vulnerability table hasn't been allocated. |
| * See case 8332. |
| */ |
| if (buf != NULL) { |
| ASSERT(buf_len > 0); |
| heap_free(GLOBAL_DCONTEXT, buf, buf_len HEAPACCT(ACCT_HOT_PATCHING)); |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, |
| "error reading vulnerability file at offset "SZFMT"\n", |
| (ptr_uint_t)(start - buf)); |
| } |
| |
| #ifdef GBOP |
| /* Even if we couldn't read the hot patch policies, we should still |
| * allocate a new table and read in the gbop hooks. |
| */ |
| read_gbop_only: |
| if (DYNAMO_OPTION(gbop)) { |
| num_vuls_alloc = gbop_get_num_hooks(); |
| ASSERT(num_vuls_alloc > 0 && num_vuls_alloc <= MAX_NUM_VULNERABILITIES); |
| num_vuls = 0; |
| |
| tab = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, hotp_vul_t, num_vuls_alloc, |
| ACCT_HOT_PATCHING, PROTECTED); |
| hotp_only_read_gbop_policy_defs(tab, &num_vuls /* IN OUT arg */); |
| |
| ASSERT(num_vuls_alloc == num_vuls); |
| *num_vuls_read = num_vuls; |
| } |
| #endif |
| |
| return tab; |
| } |
| |
| /* TODO: An efficiency issue: don't load all hot patch dlls unless the mode |
| * for at least one corresponding policy is detect or protect; this will |
| * avoid loading all hot patch dlls whether they are used or not. Note: |
| * this is still eager loading as per the design. |
| */ |
| static void |
| hotp_load_hotp_dlls(hotp_vul_t *vul_tab, uint num_vuls) |
| { |
| uint vul; |
| int retval; |
| /* TODO: these arrays are large so make them static with a lock to avoid |
| * a potential runtime stack overflow. |
| */ |
| char hotp_dll_path[MAXIMUM_PATH]; |
| char hotp_dll_cache[MAXIMUM_PATH]; |
| |
| /* Only liveshields need to know DYNAMORIO_HOME, probes give full paths. */ |
| if (DYNAMO_OPTION(liveshields)) { |
| /* If the hotp_dll_cache directory is null or non-existent, should we raise |
| * an error log and disable all associated vuls? We are going to assert/log |
| * if we can't find the dll (below) anyway. |
| */ |
| retval = get_parameter(PARAM_STR(DYNAMORIO_VAR_HOME), hotp_dll_cache, |
| BUFFER_SIZE_ELEMENTS(hotp_dll_cache)); |
| if (IS_GET_PARAMETER_FAILURE(retval)) { |
| SYSLOG_INTERNAL_WARNING("Can't read %s. Hot patch dll loading " |
| "failed; hot patching won't work.", |
| DYNAMORIO_VAR_HOME); |
| return; |
| } |
| } else { |
| #ifdef CLIENT_INTERFACE |
| ASSERT(DYNAMO_OPTION(probe_api)); |
| #endif |
| } |
| |
| /* Compute dll cache path, i.e., $DYNAMORIO_HOME/lib/hotp/<engine>/ */ |
| NULL_TERMINATE_BUFFER(hotp_dll_cache); |
| CAT_AND_TERMINATE(hotp_dll_cache, HOT_PATCHING_DLL_CACHE_PATH); |
| CAT_AND_TERMINATE(hotp_dll_cache, STRINGIFY(HOTP_INTERFACE_VERSION) "\\"); |
| |
| for (vul = 0; vul < num_vuls; vul++) { |
| /* Hot patch dlls exist only for the type hot_patch and probe, not for |
| * gbop hooks; well, not at least for now. |
| */ |
| if (!TESTANY(HOTP_TYPE_HOT_PATCH | HOTP_TYPE_PROBE, |
| VUL(vul_tab, vul).type)) { |
| ASSERT(TESTALL(HOTP_TYPE_GBOP_HOOK, VUL(vul_tab, vul).type)); |
| /* TODO: also assert that the base is dynamorio.dll & remediator |
| * offsets are what they should be - use a DODEBUG |
| */ |
| continue; |
| } |
| |
| if (VUL(vul_tab, vul).hotp_dll_base == NULL) { /* Not loaded yet. */ |
| ASSERT(TESTANY(HOTP_TYPE_HOT_PATCH | HOTP_TYPE_PROBE, |
| VUL(vul_tab, vul).type)); |
| ASSERT(VUL(vul_tab, vul).hotp_dll != NULL); |
| |
| /* Liveshields give just the base name which is used to compute |
| * full path, i.e., DYNAMORIO_HOME/lib/hotp/hotp_dll. */ |
| if (TEST(HOTP_TYPE_HOT_PATCH, VUL(vul_tab, vul).type)) { |
| strncpy(hotp_dll_path, hotp_dll_cache, |
| BUFFER_SIZE_ELEMENTS(hotp_dll_path) - 1); |
| NULL_TERMINATE_BUFFER(hotp_dll_path); |
| |
| /* Hot patch dll names should just be base names; with no / or \. */ |
| ASSERT(strchr(VUL(vul_tab, vul).hotp_dll, '\\') == NULL && |
| strchr(VUL(vul_tab, vul).hotp_dll, '/') == NULL); |
| strncat(hotp_dll_path, VUL(vul_tab, vul).hotp_dll, |
| BUFFER_SIZE_ELEMENTS(hotp_dll_path) - |
| strlen(hotp_dll_path) - 1); |
| } else { |
| /* Probe api calls provide full path to hotp dlls. */ |
| strncpy(hotp_dll_path, VUL(vul_tab, vul).hotp_dll, |
| BUFFER_SIZE_ELEMENTS(hotp_dll_path) - 1); |
| } |
| NULL_TERMINATE_BUFFER(hotp_dll_path); |
| ASSERT(strlen(hotp_dll_path) < BUFFER_SIZE_ELEMENTS(hotp_dll_path)); |
| |
| /* TODO: check if file exists; if not log, turn off associated |
| * vulnerabilities & bail out; need to think through the |
| * error exit mechanism while reading policy-{defs,modes}. |
| */ |
| |
| /* FIXME: currently our loadlibrary hits our own syscall_while_native |
| * hook and goes to dispatch, which expects protected data sections. |
| * Once we have our own loader we can remove this. |
| */ |
| VUL(vul_tab, vul).hotp_dll_base = |
| load_shared_library(hotp_dll_path, false/*!reachable*/); |
| |
| /* TODO: if module base is null, raise a log event, mark vul as not |
| * usable (probably a new status) and move on; for now just |
| * assert. |
| */ |
| /* TODO: assert that all detector_fn & protector_fn offsets |
| * associated with this hotp_dll actually lie within its |
| * text space. |
| */ |
| if (VUL(vul_tab, vul).hotp_dll_base == NULL) { |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, |
| "unable to load hotp_dll: %s\n", hotp_dll_path); |
| ASSERT(VUL(vul_tab, vul).hotp_dll_base != NULL); |
| } |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "loaded hotp_dll: %s at "PFX"\n", |
| hotp_dll_path, VUL(vul_tab, vul).hotp_dll_base); |
| |
| /* TODO: this one must be done asap; add the hot patch dll's text |
| * either to a new vm_area_vector_t or executable_vm_areas; |
| * check with the team first. case 5381 |
| * add_executable_vm_area(hotp_dll_text_base, size_of_text, |
| * VM_UNMOD_IMAGE, false |
| _IF_DEBUG("hot patch dll loading")); |
| */ |
| /* TODO: assert that hotp_dll's dllmain is null to prevent control |
| * flow from going there during the thread creation due to |
| * nudge; but how? |
| */ |
| } |
| } |
| } |
| |
| /* TODO: need a lot more LOG, ASSERT and SYSLOG statements */ |
| /*----------------------------------------------------------------------------*/ |
| |
| /* TODO: for now just read from a flat file; change it in next phase to |
| * file/shmem depending upon what we decide; same goes for binary vs. |
| * text format; either way, the format of communication has to be defined |
| * so that nodemanager & core know what to write & read - key items |
| * include number of mode changes transmitted & the structure of each. |
| * |
| * Mode file format: |
| * <num_mode_update_entries> |
| * <policy_id-str>:<mode-decimal_integer> |
| * ... |
| * mode 0 - off, 1 - detect, 2 - protect; |
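| * |
| * For illustration (the policy ids are made up), a modes file that turns |
| * one policy on in protect mode and another off would read: |
| * 2 |
| * HOT1.0001:2 |
| * HOT1.0002:0 |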
| * |
| * TODO: eventually, modes will be strings (good idea?), not binary; might be |
| * better to leave it as it is today. |
| */ |
| static void |
| hotp_read_policy_modes(hotp_policy_mode_t **old_modes) |
| { |
| /* TODO: for the next phase decide whether to use registry key or option |
| * string; for now, use a registry key. |
| */ |
| uint mode = 0, vul, num_mode_update_entries, i; |
| char *buf = NULL; |
| size_t buf_len = 0; |
| char *start = NULL; |
| |
| /* Old modes are needed only by regular hotp for flushing patches; |
| * hotp_only shouldn't use them. |
| */ |
| ASSERT(!DYNAMO_OPTION(hotp_only) || old_modes == NULL); |
| if (old_modes != NULL) /* init to NULL because there are error exits */ |
| *old_modes = NULL; |
| |
| /* Can be called only during hotp_init() or during a nudge. */ |
| ASSERT_OWN_WRITE_LOCK(true, &hotp_vul_table_lock); |
| |
| /* This function shouldn't be called before policies are read. |
| * Sometimes, the node manager can nudge for a mode read without specifying |
| * policies first! This may happen during startup. Case 5448. |
| */ |
| if (GLOBAL_VUL_TABLE == NULL) { |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "Modes can't be set without " |
| "policy definitions. Probably caused due to a nudge by the node " |
| "manager to read modes when there were no policies."); |
| return; |
| } |
| |
| buf = hotp_read_data_file(MODES_FILE, &buf_len); |
| if (buf == NULL) { |
| ASSERT(buf_len == 0); |
| return; |
| } |
| ASSERT(buf_len > 0); |
| |
| /* Allocate space to save the old modes if they were requested for. */ |
| if (old_modes != NULL) { |
| *old_modes = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, hotp_policy_mode_t, |
| NUM_GLOBAL_VULS, ACCT_HOT_PATCHING, PROTECTED); |
| ASSERT(*old_modes != NULL); /* make sure that space was allocated */ |
| } |
| |
| /* Turn off all vulnerabilities before reading modes. Only those for which |
| * a mode is specified should be on. Fix for case 5565. As the write lock |
| * is held, there is no danger of any lookup providing a no-match when there |
| * is one. |
| */ |
| for (vul = 0; vul < NUM_GLOBAL_VULS; vul++) { |
| if (old_modes != NULL) |
| (*old_modes)[vul] = GLOBAL_VUL(vul).mode; |
| |
| /* Only hot patch types can be turned off by mode files. Other types |
| * like gbop hooks can't be. |
| */ |
| if (TESTALL(HOTP_TYPE_HOT_PATCH, GLOBAL_VUL(vul).type)) |
| GLOBAL_VUL(vul).mode = HOTP_MODE_OFF; |
| } |
| |
| start = buf; |
| SET_NUM(num_mode_update_entries, uint, NUM_VULNERABILITIES, start); |
| |
| /* TODO: what if num_mode_update_entries is more than the entries in the |
| * file? |
| */ |
| for (i = 0; i < num_mode_update_entries; i++) { |
| bool matched = false; |
| char *temp, *policy_id; |
| |
| SET_STR_PTR(policy_id, start); |
| temp = strchr(policy_id, ':'); |
| if (temp == NULL) |
| goto error_reading_policy; |
| *temp++ = '\0'; /* TODO: during file mapping, this won't work */ |
| |
| SET_NUM(mode, uint, MODE, temp); |
| |
| /* Must set mode for all vulnerabilities with a matching policy_id, not |
| * just the first one. |
| */ |
| for (vul = 0; vul < NUM_GLOBAL_VULS; vul++) { |
| if (strncmp(GLOBAL_VUL(vul).policy_id, policy_id, |
| MAX_POLICY_ID_LENGTH) == 0) { |
| GLOBAL_VUL(vul).mode = mode; |
| matched = true; |
| } |
| } |
| |
| /* If, during a mode update, a policy_id from the mode file doesn't have |
| * a corresponding vul_t, log a warning. When the node manager is |
| * starting up, the modes file can be inconsistent, so this may happen |
| * (cases 5500 & 5526). However, this could also be a bug somewhere in the |
| * pipeline (EV, nm, policy package, etc). |
| */ |
| if (!matched) |
| SYSLOG_INTERNAL_WARNING("While reading modes, found a mode " |
| "definition for a policy (%s) that didn't exist", policy_id); |
| } |
| |
| /* TODO: make the macro take this as an argument or find a neutral name */ |
| error_reading_policy: |
| ASSERT(buf != NULL); |
| heap_free(GLOBAL_DCONTEXT, buf, buf_len HEAPACCT(ACCT_HOT_PATCHING)); |
| return; |
| } |
| |
| static void |
| hotp_set_policy_status(const uint vul_index, const hotp_inject_status_t status) |
| { |
| uint crc_buf_size; |
| |
| ASSERT_OWN_WRITE_LOCK(true, &hotp_vul_table_lock); |
| |
| ASSERT(hotp_policy_status_table != NULL); |
| ASSERT(status == HOTP_INJECT_NO_MATCH || status == HOTP_INJECT_PENDING || |
| status == HOTP_INJECT_IN_PROGRESS || status == HOTP_INJECT_DETECT || |
| status == HOTP_INJECT_PROTECT || status == HOTP_INJECT_ERROR); |
| |
| /* Given that no other thread, app or nudge, will change this without the |
| * hot patch lock, this can be done without an atomic write. |
| */ |
| ASSERT(GLOBAL_VUL(vul_index).info->inject_status != NULL); |
| *(GLOBAL_VUL(vul_index).info->inject_status) = status; |
| |
| /* Compute CRC after this status update and put it in the |
| * policy status table so that the node manager is protected from |
| * reading invalid status due to the policy status table being reset/ |
| * reallocated when hotp_init, a nudge, or detach takes place. |
| * |
| * Note: The CRC write to the table doesn't need to be atomic too. Also, |
| * the CRC value is for all bytes of the policy status table except |
| * the CRC itself. Otherwise we would have to do the CRC computation |
| * twice; wastefully expensive. |
| */ |
| crc_buf_size = hotp_policy_status_table->size - |
| sizeof(hotp_policy_status_table->crc); |
| hotp_policy_status_table->crc = crc32((char*)&hotp_policy_status_table->size, |
| crc_buf_size); |
| } |
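| |
| /* Reader-side sketch (hypothetical node manager code, not part of the |
| * core): validate the CRC before trusting the table contents. |
| * uint crc_buf_size = table->size - sizeof(table->crc); |
| * if (table->crc != crc32((char *)&table->size, crc_buf_size)) |
| * ...; (table is being rebuilt; re-read drmarker and retry) |
| */ |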
| |
| /* The status of hot patches is directly read by the node manager from the
| * memory address specified in the drmarker; no nudge is needed. While the
| * table is being created, the drmarker pointer will be NULL and is set only
| * after the table is fully initialized. Also, updates to the table entries
| * are made with the hot patch lock held, as with creation. The only way the
| * node manager can get invalid data is if this routine releases the old
| * policy status table after the node manager reads drmarker but before it
| * reads the table; that case is guarded by the table CRC, which will then
| * likely be wrong. If drmarker points to memory released to the os, or is
| * NULL, the node manager will get a memory read error and should be able to
| * reattempt, by which time the new table will be ready.
| * |
| * Format of policy status table in memory: |
| * <CRC32-uint> - CRC of size_in_bytes - sizeof(CRC32, i.e., uint).
| * <size_in_bytes-uint> |
| * <num_policy_entries-uint> |
| * <hotp_policy_status_t>* |
| */ |
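| /* A minimal sketch (hypothetical reader-side code, not part of DR) of how
| * a consumer such as the node manager could validate the table against the
| * CRC described above; read_uint() stands in for whatever remote-read
| * primitive the consumer uses:
| *
| *     uint crc = read_uint(tab); // <CRC32-uint>
| *     uint size = read_uint(tab + sizeof(uint)); // <size_in_bytes-uint>
| *     if (crc32((char *)tab + sizeof(uint), size - sizeof(uint)) != crc) {
| *         // Table is being rebuilt or was freed; retry the read later.
| *     }
| */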
| static void |
| hotp_init_policy_status_table(void) |
| { |
| uint i, num_policies = NUM_GLOBAL_VULS, size_in_bytes, crc_buf_size; |
| hotp_policy_status_table_t *temp; |
| |
| /* Can be called only during hotp_init() or during a nudge. */ |
| ASSERT_OWN_WRITE_LOCK(true, &hotp_vul_table_lock); |
| ASSERT(!DATASEC_PROTECTED(DATASEC_RARELY_PROT)); |
| |
| /* This function shouldn't be called before policies and/or modes are read. |
| * Sometimes, the node manager can nudge for a mode read without specifying |
| * policies first! This may happen during startup. Case 5448. |
| */ |
| if (GLOBAL_VUL_TABLE == NULL) { |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "Policy status table can't be created "
| "without policy definitions. Probably caused by a nudge from the "
| "node manager to read modes when there were no policies, or "
| "because all probes registered using the probe api were invalid.");
| return; |
| } |
| |
| /* This function is called each time new policies and/or modes are read
| * in. Each such time all existing injected hot patches are removed, so
| * the policy status table associated with the old global vulnerability
| * table must be released, or resized to fit only the new set of
| * hot patches turned on. The former is simpler to do.
| * |
| * Note: if the optimization of flushing only those policies that |
| * have changed is implemented, which is not the case today, then just |
| * releasing policy status table will result in incorrect inject status. |
| * It should be released after the new table is created and filled with |
| * old values. |
| */ |
| if (hotp_policy_status_table != NULL) { |
| temp = hotp_policy_status_table; |
| hotp_policy_status_table = NULL; |
| |
| /* If dr_marker_t isn't initialized, this won't be set. In that case, |
| * the dr_marker_t initialization code will set up the policy status table. |
| * This can happen at init time because hotp_init() is called before |
| * callback_interception_init(). |
| */ |
| set_drmarker_hotp_policy_status_table(NULL); |
| |
| heap_free(GLOBAL_DCONTEXT, temp, temp->size HEAPACCT(ACCT_HOT_PATCHING)); |
| } |
| |
| /* Right now, the status table contains as many elements as there are
| * vulnerabilities. The original idea was to include only the policies
| * that are turned on. That caused failures in the core because we need
| * to maintain status internally for vulnerabilities that are turned off
| * too. Case 5326.
| */
| size_in_bytes = sizeof(hotp_policy_status_table_t) + |
| sizeof(hotp_policy_status_t) * num_policies; |
| temp = heap_alloc(GLOBAL_DCONTEXT, size_in_bytes HEAPACCT(ACCT_HOT_PATCHING)); |
| temp->size = size_in_bytes; |
| temp->policy_status_array = (hotp_policy_status_t*)((char*)temp + |
| sizeof(hotp_policy_status_table_t)); |
| |
| /* Init status buffer elements & set up global vul table pointers */ |
| /* TODO: two vulnerabilities can belong to the same policy; need to check |
| * for that and avoid duplication in the table; not needed now |
| * because we don't have such policies yet. |
| */ |
| for (i = 0; i < NUM_GLOBAL_VULS; i++) { |
| strncpy(temp->policy_status_array[i].policy_id, |
| GLOBAL_VUL(i).policy_id, MAX_POLICY_ID_LENGTH); |
| NULL_TERMINATE_BUFFER(temp->policy_status_array[i].policy_id); |
| temp->policy_status_array[i].inject_status = HOTP_INJECT_NO_MATCH; |
| |
| /* Fix for case 5484, where the node manager wasn't able to tell if an |
| * inject status was for a policy that was turned on or off. |
| */ |
| temp->policy_status_array[i].mode = GLOBAL_VUL(i).mode; |
| |
| /* The inject status in global vulnerability table should point |
| * to the corresponding element in this table. |
| */ |
| GLOBAL_VUL(i).info->inject_status = |
| &temp->policy_status_array[i].inject_status; |
| } |
| temp->num_policies = i; |
| |
| /* Set the table CRC now that the table has been initialized. */ |
| crc_buf_size = temp->size - sizeof(temp->crc); |
| temp->crc = crc32((char*)&temp->size, crc_buf_size); |
| |
| /* Make the policy status table live. If the dr_marker_t isn't initialized |
| * this won't be set. In that case, the dr_marker_t initialization code will |
| * set up the policy status table; happens during startup/initialization. |
| */ |
| hotp_policy_status_table = temp; |
| |
| set_drmarker_hotp_policy_status_table((void*)temp); |
| } |
| |
| /* Frees all the dynamically allocated members of vul (strings, info, sets, |
| * modules and patch points). NOTE: It doesn't free the vul itself. |
| */ |
| static void
| hotp_free_one_vul(hotp_vul_t *vul)
| { |
| uint set_idx, module_idx, ppoint_idx; |
| |
| /* If this routine is called with a NULL argument, then there is a bug
| * somewhere.
| */
| ASSERT(vul != NULL); |
| if (vul == NULL) |
| return; |
| |
| if (vul->vul_id != NULL) |
| dr_strfree(vul->vul_id HEAPACCT(ACCT_HOT_PATCHING)); |
| if (vul->policy_id != NULL) |
| dr_strfree(vul->policy_id HEAPACCT(ACCT_HOT_PATCHING)); |
| if (vul->hotp_dll != NULL) |
| dr_strfree(vul->hotp_dll HEAPACCT(ACCT_HOT_PATCHING)); |
| if (vul->hotp_dll_hash != NULL) |
| dr_strfree(vul->hotp_dll_hash HEAPACCT(ACCT_HOT_PATCHING)); |
| if (vul->info != NULL) { |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, vul->info, hotp_vul_info_t, |
| ACCT_HOT_PATCHING, PROTECTED); |
| } |
| |
| if (vul->sets == NULL) |
| return; |
| |
| /* If the sets array isn't NULL, then the number of sets can't be zero. */
| ASSERT(vul->num_sets > 0); |
| for (set_idx = 0; set_idx < vul->num_sets; set_idx++) { |
| hotp_set_t *set = &vul->sets[set_idx]; |
| |
| if (set->modules == NULL) |
| continue; |
| |
| /* If the modules array isn't NULL, then the number of modules can't
| * be zero.
| */
| ASSERT(set->num_modules > 0); |
| for (module_idx = 0; module_idx < set->num_modules; module_idx++) { |
| hotp_module_t *module = &set->modules[module_idx]; |
| if (module->sig.pe_name != NULL) { |
| dr_strfree(module->sig.pe_name HEAPACCT(ACCT_HOT_PATCHING)); |
| } |
| |
| if (module->hashes != NULL) { |
| ASSERT(module->num_patch_point_hashes > 0); |
| HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, module->hashes, |
| hotp_patch_point_hash_t, |
| module->num_patch_point_hashes, |
| ACCT_HOT_PATCHING, PROTECTED); |
| } |
| |
| if (module->patch_points != NULL) { |
| ASSERT(module->num_patch_points > 0); |
| for (ppoint_idx = 0; ppoint_idx < module->num_patch_points; |
| ppoint_idx++) { |
| hotp_patch_point_t *ppoint = &module->patch_points[ppoint_idx]; |
| if (ppoint->trampoline != NULL) { |
| ASSERT(DYNAMO_OPTION(hotp_only)); |
| ASSERT(ppoint->app_code_copy != NULL); |
| special_heap_free(hotp_only_tramp_heap, |
| (void *) ppoint->trampoline); |
| } |
| } |
| HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, module->patch_points, |
| hotp_patch_point_t, module->num_patch_points, |
| ACCT_HOT_PATCHING, PROTECTED); |
| } |
| } |
| HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, set->modules, hotp_module_t, |
| set->num_modules, ACCT_HOT_PATCHING, PROTECTED); |
| } |
| HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vul->sets, hotp_set_t, |
| vul->num_sets, ACCT_HOT_PATCHING, PROTECTED); |
| } |
| |
| /* Release all memory used by the hot patch vulnerability table, tab.
| * num_vuls_alloc is the number of vulnerability defs the table has space
| * for. The table may not always contain num_vuls_alloc policy defs: when
| * there is an error during policy def file parsing, there can be fewer,
| * with the last one (the one where the error happened) being partial.
| * Cases 8272, 9045.
| */
| static void |
| hotp_free_vul_table(hotp_vul_t *tab, uint num_vuls_alloc) |
| { |
| uint vul_idx; |
| |
| if (tab == NULL) { |
| ASSERT(num_vuls_alloc == 0); |
| return; |
| } |
| |
| /* If the table isn't NULL, the number of vulnerabilities can't be zero. */ |
| ASSERT(num_vuls_alloc > 0); |
| |
| for (vul_idx = 0; vul_idx < num_vuls_alloc; vul_idx++) { |
| hotp_free_one_vul(&tab[vul_idx]); |
| } |
| HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, tab, hotp_vul_t, num_vuls_alloc, |
| ACCT_HOT_PATCHING, PROTECTED); |
| } |
| |
| /* This routine flushes all fragments in the fcache that have been injected
| * with a hot patch, i.e., it restores app text to its pre-hot-patch state.
| *
| * Note: hot patch removal is not optimized, i.e., changes to existing
| * policy definitions, modes or actual injection status aren't used to limit
| * flushing. Not a performance issue for now.
| * TODO: flush only those vulnerabilities that have actually changed, not
| * everything that is active or has been injected.
| * TODO: make this use loaded_module_areas & get rid of the 4-level nested
| * loops.
| */
| static void |
| hotp_remove_patches_from_module(const hotp_vul_t *vul_tab, const uint num_vuls, |
| const bool hotp_only, const app_pc mod_base, |
| const hotp_policy_mode_t *old_modes) |
| { |
| uint vul_idx, set_idx, module_idx, ppoint_idx; |
| hotp_module_t *module; |
| hotp_patch_point_t *ppoint; |
| dcontext_t *dcontext = get_thread_private_dcontext(); |
| |
| ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT); |
| /* For hotp_only patch removal, we should be running in hotp_only mode. */ |
| ASSERT(!hotp_only || DYNAMO_OPTION(hotp_only)); |
| /* Old vulnerability modes shouldn't be used with hotp_only. */ |
| ASSERT(!DYNAMO_OPTION(hotp_only) || old_modes == NULL); |
| /* Alternate modes shouldn't be used during module specific removal also. */ |
| ASSERT(mod_base == NULL || old_modes == NULL); |
| |
| /* Though trying to flush a NULL vul table is a bug, this can happen |
| * because the node manager can nudge the core to read modes when it hasn't |
| * provided the policies! See case 5448. Hence just a warning & no assert. |
| */ |
| if (vul_tab == NULL) { |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "Hot patch flushing has been invoked " |
| "with a NULL table"); |
| return; |
| } |
| |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "flushing as a result of nudge\n"); |
| for (vul_idx = 0; vul_idx < num_vuls; vul_idx++) { |
| bool set_processed = false; |
| const hotp_vul_t *vul = &VUL(vul_tab, vul_idx); |
| |
| /* Nothing to remove or flush if the mode is off, i.e., nothing would
| * have been injected.
| * Note: Both the vul's current mode & its old mode should be off to skip
| * removal; if either is not, then that vulnerability's patches need
| * to be removed. In other words, if a patch was previously on (injected)
| * or is now on (to be injected), the corresponding bbs must be flushed;
| * this is for regular hotp, not for hotp_only, which has no flushing.
| */
| if (vul->mode == HOTP_MODE_OFF) { |
| if (old_modes == NULL) { |
| /* If there is no old_mode, skip right here. */ |
| continue; |
| } else if (old_modes[vul_idx] == HOTP_MODE_OFF) { |
| /* If old_mode exists, that must be off too in order to skip. */ |
| continue; |
| } |
| } |
| ASSERT(vul->mode == HOTP_MODE_DETECT || |
| vul->mode == HOTP_MODE_PROTECT || |
| (old_modes != NULL && (old_modes[vul_idx] == HOTP_MODE_DETECT || |
| old_modes[vul_idx] == HOTP_MODE_PROTECT))); |
| |
| for (set_idx = 0; set_idx < VUL(vul_tab, vul_idx).num_sets; set_idx++) { |
| /* Only the first matching set should be used; case 10248. */ |
| if (set_processed) |
| break; |
| |
| for (module_idx = 0; module_idx < SET(vul_tab, vul_idx, set_idx).num_modules; |
| module_idx++) { |
| module = &MODULE(vul_tab, vul_idx, set_idx, module_idx); |
| if (module->matched) { |
| /* If a specific module is mentioned, remove patches from
| * just that one.
| */
| if (mod_base != NULL && mod_base != module->base_address) |
| continue; |
| |
| set_processed = true; |
| /* Otherwise, flush all patch points in any module that |
| * matches. Nothing to flush in unmatched modules. |
| */ |
| for (ppoint_idx = 0; ppoint_idx < module->num_patch_points; |
| ppoint_idx++) { |
| ppoint = &module->patch_points[ppoint_idx]; |
| if (hotp_only) { |
| /* For a hotp_only patch, we can only remove that |
| * which has been injected, unlike the hotp mode |
| * where we might just be flushing out uninjected |
| * fragments or don't know which particular patch |
| * point has been injected (in hotp_only mode all |
| * of them should be injected if one is injected). |
| */ |
| if (ppoint->trampoline != NULL) |
| hotp_only_remove_patch(module, ppoint); |
| else { |
| /* If module is matched and mode is on, then |
| * hotp_only patch targeting the current |
| * ppoint must be injected unless it has |
| * been removed to handle loader-safety issues. |
| */ |
| ASSERT((ppoint->trampoline == NULL || |
| hotp_globals->ldr_safe_hook_removal) && |
| "hotp_only - double patch removal"); |
| } |
| } else { |
| app_pc flush_addr = hotp_ppoint_addr(module, ppoint); |
| |
| ASSERT_OWN_NO_LOCKS(); |
| LOG(GLOBAL, LOG_HOT_PATCHING, 4, |
| "flushing "PFX" due to a nudge\n", flush_addr); |
| flush_fragments_in_region_start(dcontext, flush_addr, |
| 1, false /* no lock */, |
| false /* keep futures */, |
| false/*exec still valid*/, |
| false/*don't force synchall*/ |
| _IF_DGCDIAG(NULL)); |
| flush_fragments_in_region_finish(dcontext, false); |
| /* TODO: ASSERT (flushed fragments have really been) |
| * flushed but how, using a vm_areas_overlap() |
| * or fragment_lookup() check? |
| */ |
| } |
| } |
| } |
| } |
| } |
| } |
| } |
| |
| /* TODO: make this use hotp_patch_point_areas & get rid of the 4-level nested
| * loops used in hotp_remove_patches_from_module.
| */
| static void |
| hotp_remove_hot_patches(const hotp_vul_t *vul_tab, const uint num_vuls, |
| const bool hotp_only, |
| const hotp_policy_mode_t *old_modes) |
| { |
| /* Old vulnerability modes shouldn't be used with hotp_only. */ |
| ASSERT(!DYNAMO_OPTION(hotp_only) || old_modes == NULL); |
| hotp_remove_patches_from_module(vul_tab, num_vuls, hotp_only, NULL, |
| old_modes); |
| } |
| |
| /* TODO: vlad wanted the ability to ignore some attributes during checking;
| * this is not for constraints, but for cases where he wants an ad-hoc patch
| * to fix something other than a vulnerability, say, broken code that is not
| * a vulnerability; for hot patches/constraints all attributes must be
| * checked, with no ignoring.
| */
| static bool |
| hotp_module_match(const hotp_module_t *module, const app_pc base, |
| const uint checksum, const uint timestamp, |
| const size_t image_size, const size_t code_size, |
| const uint64 file_version, const char *name, hotp_type_t type) |
| { |
| uint hash_index, computed_hash; |
| hotp_patch_point_hash_t *hash; |
| bool matched; |
| |
| ASSERT(module != NULL && base != NULL); |
| ASSERT(TESTANY(HOTP_TYPE_HOT_PATCH | HOTP_TYPE_GBOP_HOOK | HOTP_TYPE_PROBE, |
| type)); |
| |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "Matching module base "PFX" %s\n", base, name); |
| |
| /* For library-offset or export-function based patch points, the probe will
| * define a library by name (if needed we can expand this to include the
| * liveshield type of matching, but the client can do that outside). */
| /* gbop type patches provide a symbolic name to hook, so there is nothing
| * to match with other than the pe name.
| */
| if (TESTALL(HOTP_TYPE_PROBE, type) |
| IF_GBOP(|| (TESTALL(HOTP_TYPE_GBOP_HOOK, type)))) { |
| ASSERT(module->sig.pe_checksum == 0 && module->sig.pe_timestamp == 0 && |
| module->sig.pe_image_size == 0 && module->sig.pe_code_size == 0 && |
| module->sig.pe_file_version == 0 && |
| module->num_patch_points == 1 && module->patch_points != NULL && |
| module->num_patch_point_hashes == 0 && module->hashes == NULL); |
| if (name == NULL) { |
| /* if the only check is the module name, then a NULL name means |
| * the module wasn't matched; otherwise this check would be bogus. |
| */ |
| return false; |
| } else if (strncasecmp(module->sig.pe_name, name, MAXIMUM_PATH) == 0) { |
| /* FIXME: strcmp() is faster than the ignore case version, |
| * but we shouldn't rely on the PE name case to be the |
| * same in all versions of Windows. |
| */ |
| return true; |
| } else { |
| return false; |
| } |
| } |
| |
| /* These checks are for hot patch types, i.e., ones that have offset rvas |
| * specified for each known version. |
| * First stage check: PE timestamp, PE checksum, PE code_size, PE file |
| * version & PE name, i.e., signature match. |
| * |
| * FIXME: Today error handling of PE parsing is not done by the core, so |
| * unavailability of an attribute isn't recorded. Thus IGNORE and |
| * UNAVAILABLE are treated the same for module matching. When the core can |
| * handle it the UNAVAILABLE part should be removed from the checks, and |
| * checks for unavailability should be done. Case 9215 tracks the core not |
| * handling PE parsing for malformed files and their impact on hot patching. |
| */ |
| ASSERT(TESTALL(HOTP_TYPE_HOT_PATCH, type)); |
| |
| matched = module->sig.pe_timestamp == timestamp || |
| module->sig.pe_timestamp == PE_TIMESTAMP_IGNORE || |
| module->sig.pe_timestamp == PE_TIMESTAMP_UNAVAILABLE; |
| |
| matched = matched && (module->sig.pe_checksum == checksum || |
| module->sig.pe_checksum == PE_CHECKSUM_IGNORE || |
| module->sig.pe_checksum == PE_CHECKSUM_UNAVAILABLE); |
| |
| matched = matched && (module->sig.pe_image_size == image_size || |
| module->sig.pe_image_size == PE_IMAGE_SIZE_IGNORE || |
| module->sig.pe_image_size == PE_IMAGE_SIZE_UNAVAILABLE); |
| |
| matched = matched && (module->sig.pe_code_size == code_size || |
| module->sig.pe_code_size == PE_CODE_SIZE_IGNORE || |
| module->sig.pe_code_size == PE_CODE_SIZE_UNAVAILABLE); |
| |
| matched = matched && (module->sig.pe_file_version == file_version || |
| module->sig.pe_file_version == PE_FILE_VERSION_IGNORE || |
| module->sig.pe_file_version == PE_FILE_VERSION_UNAVAILABLE); |
| |
| matched = matched && ((strncmp(module->sig.pe_name, PE_NAME_IGNORE, |
| sizeof(PE_NAME_IGNORE)) == 0) || |
| (name == NULL && /* no name case */ |
| module->sig.pe_name[0] == PE_NAME_UNAVAILABLE) || |
| (name != NULL && |
| (strncmp(module->sig.pe_name, name, MAXIMUM_PATH) == 0))); |
| |
| if (matched) { |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "Module signature check passed\n"); |
| |
| /* First stage check was true, now let us do the second stage check, |
| * i.e., check the hashes of patch points in the module. |
| */ |
| ASSERT(module->num_patch_point_hashes > 0 && |
| module->hashes != NULL); |
| for (hash_index = 0; hash_index < module->num_patch_point_hashes; |
| hash_index++) { |
| hash = &module->hashes[hash_index]; |
| computed_hash = hotp_compute_hash(base, hash); |
| if (computed_hash != hash->hash_value) |
| return false; |
| } |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, "Patch point hash check passed\n"); |
| return true; |
| } |
| return false; |
| } |
| |
| /* Used to compute the hash of a patch point hash region. In hotp_only mode,
| * if there is an overlap between a hash region and a patch region, the
| * image bytes, stored at the top of the trampoline, are used to create a
| * copy of the image on which the crc32 is computed. In regular hotp mode,
| * crc32 is called directly.
| */
| static uint |
| hotp_compute_hash(app_pc base, hotp_patch_point_hash_t *hash) |
| { |
| uint crc, copy_size; |
| char *hash_buf, *copy; |
| app_pc hash_start, hash_end, vm_start, vm_end, src, dst, trampoline; |
| vmvector_iterator_t iterator; |
| hotp_offset_match_t *ppoint_desc; |
| |
| ASSERT(base != NULL && hash != NULL); |
| ASSERT(hash->start > 0 && hash->len > 0); |
| |
| hash_start = base + hash->start; |
| hash_end = hash_start + hash->len; |
| |
| /* If the hash region overlaps with any patch point region, then use the |
| * original image bytes to compute the crc32. Valid for hotp_only because |
| * in hotp mode, i.e., with a code cache, we don't modify the original code. |
| */ |
| if (DYNAMO_OPTION(hotp_only) && |
| vmvector_overlap(hotp_patch_point_areas, hash_start, hash_end)) { |
| |
| /* Make sure that the patch region size for hotp_only is correct. */ |
| ASSERT(HOTP_PATCH_REGION_SIZE == HOTP_ONLY_PATCH_REGION_SIZE); |
| |
| /* Allocate a buffer & copy the image bytes represented by the hash. |
| * This will include bytes modified by a prior hotp_only patch. |
| * Note: an extra 2 x HOTP_PATCH_REGION_SIZE is allocated to be used |
| * as overflow buffers at the front & back of the copy; makes handling |
| * the overlap scenarios (4 different ones) easy. |
| */ |
| copy_size = hash->len + (2 * HOTP_PATCH_REGION_SIZE); |
| copy = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, char, copy_size, |
| ACCT_HOT_PATCHING, PROTECTED); |
| hash_buf = copy + HOTP_PATCH_REGION_SIZE; |
| memcpy(hash_buf, hash_start, hash->len); |
| |
| /* Now, for each vmarea that overlaps, copy the original image bytes
| * into the buffer.
| * FIXME: we do a linear walk rather than walking over only those
| * regions that overlap, which is inefficient; see case 8211 about a
| * new vmvector iterator that walks over only overlapping regions.
| */
| vmvector_iterator_start(hotp_patch_point_areas, &iterator); |
| while (vmvector_iterator_hasnext(&iterator)) { |
| ppoint_desc = vmvector_iterator_next(&iterator, &vm_start, &vm_end); |
| trampoline = GLOBAL_PPOINT(ppoint_desc->vul_index, |
| ppoint_desc->set_index, |
| ppoint_desc->module_index, |
| ppoint_desc->ppoint_index).trampoline; |
| |
| /* If the patch isn't injected, overlap doesn't matter because |
| * the image hasn't been changed. Overlap with an uninjected patch |
| * region can only happen when loader safety is in progress during |
| * which a patch point is removed (only from the image, not |
| * hotp_patch_point_areas) and it is re-injected; the re-injection |
| * of the patch point will overlap with itself. See case 8222. |
| */ |
| if (trampoline == NULL) { |
| /* If the hash belongs to ppoint_desc, i.e., overlaps with itself,
| * then base and the module's base must match.
| */
| ASSERT(base == GLOBAL_MODULE(ppoint_desc->vul_index, |
| ppoint_desc->set_index, |
| ppoint_desc->module_index).base_address); |
| continue; |
| } |
| |
| /* If the trampoline exists, it better be a valid one, i.e., the |
| * patch corresponding to this vmarea must be injected. |
| */ |
| ASSERT(vmvector_overlap(hotp_only_tramp_areas, trampoline, |
| trampoline + HOTP_ONLY_TRAMPOLINE_SIZE)); |
| |
| /* The size of each vmarea in hotp_patch_point_areas must be |
| * equal to that of the patch region. |
| */ |
| ASSERT(vm_end - vm_start == HOTP_PATCH_REGION_SIZE); |
| |
| /* The module corresponding to this vm area (patch point) should |
| * have been matched by a vul. def. (in hotp_process_image). |
| */ |
| ASSERT(GLOBAL_MODULE(ppoint_desc->vul_index, ppoint_desc->set_index, |
| ppoint_desc->module_index).matched); |
| |
| /* There are a few scenarios for a hash & patch point to overlap:
| * the vmarea fully within the hash area, vice versa, partial
| * overlap below, partial overlap above, and an exact match on
| * either side or both.
| * Using an extra buffer the size of a patch region at the front
| * and back allows all the scenarios to be handled with a single
| * equation - eliminates messy code; worth allocating 10 bytes
| * extra.
| * Note: the extra buffer could be 1 byte shorter on either side, but
| * leaving it at patch point region size, just to be safe.
| */
| if (vm_start < hash_end && vm_end > hash_start) { |
| src = trampoline; |
| dst = (app_pc) hash_buf + (vm_start - hash_start); |
| |
| /* Just make sure that we don't trash anything when copying the |
| * original image over the bytes in hash_buf. |
| */ |
| ASSERT((dst >= (app_pc)copy) && |
| ((dst + HOTP_PATCH_REGION_SIZE) <= |
| ((app_pc)copy + copy_size))); |
| |
| /* If the hash overlaps with a patch point region, then the |
| * current image & the copy should be different, i.e., a patch |
| * must exist at that point. |
| */ |
| ASSERT(memcmp(dst, src, HOTP_PATCH_REGION_SIZE) != 0); |
| |
| /* CAUTION: this memcpy assumes the location & size of |
| * app code copy in the trampoline, i.e., the first 5 bytes of |
| * trampoline contain the original app code; so any changes |
| * should be kept in sync. |
| */ |
| memcpy(dst, src, HOTP_PATCH_REGION_SIZE); |
| } |
| /* FIXME: if the iterator guaranteed order, we can break out after |
| * the first non-match - optimization. |
| */ |
| } |
| vmvector_iterator_stop(&iterator); |
| crc = crc32(hash_buf, hash->len); |
| HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, copy, char, copy_size, |
| ACCT_HOT_PATCHING, PROTECTED); |
| } else { |
| /* No overlap; image is unmodified, so image's crc32 should be valid. */ |
| crc = crc32((char *)hash_start, hash->len); |
| } |
| return crc; |
| } |
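| /* Layout of the temporary buffer used in hotp_compute_hash above (sketch;
| * REGION stands for HOTP_PATCH_REGION_SIZE):
| *
| *   copy                                                copy + copy_size
| *   |<-- REGION -->|<---------- hash->len ---------->|<-- REGION -->|
| *    front overflow     hash_buf (copy of image)       back overflow
| *
| * The overflow pads absorb patch regions that only partially overlap the
| * hash range, so the single interval test (vm_start < hash_end &&
| * vm_end > hash_start) plus one fixed-size memcpy handles every overlap
| * scenario.
| */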
| |
| /* TODO: this function should be used for startup & nudge dll list walking,
| * dll loading and unloading.
| * TODO: assert somehow that every time this function is invoked there must
| * be a flush preceding or succeeding it, except at startup.
| * TODO: os specific routine; move to win32/
| * TODO: this function is called when vm_areas_init() is invoked, but
| * hotp_init() is called after vm_areas_init()! bogus - check
| * other startup scenarios like retakeover to see if policy reading
| * & activation get out of order; this is the same issue that vlad
| * pointed out: make sure that process_image() is called after
| * hotp_init().
| * TODO: process_{image,mmap}() should never be called on hot patch dlls
| * because dr is loading them; assert for this somewhere to prevent
| * assumption violation bugs.
| */
| static void |
| hotp_process_image_helper(const app_pc base, const bool loaded, |
| const bool own_hot_patch_lock, const bool just_check, |
| bool *needs_processing, const thread_record_t **all_threads, |
| const int num_threads, const bool ldr_safety, |
| vm_area_vector_t *toflush); |
| void |
| hotp_process_image(const app_pc base, const bool loaded, |
| const bool own_hot_patch_lock, const bool just_check, |
| bool *needs_processing, const thread_record_t **all_threads, |
| const int num_threads) |
| { |
| hotp_process_image_helper(base, loaded, own_hot_patch_lock, just_check, |
| needs_processing, all_threads, num_threads, false, NULL); |
| } |
| |
| /* Helper routine for seeing if point is in hotp_ppoint_vec */ |
| bool |
| hotp_ppoint_on_list(app_rva_t ppoint, |
| app_rva_t *hotp_ppoint_vec, uint hotp_ppoint_vec_num) |
| { |
| bool on_list = false; |
| uint i; |
| /* We assume there are at most a handful of these so we don't sort. |
| * If we add GBOP hooks we may want to do that. */ |
| #ifdef GBOP |
| ASSERT(DYNAMO_OPTION(gbop) == 0); |
| #endif |
| ASSERT(ppoint != 0); |
| ASSERT(hotp_ppoint_vec != NULL && hotp_ppoint_vec_num > 0); |
| if (hotp_ppoint_vec == NULL) |
| return false; |
| for (i = 0; i < hotp_ppoint_vec_num; i++) { |
| if (ppoint == hotp_ppoint_vec[i]) { |
| on_list = true; |
| break; |
| } |
| } |
| return on_list; |
| } |
| |
| /* Returns true if there is a persistent cache in [base,base+image_size) that
| * may contain code for any of the patch points of the module.
| */
| static bool |
| hotp_perscache_overlap(uint vul, uint set, uint module, app_pc base, size_t image_size) |
| { |
| vmvector_iterator_t vmvi; |
| coarse_info_t *info; |
| uint pp; |
| bool flush_perscache = false; |
| ASSERT(DYNAMO_OPTION(use_persisted_hotp)); |
| ASSERT(!DYNAMO_OPTION(hotp_only)); |
| vm_area_coarse_iter_start(&vmvi, base); |
| /* We have a lot of nested linear walks here, esp. when called from |
| * hotp_process_image_helper inside nested loops, but typically the coarse |
| * iterator involves one binary search and only one match, and |
| * hotp_ppoint_on_list and the pp for loop here only a few entries each; |
| * so this routine shouldn't be a perf bottleneck by itself. |
| */ |
| while (!flush_perscache && |
| vm_area_coarse_iter_hasnext(&vmvi, base+image_size)) { |
| info = vm_area_coarse_iter_next(&vmvi, base+image_size); |
| ASSERT(info != NULL); |
| if (info == NULL) /* be paranoid */ |
| continue; |
| if (info->hotp_ppoint_vec == NULL) |
| flush_perscache = true; |
| else { |
| ASSERT(info->persisted); |
| for (pp = 0; pp < GLOBAL_MODULE(vul, set, module).num_patch_points; pp++) { |
| if (!hotp_ppoint_on_list(GLOBAL_PPOINT(vul, set, module, pp).offset, |
| info->hotp_ppoint_vec, |
| info->hotp_ppoint_vec_num)) { |
| flush_perscache = true; |
| break; |
| } |
| } |
| } |
| /* Should be able to ignore 2ndary unit */ |
| ASSERT(info->non_frozen == NULL || info->non_frozen->hotp_ppoint_vec == NULL); |
| } |
| vm_area_coarse_iter_stop(&vmvi); |
| return flush_perscache; |
| } |
| |
| /* This helper exists mainly to handle the loader safety case for adding
| * ppoint areas. vm_areas should be added to ppoint_areas only during module
| * load/unload (including the initial stack walk) and during a policy read
| * nudge, not during a reinjection during loader safety.
| * The same holds for removal, but today that isn't an
| * issue because loader safety uses hotp_remove_patches_from_module() to do
| * it, which doesn't modify ppoint areas.
| * FIXME: once hotp_inject_patches_into_module() is implemented
| * based on loaded_module_areas and used in hotp_only_mem_prot_change() instead
| * of hotp_process_image_helper, this can go.
| */
| static void |
| hotp_process_image_helper(const app_pc base, const bool loaded, |
| const bool own_hot_patch_lock, const bool just_check, |
| bool *needs_processing, const thread_record_t **all_threads, |
| const int num_threads_arg, const bool ldr_safety, |
| vm_area_vector_t *toflush) |
| { |
| uint vul_idx, set_idx, module_idx, ppoint_idx; |
| hotp_module_t *module; |
| dcontext_t *dcontext = get_thread_private_dcontext(); |
| uint checksum, timestamp; |
| size_t image_size = 0, code_size; |
| uint64 file_version; |
| module_names_t *names = NULL; |
| const char *name = NULL, *pe_name = NULL, *mod_name = NULL; |
| int num_threads = num_threads_arg; |
| bool any_matched = false; |
| bool flush_perscache = false; |
| bool perscache_range_overlap = false; |
| |
| ASSERT(base != NULL); /* Is it a valid dll in loaded memory? */ |
| |
| LOG(GLOBAL, LOG_HOT_PATCHING, 2, "hotp_process_image "PFX" %s w/ %d vuls\n", |
| base, loaded ? "load" : "unload", NUM_GLOBAL_VULS); |
| |
| ASSERT(dcontext != GLOBAL_DCONTEXT); |
| /* note that during startup processing due to |
| * find_executable_vm_areas() dcontext can in fact be NULL |
| */ |
| if (dcontext != NULL && dcontext->nudge_thread) /* Fix for case 5367. */ |
| return; |
| #ifdef WINDOWS |
| if (num_threads == 0 && !just_check && DYNAMO_OPTION(hotp_only)) { |
| /* FIXME PR 225578: dr_register_probes passes 0 for the thread count
| * b/c post-init probes are NYI; but to enable at-your-own-risk
| * probes we relax the assert.
| */
| ASSERT_CURIOSITY_ONCE(!dynamo_initialized && |
| "post-init probes at your own risk: PR 225578!"); |
| num_threads = HOTP_ONLY_NUM_THREADS_AT_INIT; |
| /* For hotp_only, all threads should be suspended before patch injection. |
| * However, at this point in startup, callback hooks aren't in place and |
| * we don't know if any other thread is running around that the core |
| * doesn't know about. This would be rare and with early injection, rarer. |
| * However, if that thread is executing in a region being patched we can |
| * fail spectacularly. Curiosity in the meanwhile. |
| * Also, to be on the safe side grab the synchronization locks. |
| */ |
| ASSERT_CURIOSITY(check_sole_thread()); |
| ASSERT(!own_hot_patch_lock); /* can't get hotp lock before sync locks */ |
| mutex_lock(&all_threads_synch_lock); |
| mutex_lock(&thread_initexit_lock); |
| } |
| #endif |
| |
| if (!own_hot_patch_lock) |
| write_lock(&hotp_vul_table_lock); |
| ASSERT_OWN_READWRITE_LOCK(true, &hotp_vul_table_lock); |
| |
| /* Caller doesn't want to process the image, but to know if it matches. */ |
| if (just_check) { |
| /* Only hotp_only needs this; not regular hot patching. */ |
| ASSERT(DYNAMO_OPTION(hotp_only)); |
| ASSERT(needs_processing != NULL); |
| *needs_processing = false; /* will be set to true if needed */
| } else |
| ASSERT(needs_processing == NULL); |
| |
| /* get module information from PE once (case 7990) */ |
| /* FIXME: once all pe information is available in loaded_module_areas, use
| * that here.
| * FIXME: file_version is obtained by walking the resource section, which is
| * expensive; the same is true for code_size to some extent, i.e., expensive
| * but not that much. So we may be better off computing them in separate
| * routines predicated by the first check - and putting all of these into
| * hotp_get_module_sig().
| */
| os_get_module_info_lock(); |
| if (!os_get_module_info_all_names(base, &checksum, ×tamp, &image_size, |
| &names, &code_size, &file_version)) { |
| /* FIXME: case 9778 - module info is now obtained from |
| * loaded_module_areas vector, which doesn't seem to have hotp dll |
| * info, so we hit this. As a first step this is converted to a log |
| * to make tests work; will have to read it from pe directly (using |
| * try/except) if it isn't a hotp dll - if that doesn't work then be |
| * curious. Also, need to find out if it was triggered only for hotp |
| * dlls. */ |
| LOG(GLOBAL, LOG_HOT_PATCHING, 2, "unreadable PE base ("PFX")?\n", base); |
| os_get_module_info_unlock(); |
| goto hotp_process_image_exit; |
| } else { |
| /* Make our own copy of both the pe name and the general module name.
| * This is because the pe name can be null for executables, which is fine
| * for liveshields, but not for gbop or the probe api - they just specify a
| * module name, so we have to use any name that is available. Note: as
| * of today, gbop hasn't been done on executables, which is why this
| * worked - it is broken for hooks in exes - a FIXME, but gbop is going
| * away anyway. */
| pe_name = dr_strdup(names->module_name HEAPACCT(ACCT_HOT_PATCHING)); |
| mod_name = dr_strdup(GET_MODULE_NAME(names) HEAPACCT(ACCT_HOT_PATCHING)); |
| os_get_module_info_unlock(); |
| /* These values can't be read in from a module, they are used by the |
| * patch writer to hint to the core to ignore the corresponding checks. |
| */ |
| ASSERT_CURIOSITY(timestamp != PE_TIMESTAMP_IGNORE && |
| checksum != PE_CHECKSUM_IGNORE && |
| image_size != PE_IMAGE_SIZE_IGNORE); |
| } |
| #ifdef WINDOWS |
| DOCHECK(1, { |
| if (TEST(ASLR_DLL, DYNAMO_OPTION(aslr)) && |
| TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache))) { |
| /* case 8507 - the timestamp and possibly checksum of the current mapping, |
| possibly ASLRed, may not be the same as the application DLL */ |
| uint pe_timestamp; |
| uint pe_checksum; |
| bool ok = os_get_module_info(base, &pe_checksum, &pe_timestamp, NULL, NULL, |
| NULL, NULL); |
| ASSERT_CURIOSITY(timestamp != 0); |
| /* Note that if we don't find the DLL in the module list, |
| * we'll keep using the previously found checksum and |
| * timestamp. Although normally all DLLs are expected to be |
| * listed, currently that is done only with ASLR_TRACK_AREAS. |
| */ |
| /* case 5381: we don't ASSERT(ok) b/c hotpatch DLLs aren't listed in our |
| * own module areas, so we don't always find all modules */ |
| /* with the current scheme the checksum is still the original DLLs checksum |
| * though it won't check, and the timestamp is bumped by one second |
| */ |
| ASSERT(!ok || pe_checksum == checksum); |
| ASSERT_CURIOSITY(!ok || |
| pe_timestamp == timestamp || |
| pe_timestamp == timestamp + 1); |
| } |
| }); |
| #endif /* WINDOWS */ |
| |
| if (!DYNAMO_OPTION(hotp_only)) { |
| perscache_range_overlap = |
| executable_vm_area_persisted_overlap(base, base+image_size); |
| } |
| |
| /* TODO: assert that 'base' is the module's base address;
| * get_dll_short_name() expects this; it will be used for the sig check -
| * use the fn() that gets only what is in the PE.
| * FIXME: eliminate this n^4 loop for each module {load,unload}; case 10683
| */
| for (vul_idx = 0; vul_idx < NUM_GLOBAL_VULS; vul_idx++) { |
| bool set_matched = false; |
| |
| if (TESTALL(HOTP_TYPE_PROBE, GLOBAL_VUL(vul_idx).type) |
| IF_GBOP(|| (TESTALL(HOTP_TYPE_GBOP_HOOK, GLOBAL_VUL(vul_idx).type)))) { |
| /* FIXME PR 533522: state in the docs/comments which name is |
| * used where! pe_name vs mod_name |
| */ |
| name = mod_name; |
| } else { |
| ASSERT(TESTALL(HOTP_TYPE_HOT_PATCH, GLOBAL_VUL(vul_idx).type)); |
| /* FIXME PR 533522: state in the docs/comments which name is |
| * used where! pe_name vs mod_name |
| */ |
| name = pe_name; |
| } |
| |
| for (set_idx = 0; set_idx < GLOBAL_VUL(vul_idx).num_sets; set_idx++) { |
| /* Case 10248 - multiple sets can match, but only the first such set
| * should be used, the rest discarded. In the old model only one
| * set matched, but it was changed to let the patch writer
| * progressively relax the matching criteria. */
| if (set_matched) |
| break; |
| |
| for (module_idx = 0; |
| module_idx < GLOBAL_SET(vul_idx, set_idx).num_modules; |
| module_idx++) { |
| module = &GLOBAL_MODULE(vul_idx, set_idx, module_idx); |
| |
| /* When unloading a matched dll in hotp_only mode, all injected
| * patches must be removed before proceeding any further.
| * Otherwise hotp_module_match() will fail in the patch point hash
| * computation part because injection has changed the image.
| */
| if (base == module->base_address && !loaded) { |
| if (just_check) { /* caller doesn't want processing */ |
| *needs_processing = true; |
| goto hotp_process_image_exit; |
| } |
| |
| /* For hotp_only, if a module matches, all patch points
| * in it must be removed in one shot, just as they are
| * injected in one shot.
| */
| if (GLOBAL_VUL(vul_idx).mode == HOTP_MODE_DETECT || |
| GLOBAL_VUL(vul_idx).mode == HOTP_MODE_PROTECT) { |
| for (ppoint_idx = 0; ppoint_idx < module->num_patch_points; |
| ppoint_idx++) { |
| hotp_patch_point_t *ppoint; |
| ppoint = &module->patch_points[ppoint_idx]; |
| |
| if (DYNAMO_OPTION(hotp_only)) { |
| if (ppoint->trampoline != NULL) { |
| hotp_only_remove_patch(module, ppoint); |
| } else { |
| /* If module is matched & mode is on, then the |
| * patch must be injected unless it has been |
| * removed to handle loader-safety issues. |
| */ |
| ASSERT(hotp_globals->ldr_safe_hook_removal && |
| "hotp_only - double patch removal"); |
| } |
| } |
| /* xref case 10736. |
| * For hotp_only, module load & inject, and |
| * similarly, module unload and remove are done |
| * together, so hot_patch_point_areas won't be out |
| * of synch. However, for hotp with fcache, a |
| * module unload can remove the patches from |
| * hotp_patch_point_areas before flushing them. |
| * This can prevent the flush from happening if |
| * hotp_patch_point_areas is used for it (which |
| * isn't done today; case 10728). It can also |
| * result in voiding a patch injection for a new bb |
| * in that module, i.e., module can be without a |
| * patch for a brief period till it is unloaded. |
| */ |
| hotp_ppoint_areas_remove( |
| hotp_ppoint_addr(module, ppoint)); |
| } |
| } |
| |
| /* Once hotp_only patches are removed, the module must |
| * match at this point. |
| * TODO: multiple vulnerabilities targeting the same module |
| * & whose hashes overlap, won't be {inject,remove}d |
| * because the image gets modified with the injection |
| * of the first one and the hash check for the second |
| * one will fail. |
| */ |
| ASSERT_CURIOSITY(hotp_module_match(module, base, checksum, |
| timestamp, image_size, |
| code_size, file_version, |
| name, |
| GLOBAL_VUL(vul_idx).type)); |
| } |
| |
| /* FIXME: there's no reason to compute whether an OFF patch |
| * matches; just wasted cycles, as we come back here on |
| * any path that later turns the patch on, and no external |
| * stats rely on knowing whether an off patch matches. |
| */ |
| if (hotp_module_match(module, base, checksum, timestamp, |
| image_size, code_size, file_version, name, |
| GLOBAL_VUL(vul_idx).type)) { |
| set_matched = true; |
| if (just_check) { /* caller doesn't want processing */ |
| *needs_processing = true; |
| goto hotp_process_image_exit; |
| } |
| |
| if (loaded) { /* loading dll */ |
| bool patch_enabled = |
| (GLOBAL_VUL(vul_idx).mode == HOTP_MODE_DETECT || |
| GLOBAL_VUL(vul_idx).mode == HOTP_MODE_PROTECT); |
| LOG(GLOBAL, LOG_HOT_PATCHING, 1, |
| "activating vulnerability %s while loading %s\n", |
| GLOBAL_VUL(vul_idx).vul_id, module->sig.pe_name); |
| |
| any_matched = true; |
| /* Case 9970: See if we need to flush any |
| * perscaches in the region. Once we decide to flush |
| * we're going to flush everything. We avoid the later |
| * flush on a nudge in vm_area_allsynch_flush_fragments(). |
| * We currently come here for OFF patches, so we explicitly |
| * check for that before flushing. |
| */ |
| if (patch_enabled && |
| perscache_range_overlap && !flush_perscache && |
| DYNAMO_OPTION(use_persisted_hotp)) { |
| flush_perscache = |
| hotp_perscache_overlap(vul_idx, set_idx, module_idx, |
| base, image_size); |
| } |
| |
| /* TODO: check if all modules in the current
| * vulnerability are active; if so, activate the
| * policy. Also, add patch points to lookup structures
| * only if the entire vulnerability is active;
| * needed to enforce atomicity of patch injection.
| */
| /* The base is used to find the runtime address of the
| * patch offset in the current lookup routine; till an
| * offset lookup hash is constructed, the base address
| * is needed because the offset in the patch point
| * structure is read-only data that would have to be fixed
| * up to point to the runtime address. Even then, the flush
| * routine would need to know which offset, i.e.,
| * runtime offset, to flush; so this base_address is
| * needed, or a runtime data field must be created.
| */
| module->base_address = base; |
| module->matched = true; |
| hotp_set_policy_status(vul_idx, HOTP_INJECT_PENDING); |
| |
| /* gbop type hooks don't have patch offsets defined,
| * as they use function names; we must set the offsets here,
| * otherwise patching will blow up.
| */
| if (TESTALL(HOTP_TYPE_GBOP_HOOK, GLOBAL_VUL(vul_idx).type)) { |
| /* FIXME: assert on all patch point fields being 0, |
| * except precedence. |
| * also, ASSERT on func_addr & func_name; |
| */ |
| app_pc func_addr; |
| app_rva_t offset; |
| |
| /* gbop is only in -client mode, i.e., hotp_only */ |
| ASSERT(DYNAMO_OPTION(hotp_only)); |
| |
| func_addr = (app_pc) get_proc_address((module_handle_t)base, |
| GLOBAL_VUL(vul_idx).vul_id); |
| if (func_addr != NULL) { /* fix for case 7969 */ |
| ASSERT(func_addr > base); |
| offset = func_addr - base; |
| module->patch_points[0].offset = offset; |
| } else { |
| /* Some windows versions won't have some gbop |
| * hook funcs or get_proc_address might just |
| * fail; either way just ignore such hooks. |
| * TODO: think about this - design issue. |
| */ |
| module->base_address = NULL; |
| module->matched = false; |
| continue; |
| } |
| } |
| |
| /* For hotp_only, if a module matches, all patch points
| * in it must be injected in one shot.
| */
| if (patch_enabled) { |
| hotp_offset_match_t ppoint_desc; |
| |
| ppoint_desc.vul_index = vul_idx; |
| ppoint_desc.set_index = set_idx; |
| ppoint_desc.module_index = module_idx; |
| for (ppoint_idx = 0; |
| ppoint_idx < module->num_patch_points; |
| ppoint_idx++) { |
| ppoint_desc.ppoint_index = ppoint_idx; |
| |
| /* ldr_safety can happen only for hotp_only. */ |
| ASSERT(DYNAMO_OPTION(hotp_only) || !ldr_safety); |
| |
| /* Don't re-add a patch point to the vector |
| * during patch injection while handling |
| * loader safe injection. |
| */ |
| if (!ldr_safety) |
| hotp_ppoint_areas_add(&ppoint_desc); |
| |
| if (DYNAMO_OPTION(hotp_only)) { |
| hotp_only_inject_patch(&ppoint_desc, |
| all_threads, |
| num_threads); |
| } |
| } |
| } |
| } else { /* unloading dll */ |
| /* TODO: same issues as in the 'if' block above, but in
| * reverse.
| */
| module->base_address = NULL; |
| module->matched = false; |
| hotp_set_policy_status(vul_idx, HOTP_INJECT_NO_MATCH); |
| } |
| } |
| } |
| } |
| } |
| |
| if (!DYNAMO_OPTION(use_persisted_hotp)) /* else we check in loop above */ |
| flush_perscache = any_matched && perscache_range_overlap; |
| if (flush_perscache) { |
| ASSERT(any_matched && perscache_range_overlap); |
| ASSERT(!DYNAMO_OPTION(hotp_only)); |
| /* During startup we process hotp before we add exec areas, so we |
| * should only get a match in a later nudge, when we pass in toflush. |
| */ |
| ASSERT(dynamo_initialized); |
| ASSERT(toflush != NULL); |
| #ifdef WINDOWS |
| ASSERT(dcontext->nudge_target != NULL); |
| #else |
| ASSERT_NOT_REACHED(); /* No nudge on Linux, should only be here for nudge. */ |
| #endif |
| if (toflush != NULL) { /* be paranoid (we fail otherwise though) */ |
| LOG(GLOBAL, LOG_HOT_PATCHING, 2, |
| "Hotp for "PFX"-"PFX" %s overlaps perscache, flushing\n", |
| base, base+image_size, name); |
| /* As we hold the hotp_vul_table_lock we cannot flush here; |
| * instead we add to a pending-flush vmvector. |
| */ |
| vmvector_add(toflush, base, base+image_size, NULL); |
| STATS_INC(hotp_persist_flush); |
| /* FIXME: we could eliminate this and rely on our later flush of the |
| * patch area, as we're only coming here for nudges; we technically |
| * only need an explicit check when loading a perscache, as long as |
| * hotp defs are set up first. |
| */ |
| } |
| } |
| |
| hotp_process_image_exit: |
| if (pe_name != NULL) |
| dr_strfree(pe_name HEAPACCT(ACCT_HOT_PATCHING)); |
| if (mod_name != NULL) |
| dr_strfree(mod_name HEAPACCT(ACCT_HOT_PATCHING)); |
| /* Don't unlock if the lock was already held before reaching this
| * function, i.e., only unlock if own_hot_patch_lock is false.
| */
| /* TODO: or does this go after flush? */ |
| if (!own_hot_patch_lock) |
| write_unlock(&hotp_vul_table_lock); |
| /* TODO: also there are some race conditions with nudging & policy lookup/ |
| * injection; sort those out; flushing before or after reading the |
| * policy plays a role too. |
| */ |
| #ifdef WINDOWS |
| if (num_threads == HOTP_ONLY_NUM_THREADS_AT_INIT) { |
| ASSERT(DYNAMO_OPTION(hotp_only)); |
| ASSERT(!just_check); |
| ASSERT_CURIOSITY(check_sole_thread()); |
| mutex_unlock(&thread_initexit_lock); |
| mutex_unlock(&all_threads_synch_lock); |
| } |
| #endif |
| } |
| |
| /* If vec==NULL, returns the number of patch points for the |
| * matched vuls in [start,end). |
| * Else, stores in vec the offsets for all the matched patch points in [start,end). |
| * Returns -1 if vec!=NULL and vec_num is too small (still fills it up). |
| * For now this routine assumes that [start,end) is contained in a single module. |
| * The caller must own the hotp_vul_table_lock (as a read lock). |
| */ |
| static int |
| hotp_patch_point_persist_helper(const app_pc start, const app_pc end, |
| app_rva_t *vec, uint vec_num) |
| { |
| uint num_ppoints = 0; |
| uint vul, set, module, pp; |
| /* FIXME: check [start,end) instead of module */ |
| app_pc modbase = get_module_base(start); |
| ASSERT(modbase == get_module_base(end)); |
| ASSERT(start != NULL); /* only support single module for now */ |
| ASSERT_OWN_READ_LOCK(true, &hotp_vul_table_lock); |
| if (GLOBAL_VUL_TABLE == NULL) |
| return 0; |
| /* FIXME: once hotp_patch_point_areas is not just hotp_only, use it here */ |
| for (vul = 0; vul < NUM_GLOBAL_VULS; vul++) { |
| bool set_processed = false; |
| |
| /* Ignore if off or dll wasn't loaded */ |
| if (GLOBAL_VUL(vul).mode == HOTP_MODE_OFF || |
| GLOBAL_VUL(vul).hotp_dll_base == NULL) |
| continue; |
| for (set = 0; set < GLOBAL_VUL(vul).num_sets; set++) { |
| /* Only the first matching set should be used; case 10248. */ |
| if (set_processed) |
| break; |
| |
| for (module = 0; module < GLOBAL_SET(vul, set).num_modules; module++) { |
| if (!GLOBAL_MODULE(vul, set, module).matched || |
| modbase != GLOBAL_MODULE(vul, set, module).base_address) |
| continue; |
| set_processed = true; |
| if (vec == NULL) { |
| num_ppoints += GLOBAL_MODULE(vul, set, module).num_patch_points; |
| } else { |
| for (pp = 0; pp < GLOBAL_MODULE(vul, set, module).num_patch_points; |
| pp++) { |
| if (num_ppoints >= vec_num) { |
| /* It's ok to get here, just currently no callers do */ |
| ASSERT_NOT_TESTED(); |
| return -1; |
| } |
| vec[num_ppoints++] = GLOBAL_PPOINT(vul, set, module, pp).offset; |
| } |
| } |
| } |
| } |
| } |
| return num_ppoints; |
| } |
| |
| /* Returns the number of patch points for the matched vuls in [start,end). |
| * For now this routine assumes that [start,end) is contained in a single module. |
| * The caller must own the hotp_vul_table_lock (as a read lock). |
| */ |
| int |
| hotp_num_matched_patch_points(const app_pc start, const app_pc end) |
| { |
| return hotp_patch_point_persist_helper(start, end, NULL, 0); |
| } |
| |
| /* Stores in vec the offsets for all the matched patch points in [start,end). |
| * Returns -1 if vec_num is too small (still fills it up). |
| * For now this routine assumes that [start,end) is contained in a single module. |
| * The caller must own the hotp_vul_table_lock (as a read lock). |
| */ |
| int |
| hotp_get_matched_patch_points(const app_pc start, const app_pc end, |
| app_rva_t *vec, uint vec_num) |
| { |
| return hotp_patch_point_persist_helper(start, end, vec, vec_num); |
| } |
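| /* A typical caller pattern for the two routines above (hypothetical
| * sketch; the caller must hold hotp_vul_table_lock as a read lock across
| * both calls, which also guarantees the count can't change in between):
| *
| *     int num = hotp_num_matched_patch_points(start, end);
| *     if (num > 0) {
| *         app_rva_t *vec = HEAP_ARRAY_ALLOC(GLOBAL_DCONTEXT, app_rva_t,
| *                                           num, ACCT_HOT_PATCHING,
| *                                           PROTECTED);
| *         if (hotp_get_matched_patch_points(start, end, vec, num) != -1) {
| *             // ... use vec[0..num-1] ...
| *         }
| *         HEAP_ARRAY_FREE(GLOBAL_DCONTEXT, vec, app_rva_t, num,
| *                         ACCT_HOT_PATCHING, PROTECTED);
| *     }
| */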
| |
| /* Checks whether any matched patch point in [start, end) is not listed on |
| * hotp_ppoint_vec. If hotp_ppoint_vec is NULL just checks whether any patch |
| * point is matched in the region. For now this routine assumes that |
| * [start,end) is contained in a single module. |
| */ |
| bool |
| hotp_point_not_on_list(const app_pc start, const app_pc end, bool own_hot_patch_lock, |
| app_rva_t *hotp_ppoint_vec, uint hotp_ppoint_vec_num) |
| { |
| /* We could use hotp_process_image_helper()'s just_check but would have |
| * to add hotp_ppoint_vec arg; plus we don't care about module matching. |
| */ |
| bool not_on_list = false; |
| uint vul, set, module, pp; |
| /* FIXME: check [start,end) instead of module */ |
| app_pc modbase = get_module_base(start); |
| DEBUG_DECLARE(bool matched = false;) |
| ASSERT(modbase == get_module_base(end)); |
| if (!own_hot_patch_lock) |
| read_lock(&hotp_vul_table_lock); |
| ASSERT_OWN_READWRITE_LOCK(true, &hotp_vul_table_lock); |
| if (GLOBAL_VUL_TABLE == NULL) |
| goto hotp_policy_list_exit; |
| /* FIXME: I would make an iterator to share w/ patch_point_persist_helper but |
| * this many-nested loop lookup should go away in general ASAP and be |
| * replaced w/ hotp_patch_point_areas which is currently only hotp_only. |
| */ |
| for (vul = 0; vul < NUM_GLOBAL_VULS; vul++) { |
| bool set_processed = false; |
| |
| /* Ignore if off or dll wasn't loaded */ |
| if (GLOBAL_VUL(vul).mode == HOTP_MODE_OFF || |
| GLOBAL_VUL(vul).hotp_dll_base == NULL) |
| continue; |
| for (set = 0; set < GLOBAL_VUL(vul).num_sets; set++) { |
| /* Only the first matching set should be used; case 10248. */ |
| if (set_processed) |
| break; |
| |
| for (module = 0; module < GLOBAL_SET(vul, set).num_modules; module++) { |
| if (!GLOBAL_MODULE(vul, set, module).matched || |
| modbase != GLOBAL_MODULE(vul, set, module).base_address) |
| continue; |
| /* We have a match; only ok if on the list */ |
| DODEBUG({ matched = true; }); |
| set_processed = true; |
| ASSERT(!not_on_list); /* should have exited if not on list */ |
| not_on_list = true; |
| if (hotp_ppoint_vec != NULL && DYNAMO_OPTION(use_persisted_hotp)) { |
| for (pp = 0; pp < GLOBAL_MODULE(vul, set, module).num_patch_points; |
| pp++) { |
| if (!hotp_ppoint_on_list(GLOBAL_PPOINT(vul, set, module, pp). |
| offset, hotp_ppoint_vec, |
| hotp_ppoint_vec_num)) |
| goto hotp_policy_list_exit; |
| } |
| not_on_list = false; |
| } else |
| goto hotp_policy_list_exit; |
| } |
| } |
| } |
| hotp_policy_list_exit: |
| if (!own_hot_patch_lock) |
| read_unlock(&hotp_vul_table_lock); |
| DOSTATS({ |
| if (matched && !not_on_list) { |
| ASSERT(hotp_ppoint_vec != NULL && DYNAMO_OPTION(use_persisted_hotp)); |
| STATS_INC(perscache_hotp_conflict_avoided); |
| } |
| }); |
| return not_on_list; |
| } |
| |
| /* TODO: change this to walk the new PE list (not for now though); needed only
| * during nudge; the startup walk is already done by the core, so piggyback
| * on that and call hotp_process_image() there; basically, get rid
| * of the need to walk the loader list.
| * Note: for -probe_api, we walk the module list at startup
| * because client init is done after vmareas_init, i.e., after
| * scanning for modules in memory and processing them.
| */
| static void |
| hotp_walk_loader_list(thread_record_t **all_threads, const int num_threads, |
| vm_area_vector_t *toflush, bool probe_init) |
| { |
| /* This routine will go away; till then it needs to compile on linux. Not
| * walking the module list on linux means that no vulnerability will get
| * activated for injection; that is ok as we aren't trying to build a linux
| * version now.
| */
| #ifdef WINDOWS |
| /* TODO: this routine uses PEB, etc, this has to be os specific */ |
| PEB *peb = get_own_peb(); |
| PEB_LDR_DATA *ldr = peb->LoaderData; |
| LIST_ENTRY *e, *start; |
| LDR_MODULE *mod; |
| |
| /* For hotp_only, all_threads can be valid, i.e., all known threads may be |
| * suspended. Even if they are not, all synch locks will be held, so that |
| * module processing can happen without races. Check for that. |
| * Note: for -probe_api, this routine can be called during dr init time, |
| * i.e., synch locks won't be held, so we shouldn't assert. |
| */ |
| if (!probe_init) { |
| ASSERT_OWN_MUTEX(DYNAMO_OPTION(hotp_only), &all_threads_synch_lock); |
| ASSERT_OWN_MUTEX(DYNAMO_OPTION(hotp_only), &thread_initexit_lock); |
| } |
| |
| /* Flushing of pcaches conflicting with hot patches is handled for dll |
| * loads by the pcache loads. Conflicts at hotp_init time can't happen as |
| * pcaches won't be loaded then (they are loaded in vm_areas_init which |
| * comes afterwards). However for nudging and client init |
| * (dr_register_probes) this is needed because pcaches can be loaded by |
| * then. Note even though client init happens during startup, it happens |
| * after vm_areas_init, hence pcaches can be loaded. PR 226578 tracks |
| * implementing pcache flushes for probe api - till then this assert is |
| * relaxed. |
| */ |
| #ifdef CLIENT_INTERFACE |
| ASSERT(toflush != NULL || DYNAMO_OPTION(hotp_only) || |
| (DYNAMO_OPTION(probe_api) && !DYNAMO_OPTION(use_persisted))); |
| #else |
| ASSERT(toflush != NULL || DYNAMO_OPTION(hotp_only)); |
| #endif |
| |
| start = &ldr->InLoadOrderModuleList; |
| for (e = start->Flink; e != start; e = e->Flink) { |
| mod = (LDR_MODULE*) e; |
| |
| /* TODO: ASSERT that the module is loaded? */ |
| hotp_process_image_helper(mod->BaseAddress, true, |
| probe_init ? false : true, false, NULL, |
| all_threads, num_threads, false/*!ldr*/, |
| toflush); |
| |
| /* TODO: make hotp_process_image() emit different log messages |
| * depending upon which path it is invoked from. |
| */ |
| } |
| #endif /* WINDOWS */ |
| } |
| |
| void |
| hotp_init(void) |
| { |
| ASSIGN_INIT_READWRITE_LOCK_FREE(hotp_vul_table_lock, hotp_vul_table_lock); |
| |
| /* Assuming no locks are held while initializing hot patching. */
| ASSERT_OWN_NO_LOCKS(); |
| ASSERT(DYNAMO_OPTION(hot_patching)); |
| #ifdef GBOP |
| /* gbop can't be turned on without hotp_only. */ |
| ASSERT(DYNAMO_OPTION(hotp_only) || !DYNAMO_OPTION(gbop)); |
| #endif |
| |
| if (DYNAMO_OPTION(hotp_only)) { |
| VMVECTOR_ALLOC_VECTOR(hotp_only_tramp_areas, GLOBAL_DCONTEXT, |
| VECTOR_SHARED | VECTOR_NEVER_MERGE, |
| hotp_only_tramp_areas_lock); |
| } |
| |
| write_lock(&hotp_vul_table_lock); |
| |
| #ifdef DEBUG |
| hotp_globals = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, hotp_globals_t, |
| ACCT_HOT_PATCHING, PROTECTED); |
| hotp_globals->ldr_safe_hook_removal = false; |
| hotp_globals->ldr_safe_hook_injection = false; |
| #endif |
| /* Currently hotp_patch_point_areas is used for hotp_only to do module |
| * matching (case 7279) and offset lookup (case 8132), and offset lookup |
| * only for hotp with fcache (case 10075). Later on it will be |
| * used for patch injection, removal, perscache stuff, etc; case 10728. |
| */ |
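| /* Note: VECTOR_NEVER_MERGE keeps adjacent patch point regions as |
| * separate entries, presumably so that each retains its own data. |
| */ |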
| VMVECTOR_ALLOC_VECTOR(hotp_patch_point_areas, GLOBAL_DCONTEXT, |
| VECTOR_SHARED | VECTOR_NEVER_MERGE, |
| hotp_patch_point_areas_lock); |
| |
| /* hotp_only trampolines must be allocated from a special heap whose |
| * memory is executable. |
| */ |
| if (DYNAMO_OPTION(hotp_only)) |
| hotp_only_tramp_heap = special_heap_init(HOTP_ONLY_TRAMPOLINE_SIZE, |
| true, /* yes, use a lock */ |
| true, /* make it executable */ |
| true /* it is persistent */); |
| ASSERT(GLOBAL_VUL_TABLE == NULL && NUM_GLOBAL_VULS == 0); |
| GLOBAL_VUL_TABLE = hotp_read_policy_defs(&NUM_GLOBAL_VULS); |
| if (GLOBAL_VUL_TABLE != NULL) { |
| hotp_load_hotp_dlls(GLOBAL_VUL_TABLE, NUM_GLOBAL_VULS); |
| hotp_read_policy_modes(NULL); |
| /* Policy status table must be initialized after the global |
| * vulnerability table and modes are read, but before module list |
| * is iterated over. |
| */ |
| hotp_init_policy_status_table(); |
| |
| /* We don't need to call hotp_walk_loader_list() here as |
| * find_executable_vm_areas() will call hotp_process_image() for us. |
| */ |
| } |
| else { |
| LOG(GLOBAL, LOG_HOT_PATCHING, 2, "No hot patch definitions to read\n"); |
| } |
| |
| /* Release the lock. */ |
| write_unlock(&hotp_vul_table_lock); |
| |
| /* Can't hold any locks at the end of hot patch initialization. */ |
| ASSERT_OWN_NO_LOCKS(); |
| } |
| |
| /* thread-shared initialization that should be repeated after a reset */ |
| void |
| hotp_reset_init(void) |
| { |
| /* nothing to do */ |
| } |
| |
| /* Free all thread-shared state not critical to forward progress; |
| * hotp_reset_init() will be called before continuing. |
| */ |
| void |
| hotp_reset_free(void) |
| { |
| /* Free old tables. Hot patch code must ensure that no old table |
| * pointer is kept across a synch-all point, which is also a reset |
| * point (case 7760 & 8921). |
| */ |
| hotp_vul_tab_t *current_tab, *temp_tab; |
| if (!DYNAMO_OPTION(hot_patching)) |
| return; |
| write_lock(&hotp_vul_table_lock); |
| temp_tab = hotp_old_vul_tabs; |
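| /* Standard singly-linked list teardown: save the next pointer before |
| * freeing the current node. */ |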
| while (temp_tab != NULL) { |
| current_tab = temp_tab; |
| temp_tab = temp_tab->next; |
| hotp_free_vul_table(current_tab->vul_tab, current_tab->num_vuls); |
| heap_free(GLOBAL_DCONTEXT, current_tab, sizeof(hotp_vul_tab_t) |
| HEAPACCT(ACCT_HOT_PATCHING)); |
| } |
| hotp_old_vul_tabs = NULL; |
| write_unlock(&hotp_vul_table_lock); |
| } |
| |
| /* Free up all allocated memory and delete hot patching lock. */ |
| void |
| hotp_exit(void) |
| { |
| /* This assert ensures that there is only one thread in the process |
| * during exit. Grab the hot patch lock all the same because a nudge |
| * can still come in at this point; freeing things without the lock is |
| * bad. |
| */ |
| ASSERT(dynamo_exited); |
| ASSERT(DYNAMO_OPTION(hot_patching)); |
| write_lock(&hotp_vul_table_lock); |
| |
| /* Release the hot patch policy status table if it was allocated. This |
| * table may never have been allocated if there were no hot patch |
| * definitions even though -hot_patching was turned on. |
| */ |
| if (hotp_policy_status_table != NULL) { |
| heap_free(GLOBAL_DCONTEXT, hotp_policy_status_table, |
| hotp_policy_status_table->size HEAPACCT(ACCT_HOT_PATCHING)); |
| hotp_policy_status_table = NULL; |
| } |
| |
| /* Release the patch point areas vector before the table. */ |
| hotp_ppoint_areas_release(); |
| vmvector_delete_vector(GLOBAL_DCONTEXT, hotp_patch_point_areas); |
| hotp_patch_point_areas = NULL; |
| |
| /* Release the global vulnerability table and old tables if any. */ |
| hotp_free_vul_table(GLOBAL_VUL_TABLE, NUM_GLOBAL_VULS); |
| /* case 8118: set to NULL since referenced in hotp_print_diagnostics() */ |
| GLOBAL_VUL_TABLE = NULL; |
| |
| #ifdef DEBUG |
| HEAP_TYPE_FREE(GLOBAL_DCONTEXT, hotp_globals, hotp_globals_t, |
| ACCT_HOT_PATCHING, PROTECTED); |
| #endif |
| write_unlock(&hotp_vul_table_lock); |
| |
| hotp_reset_free(); |
| |
| if (DYNAMO_OPTION(hotp_only)) { |
| #ifdef WINDOWS |
| /* Don't free the heap upon detach - the app may have hooked with our |
| * trampoline code; case 9593. To make this memory-efficient, delete |
| * the heap if no collisions were detected; this is part of the |
| * bookkeeping needed to leak not all removed hotp trampolines, but |
| * only those that have a potential collision; a minor TODO -- would |
| * save a max of 50kb. |
| * Note: the heap lock should be deleted even if the heap isn't! */ |
| /* If hotp_only_tramp_heap_cache is NULL, it means that no patches |
| * were removed (either because they weren't injected or just not |
| * removed). This means we don't have to leak the trampolines even |
| * for detach (PR 215520). */ |
| if (!doing_detach || hotp_only_tramp_heap_cache == NULL) |
| special_heap_exit(hotp_only_tramp_heap); |
| # ifdef DEBUG |
| else |
| special_heap_delete_lock(hotp_only_tramp_heap); |
| # endif |
| #else |
| special_heap_exit(hotp_only_tramp_heap); |
| #endif |
| |
| hotp_only_tramp_heap = NULL; |
| vmvector_delete_vector(GLOBAL_DCONTEXT, hotp_only_tramp_areas); |
| hotp_only_tramp_areas = NULL; |
| } |
| |
| DELETE_READWRITE_LOCK(hotp_vul_table_lock); |
| } |
| |
| /* Hot patch policy update action handler */ |
| bool |
| nudge_action_read_policies(void) |
| { |
| hotp_vul_t *old_vul_table = NULL, *new_vul_table = NULL; |
| uint num_old_vuls = 0, num_new_vuls; |
| int num_threads = 0; |
| thread_record_t **all_threads = NULL; |
| |
| STATS_INC(hotp_num_policy_nudge); |
| /* Fix for case 6090; TODO: remove when -hotp_policy_size is removed */ |
| synchronize_dynamic_options(); |
| new_vul_table = hotp_read_policy_defs(&num_new_vuls); |
| if (new_vul_table != NULL) { |
| bool old_value; |
| hotp_vul_tab_t *temp; |
| where_am_i_t wherewasi; |
| dcontext_t *dcontext = get_thread_private_dcontext(); |
| vm_area_vector_t toflush; /* never initialized for hotp_only */ |
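| /* For hotp with the fcache (i.e., not hotp_only), toflush is expected |
| * to collect the regions whose fragments must be flushed because they |
| * overlap new patch points. */ |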
| |
| /* If dynamo_exited was false up to the check in this routine, then |
| * this thread would have been intercepted by the core, i.e., it |
| * would have gotten a dcontext. The assert is to catch bugs; the if |
| * is to make sure that the release build doesn't crash if this |
| * happens anyway. |
| */ |
| ASSERT(dcontext != NULL && dcontext != GLOBAL_DCONTEXT); |
| if (dcontext == NULL) { |
| return false; /* skip further processing */ |
| } |
| |
| /* When the nudge thread starts up, the core takes control and |
| * lets it go once it is identified as nudge. However, |
| * under_dynamo_control is still true because we come here from the cache. |
| * We need to set under_dynamo_control to false during hot patch dll loading, |
| * otherwise the core will take over again at the dll loading interception point. |
| * Once hot patch dlls are loaded we restore under_dynamo_control in case it's |
| * relied on elsewhere. Note - this isn't needed for |
| * loading hot patch dlls at startup because thread init comes |
| * after hotp_init(), so under_dynamo_control isn't set. Only |
| * hot patch dll loading during nudge needs this. |
| * TODO: under_dynamo_control needs cleanup - see case 529, 5183. |
| */ |
| old_value = dcontext->thread_record->under_dynamo_control; |
| dcontext->thread_record->under_dynamo_control = false; |
| |
| /* Fix for case 5367. TODO: undo fix after writing own loader. */ |
| wherewasi = dcontext->whereami; |
| dcontext->whereami = WHERE_APP; /* WHERE_APP? more like WHERE_DR */ |
| dcontext->nudge_thread = true; |
| |
| /* Fix for case 5376. There can be a deadlock if a nudge happened |
| * to result in hot patch dlls being loaded when at the same time |
| * an app dll was being loaded; hotp_vul_table_lock & LoaderLock |
| * would create a deadlock. So while loading the hot patch dlls |
| * the hotp_vul_table_lock shouldn't be held. |
| * To avoid this the table is read, stored in a temporary variable |
| * and hot patch dlls are loaded using that temp. table - all this |
| * is now done without the hotp_vul_table_lock. Then the vul table |
| * lock is grabbed (see below) and the global table is setup. |
| * |
| * FIXME: The longer term solution is to have our own loader to |
| * load hot patch dlls. |
| */ |
| hotp_load_hotp_dlls(new_vul_table, num_new_vuls); |
| |
| /* Must be reset to false; otherwise the subsequent module list walk |
| * will be useless, i.e., it won't be able to identify modules for hot |
| * patching because hotp_process_image() won't work. |
| */ |
| dcontext->nudge_thread = false; |
| |
| /* If whereami changed, there probably was a callback, which can |
| * lead to other bugs; assert that it hasn't changed. |
| */ |
| ASSERT(dcontext->whereami == WHERE_APP); |
| dcontext->whereami = wherewasi; |
| dcontext->thread_record->under_dynamo_control = old_value; |
| |
| /* Suspend all threads (for hotp_only) and grab locks. */ |
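| /* hotp_only patches the application image directly, so no app thread |
| * may be left running while old patches are removed and new ones |
| * injected. */ |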
| if (DYNAMO_OPTION(hotp_only)) { |
| #ifdef WINDOWS |
| DEBUG_DECLARE(bool ok =) |
| synch_with_all_threads(THREAD_SYNCH_SUSPENDED, &all_threads, |
| /* Case 6821: other synch-all-thread uses that |
| * only care about threads carrying fcache |
| * state can ignore us |
| */ |
| &num_threads, THREAD_SYNCH_NO_LOCKS_NO_XFER, |
| /* if we fail to suspend a thread (e.g., privilege |
| * problems) ignore it. FIXME: retry instead? */ |
| THREAD_SYNCH_SUSPEND_FAILURE_IGNORE); |
| ASSERT(ok); |
| #endif |
| } |
| /* CAUTION: Setting up the global table, reading modes, setting up |
| * policy status table and module list walking MUST all be |
| * done in that order with the table lock held as all of them |
| * update the global table. |
| */ |
| write_lock(&hotp_vul_table_lock); |
| |
| /* For hotp_only, all patches have to be removed before doing |
| * anything with the new vulnerability data, and none after that; |
| * this is unlike hotp, where removal has to be done both before and |
| * after. |
| */ |
| if (DYNAMO_OPTION(hotp_only)) { |
| hotp_remove_hot_patches(GLOBAL_VUL_TABLE, NUM_GLOBAL_VULS, |
| true, NULL); |
| } |
| /* Save the old table for flushing & launch the new table. */ |
| old_vul_table = GLOBAL_VUL_TABLE; |
| num_old_vuls = NUM_GLOBAL_VULS; |
| hotp_ppoint_areas_release(); /* throw out the old patch points */ |
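| /* GLOBAL_VUL_TABLE and NUM_GLOBAL_VULS live in a self-protected data |
| * section, so the swap must be bracketed by unprotect/protect. */ |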
| SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); |
| GLOBAL_VUL_TABLE = new_vul_table; |
| NUM_GLOBAL_VULS = num_new_vuls; |
| SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); |
| |
| hotp_read_policy_modes(NULL); |
| |
| /* Policy status table must be initialized after the global |
| * vulnerability table and modes are read, but before module list |
| * is iterated over. |
| */ |
| SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT |