/* **********************************************************
* Copyright (c) 2012 Google, Inc. All rights reserved.
* Copyright (c) 2005-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2005-2007 Determina Corp. */
/*
* aslr.c - ASLR: address space layout randomization from user mode
*/
#include "../globals.h"
#include "ntdll.h"
#include "os_private.h"
#include "aslr.h"
#include "instr.h" /* for instr_t and OP_* opcodes */
#include "decode.h" /* for decode_opcode */
#ifdef GBOP
# include "gbop.h"
# include "../fragment.h"
# include "../hotpatch.h"
#endif
#include "../module_shared.h"
enum {ASLR_MAP_GRANULARITY = 64*1024}; /* 64KB - OS allocation granularity */
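/* e.g. ALIGN_FORWARD(0x10012345, ASLR_MAP_GRANULARITY) == 0x10020000 */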
/* A signature appended to relocated files in our DLL cache providing
 * a stronger consistency check between source and target.
 *
 * Note that this uses another page or sector on disk, but at least we
 * don't waste another file and directory entry and any additional
 * security descriptors.  Raw reads of data after the end of a
 * MEM_IMAGE may result in a new file cache mapping, yet that is
 * unlikely to be worse in performance or memory than having a
 * separate file.  FIXME: should measure.
*/
typedef struct {
module_digest_t original_source;
module_digest_t relocated_target;
    /* minimal file corruption check.  A mismatched signature is most
     * likely due to a version mismatch or a power failure.  Note that we
     * do not require a guaranteed order of flushing, so a matching
     * signature doesn't guarantee that the previous blocks are
     * consistently written.  We must maintain internal file
     * consistency by making sure that any failed file write
     * immediately terminates further work; such incomplete file
     * prefixes should never be published under a well-known name.
*
*/
uint magic;
/* although old files should be invalidated anyways, in case we'd
* want to report suspiciously corrupt files we better be sure
* we're not matching against the wrong version.
*/
uint version;
/* do not add any fields after version - it has to be last word in file */
} aslr_persistent_digest_t;
/* version number for file signature */
enum {ASLR_PERSISTENT_CACHE_VERSION = 1};
/* magic footer: ADPE */
enum {ASLR_PERSISTENT_CACHE_MAGIC = 0x45504441};
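/* On-disk layout sketch (derived from the comments above): the
 * aslr_persistent_digest_t footer is appended after the relocated image
 * contents, with version as the last word in the file:
 *   [ relocated image ][ original_source | relocated_target | magic | version ]
 * The magic 0x45504441 is the little-endian byte sequence 'A','D','P','E'.
 */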
/* all ASLR state protected by this lock */
DECLARE_CXTSWPROT_VAR(static mutex_t aslr_lock, INIT_LOCK_FREE(aslr_lock));
/* We keep these vars on the heap for selfprot (case 8074). */
typedef struct _aslr_last_dll_bounds_t {
app_pc end;
/* used by ASLR_RANGE_BOTTOM_UP to capture failures
* FIXME: should allow UnmapViewOfSection to rewind last DLL */
app_pc start;
} aslr_last_dll_bounds_t;
static aslr_last_dll_bounds_t *aslr_last_dll_bounds;
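/* Sketch of the bottom-up cursor kept in aslr_last_dll_bounds: 'end' is the
 * end of the last successfully mapped view (see aslr_update_view_size()) and
 * 'start' is the last base handed out; aslr_get_next_base() computes
 * start = ALIGN_FORWARD(end + jitter, ASLR_MAP_GRANULARITY).
 */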
/* FIXME: case 6739: to keep track properly on UnmapViewOfSection we
 * should either QueryMemory for the jitter block or keep the preceding
 * padding, plus the modules we've bumped, in a vmarea.
 */
/* FIXME: ASLR_RANGE_TOP_DOWN needs aslr_last_dll_bounds->start and not the end */
/* used for ASLR_TRACK_AREAS and ASLR_AVOID_AREAS.  Tracks preferred
 * address ranges where a DLL would usually be located without ASLR.
 * The data is the base of the current mapping of the rebased DLL that
 * would otherwise be in that area.
 * Kept on the heap for selfprot (case 7957).
 */
vm_area_vector_t *aslr_wouldbe_areas;
/* used for ASLR_HEAP and ASLR_HEAP_FILL - tracks added pad areas that should
 * be freed.  The data is the base of the associated real heap allocation
 * that precedes the pad.
 *
 * FIXME: (TOTEST) We currently expect to be able to look up the pad
 * region whenever the application heap region is freed; if any
 * version of Windows allows a subregion of the original to be freed, or
 * a free crossing boundaries, we'll just add a real backmap as well.
 * Kept on the heap for selfprot (case 7957).
 */
vm_area_vector_t *aslr_heap_pad_areas;
/* shared Object directory for publishing Sections */
HANDLE shared_object_directory = INVALID_HANDLE_VALUE;
/* file_t directory of relocated DLL cache - shared.
 * FIXME: should have one per starting user SID.
 */
HANDLE relocated_dlls_filecache_initial = INVALID_HANDLE_VALUE;
#define KNOWN_DLLS_OBJECT_DIRECTORY L"\\KnownDlls"
HANDLE known_dlls_object_directory = INVALID_HANDLE_VALUE;
#define KNOWN_DLL_PATH_SYMLINK L"KnownDllPath"
wchar_t known_dll_path[MAX_PATH];
/* needed even by consumers to be able to handle NtOpenSection */
/* forwards */
#ifdef DEBUG
static bool
aslr_doublecheck_wouldbe_areas(void);
#endif
static void aslr_free_heap_pads(void);
static app_pc aslr_reserve_initial_heap_pad(app_pc preferred_base, size_t reserve_offset);
static bool
aslr_publish_file(const wchar_t *module_name);
static void
aslr_process_worklist(void);
static HANDLE
open_relocated_dlls_filecache_directory(void);
static void
aslr_get_known_dll_path(wchar_t *w_known_dll_path, /* OUT */
uint max_length_characters);
static bool
aslr_generate_relocated_section(IN HANDLE unmodified_section,
IN OUT app_pc *new_base, /* presumably random */
bool search_fitting_base,
OUT app_pc *mapped_base,
OUT size_t *mapped_size,
OUT module_digest_t *file_digest);
void aslr_free_dynamorio_loadblock(void);
/* end of forwards */
void
aslr_init(void)
{
/* big delta should be harder to guess or brute force */
size_t big_delta;
ASSERT(ALIGNED(DYNAMO_OPTION(aslr_dll_base), ASLR_MAP_GRANULARITY));
ASSERT_NOT_IMPLEMENTED(!TESTANY(~(ASLR_DLL|ASLR_STACK|ASLR_HEAP|ASLR_HEAP_FILL),
DYNAMO_OPTION(aslr)));
ASSERT_NOT_IMPLEMENTED(!TESTANY(~(ASLR_SHARED_INITIALIZE|ASLR_SHARED_INITIALIZE_NONPERMANENT|
ASLR_SHARED_CONTENTS|
ASLR_SHARED_PUBLISHER|ASLR_SHARED_SUBSCRIBER|
ASLR_SHARED_ANONYMOUS_CONSUMER|
ASLR_SHARED_WORKLIST|ASLR_SHARED_FILE_PRODUCER|
ASLR_ALLOW_ORIGINAL_CLOBBER|ASLR_RANDOMIZE_EXECUTABLE|
ASLR_AVOID_NET20_NATIVE_IMAGES|
ASLR_SHARED_PER_USER
), DYNAMO_OPTION(aslr_cache)));
ASSERT_NOT_IMPLEMENTED(!TESTANY(~(ASLR_PERSISTENT_PARANOID
| ASLR_PERSISTENT_SOURCE_DIGEST
| ASLR_PERSISTENT_TARGET_DIGEST
| ASLR_PERSISTENT_SHORT_DIGESTS
| ASLR_PERSISTENT_PARANOID_TRANSFORM_EXPLICITLY
| ASLR_PERSISTENT_PARANOID_PREFIX
), DYNAMO_OPTION(aslr_validation)));
ASSERT_NOT_IMPLEMENTED(!TESTANY(~(ASLR_INTERNAL_SAME_STRESS|ASLR_INTERNAL_RANGE_NONE|
ASLR_INTERNAL_SHARED_NONUNIQUE),
INTERNAL_OPTION(aslr_internal)));
ASSERT_NOT_IMPLEMENTED(!TESTANY(~
(ASLR_TRACK_AREAS | ASLR_DETECT_EXECUTE | ASLR_REPORT),
DYNAMO_OPTION(aslr_action)));
/* FIXME: NYI ASLR_AVOID_AREAS|ASLR_RESERVE_AREAS|
* ASLR_DETECT_READ|ASLR_DETECT_WRITE|
* ASLR_HANDLING|ASLR_NORMALIZE_ID
*/
ASSERT_CURIOSITY(!TEST(ASLR_RANDOMIZE_EXECUTABLE,
DYNAMO_OPTION(aslr_cache))
|| TEST(ASLR_ALLOW_ORIGINAL_CLOBBER,
DYNAMO_OPTION(aslr_cache))
&& "case 8902 - need to duplicate handle in child");
/* case 8902 tracks the extra work if we want to support this
* non-recommended configuration */
ASSERT(ASLR_CLIENT_DEFAULT == 0x7);
ASSERT(ASLR_CACHE_DEFAULT == 0x192); /* match any numeric use in optionsx.h */
#ifdef GBOP
ASSERT(GBOP_CLIENT_DEFAULT == 0x6037);
#endif
aslr_last_dll_bounds = HEAP_TYPE_ALLOC(GLOBAL_DCONTEXT, aslr_last_dll_bounds_t,
ACCT_OTHER, PROTECTED);
aslr_last_dll_bounds->start = NULL;
big_delta = get_random_offset(DYNAMO_OPTION(aslr_dll_offset));
aslr_last_dll_bounds->end = (app_pc) ALIGN_FORWARD(DYNAMO_OPTION(aslr_dll_base)
+ big_delta,
ASLR_MAP_GRANULARITY);
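    /* Example with hypothetical option values: aslr_dll_base 0x40000000 and
     * big_delta 0x123456 give end = ALIGN_FORWARD(0x40123456, 64KB) =
     * 0x40130000, a 64KB-aligned, offset-randomized starting point.
     */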
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: -aslr "PFX", dll end="PFX", "
"base="PFX", offset="PFX" -> delta="PFX", pad="PFX"\n",
DYNAMO_OPTION(aslr),
aslr_last_dll_bounds->end,
DYNAMO_OPTION(aslr_dll_base), DYNAMO_OPTION(aslr_dll_offset), big_delta,
DYNAMO_OPTION(aslr_dll_pad));
VMVECTOR_ALLOC_VECTOR(aslr_wouldbe_areas, GLOBAL_DCONTEXT,
/* allow overlap due to conflicting DLLs */
VECTOR_SHARED | VECTOR_NEVER_MERGE_ADJACENT,
aslr_areas);
VMVECTOR_ALLOC_VECTOR(aslr_heap_pad_areas, GLOBAL_DCONTEXT,
VECTOR_SHARED | VECTOR_NEVER_MERGE,
aslr_pad_areas);
if (DYNAMO_OPTION(aslr_dr)) {
/* free loadblocks if injected by parent */
aslr_free_dynamorio_loadblock();
} else {
/* note if parent had the flag enabled while the child doesn't
* we won't risk freeing */
}
if (TEST(ASLR_HEAP, DYNAMO_OPTION(aslr))) {
/* we only reserve a random padding from the beginning of memory
* and let the OS handle all other allocations normally
*/
app_pc big_front_pad_base =
aslr_reserve_initial_heap_pad(NULL /* earliest possible */,
DYNAMO_OPTION(aslr_heap_reserve_offset));
/* FIXME: If we want to consider ASLR_HEAP (but not
* ASLR_HEAP_FILL) as a default option we may want to use this
* padding as the randomization for our own memory. If we
* want to avoid address space fragmentation for important
* services, we may want to add the initial padding before
* vmm_heap_init() disable -vm_max_offset 0x0 and use -vm_base 0x0. */
        /* FIXME: Our large reservation may be too large to fit in
         * front of the executable; when we're not in early injection
         * there may be heap regions already allocated.  While apps
         * commonly start at 0x00400000, many Windows services start at
         * 0x01000000 (16MB) and the initial hole may be too small to
         * randomize anyways.
*
* Office apps start at 0x30000000 so they may end up having
* two heap regions if an attacker is able to control memory
* allocations. We only use the smaller
* aslr_heap_exe_reserve_offset for after the executable in
* case the original mapping was before the imagebase.
*
*/
        /* FIXME: though just grabbing the big and small pads usually
         * works, we should just fill in any space in front of the
         * executable.
*
* FIXME: add a random pad after the executable to make sure
* no heap allocation will eventually be at a predictable
* location.
*/
app_pc small_pad_after_executable_base =
aslr_reserve_initial_heap_pad(NULL /* FIXME: should be after executable */,
DYNAMO_OPTION(aslr_heap_exe_reserve_offset));
}
/* initialize shared object directory - note that this should be
* done in a high privilege process (e.g. winlogon.exe) that may
* otherwise have no other role to serve in ASLR_SHARED_CONTENTS
*/
if (TEST(ASLR_SHARED_INITIALIZE, DYNAMO_OPTION(aslr_cache))) {
HANDLE initialize_directory;
NTSTATUS res = nt_initialize_shared_directory(&initialize_directory,
true /* permanent */);
        /* we currently don't need to do anything else with this
         * handle; if we can't make the object permanent, we may
         * want to 'leak' the handle to persist the object directory
         * until this process dies.
         */
        /* FIXME: would be nice to provide a drcontrol -shared
         * -destroy (using NtMakeTemporaryObject()) to clear the permanent
         * directory, and -init to recreate it, for easier testing and to
         * save a reboot.
         */
/* FIXME: Note that in a model in which per-session or
* per-user sharing is allowed we may have extra levels to
* create. Otherwise, this nt_close_object_directory() can be
* done inside nt_initialize_shared_directory() for permanent
* directories.
*/
if (NT_SUCCESS(res)) {
nt_close_object_directory(initialize_directory);
} else {
/* STATUS_PRIVILEGE_NOT_HELD (0xc0000061) is an expected
* failure code for low privileged processes. Note for
* testing may need a dummy process with high enough
* privileges
*/
/* FIXME: may want to make this non-internal flag to allow
* simple experiments with unprivileged processes in
* release builds as well
*/
ASSERT_CURIOSITY(res == STATUS_PRIVILEGE_NOT_HELD);
if (TEST(ASLR_SHARED_INITIALIZE_NONPERMANENT,
DYNAMO_OPTION(aslr_cache))) {
NTSTATUS res = nt_initialize_shared_directory(&initialize_directory,
false /* temporary */);
ASSERT(NT_SUCCESS(res) && "unable to initialize");
/* must 'leak' initialize_directory to persist
* directory until process terminates,
* so there is no corresponding nt_close_object_directory()
*/
}
}
}
if (TESTANY(ASLR_SHARED_SUBSCRIBER|ASLR_SHARED_PUBLISHER,
DYNAMO_OPTION(aslr_cache))) {
/* Open shared DLL object directory '\Determina\SharedCache' */
/* publisher will ask for permission to create objects in that
* directory, consumer needs read only access */
/* FIXME: this should change to become SID related */
NTSTATUS res = nt_open_object_directory(&shared_object_directory,
DYNAMORIO_SHARED_OBJECT_DIRECTORY,
TEST(ASLR_SHARED_PUBLISHER, DYNAMO_OPTION(aslr_cache))
);
        /* Only trusted publishers should be allowed to publish in the
         * SharedCache */
        /* Note: if any of these fail in release build (most likely if the
         * root is not created, or it is created with restrictive
         * permissions) we won't be able to publish named sections.
         * Not a critical failure though.
         */
/* FIXME: should test shared_object_directory before any
* consumer requests, so that we don't waste any time trying
* to request sharing */
ASSERT_CURIOSITY(NT_SUCCESS(res) && "can't open \\Determina\\SharedCache");
}
if (DYNAMO_OPTION(track_module_filenames) ||
TESTANY(ASLR_SHARED_SUBSCRIBER | ASLR_SHARED_ANONYMOUS_CONSUMER |
ASLR_SHARED_PUBLISHER /* just in case */,
DYNAMO_OPTION(aslr_cache))) {
/* we'll need to match sections from \KnownDlls, note that all
* direct or indirect consumers have to handle NtOpenSection()
* here to deal with KnownDlls
*/
NTSTATUS res = nt_open_object_directory(&known_dlls_object_directory,
KNOWN_DLLS_OBJECT_DIRECTORY,
false);
ASSERT(NT_SUCCESS(res));
        /* read the \KnownDlls\KnownDllPath symbolic link */
aslr_get_known_dll_path(known_dll_path,
BUFFER_SIZE_ELEMENTS(known_dll_path));
}
if (TESTANY(ASLR_SHARED_PUBLISHER | ASLR_SHARED_ANONYMOUS_CONSUMER,
DYNAMO_OPTION(aslr_cache))) {
/* Open shared cache file directory */
relocated_dlls_filecache_initial =
open_relocated_dlls_filecache_directory();
/* FIXME: may need to open one shared and in addition one
* per-user
*/
/* FIXME: a ASLR_SHARED_FILE_PRODUCER | ASLR_SHARED_WORKLIST
* producer may want to be able to write to the filecache
* directory
*/
}
if (TEST(ASLR_SHARED_WORKLIST, DYNAMO_OPTION(aslr_cache))) {
aslr_process_worklist();
}
/* FIXME: case 6725 ASLR functionality is not fully dynamic. The
* only state that needs to be set up is the above random number,
* which we can just always initialize here. Yet not enough for
* the product:
* o we can't really undo changes, so not very useful to begin with, but
* at least DLLs after a change can be controlled
* o not planning on synchronizing options, yet may allow nudge to do so
* o post_syscall mappings does attempt to handle dynamic changes, not tested
*/
if (DYNAMO_OPTION(aslr) == ASLR_DISABLED)
return;
}
void
aslr_exit(void)
{
if (TEST(ASLR_TRACK_AREAS, DYNAMO_OPTION(aslr_action))) {
/* doublecheck and print entries to make sure they match */
DOLOG(1, LOG_VMAREAS, { print_modules_safe(GLOBAL, DUMP_NOT_XML); });
ASSERT(aslr_doublecheck_wouldbe_areas());
}
/* dynamic option => free no matter the option value now */
/* at startup: ASLR_TRACK_AREAS */
vmvector_delete_vector(GLOBAL_DCONTEXT, aslr_wouldbe_areas);
/* at startup: ASLR_HEAP_FILL|ASLR_HEAP */
aslr_free_heap_pads();
vmvector_delete_vector(GLOBAL_DCONTEXT, aslr_heap_pad_areas);
if (shared_object_directory != INVALID_HANDLE_VALUE) {
ASSERT_CURIOSITY(TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache)));
nt_close_object_directory(shared_object_directory);
}
if (known_dlls_object_directory != INVALID_HANDLE_VALUE) {
ASSERT_CURIOSITY(DYNAMO_OPTION(track_module_filenames) ||
TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache)));
nt_close_object_directory(known_dlls_object_directory);
}
if (relocated_dlls_filecache_initial != INVALID_HANDLE_VALUE) {
ASSERT_CURIOSITY(TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache)));
close_handle(relocated_dlls_filecache_initial);
}
HEAP_TYPE_FREE(GLOBAL_DCONTEXT, aslr_last_dll_bounds, aslr_last_dll_bounds_t,
ACCT_OTHER, PROTECTED);
aslr_last_dll_bounds = NULL;
/* always release lock in case -aslr was dynamically changed
* although currently it is not dynamic
*/
DELETE_LOCK(aslr_lock);
}
void
aslr_thread_init(dcontext_t *dcontext)
{
}
void
aslr_thread_exit(dcontext_t *dcontext)
{
}
/* ASLR random range choice */
/* use aslr_get_next_base() to start using a range, in combination
 * with aslr_update_failed() on failure to use it, and
 * aslr_update_view_size() to flag success and proceed to the next base.
 */
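/* Call protocol sketch (as used by the NtMapViewOfSection handlers below):
 *   base = aslr_get_next_base();
 *   ... issue the mapping at base ...
 *   on failure: base = aslr_update_failed(true, base, needed_size);
 *   on success: aslr_update_view_size(base, view_size);
 */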
static app_pc
aslr_get_next_base(void)
{
    /* although the loader holds a lock for the DLL mappings, other
     * MapViewOfFile calls may be racy.  If really serialized by the app,
     * there will never be contention on the locks grabbed here.
     */
size_t jitter = get_random_offset(DYNAMO_OPTION(aslr_dll_pad));
app_pc returned_base;
    /*
     * FIXME: [minor security] Although DLLs are definitely not loaded
     * racily, if we are using this for other potentially racy
     * allocations from the same region we may have races.  The
     * aslr_last_dll_bounds->end won't be updated, so multiple callers may
     * get bases not far from the same last end.  If aslr_dll_pad is
     * comparable to the size of an average mapping, the jitter here
     * will make it possible for multiple racy callers to receive
     * bases that may succeed.  Nevertheless, that is not really
     * necessary nor sufficient to avoid collisions.  Still, even
     * though on collision we'll currently give up, attackers can't
     * rely much on this.
     */
mutex_lock(&aslr_lock);
/* note that we always lose the low 16 bits of randomness of the
* padding, so adding to last dll page-aligned doesn't matter */
aslr_last_dll_bounds->start = aslr_last_dll_bounds->end + jitter;
aslr_last_dll_bounds->start = (app_pc)
ALIGN_FORWARD(aslr_last_dll_bounds->start, ASLR_MAP_GRANULARITY);
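    /* Worked example with hypothetical values: end = 0x10000000 and
     * jitter = 0x12345 give start = ALIGN_FORWARD(0x10012345, 64KB) =
     * 0x10020000; the low 16 bits of the jitter are discarded by the
     * alignment, as noted above.
     */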
returned_base = aslr_last_dll_bounds->start; /* for racy callers */
mutex_unlock(&aslr_lock);
LOG(THREAD_GET, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: next dll recommended="PFX"\n", returned_base);
return returned_base;
}
/* preverify that the range is available, leaving the possibility of
 * failure only to races.  Allows us to skip ranges that get in our way,
 * especially common when used for ASLR sharing, where we quickly
 * fragment our address space when DLLs are generated by multiple
 * processes.
 */
/* returns NULL if no valid range exists */
static app_pc
aslr_get_fitting_base(app_pc requested_base, size_t view_size)
{
bool available = true;
app_pc current_base = requested_base;
ASSERT(ALIGNED(current_base, ASLR_MAP_GRANULARITY));
/* currently march forward through OS allocated regions */
do {
app_pc allocated_base;
size_t size;
if ((ptr_uint_t)(current_base + view_size) > DYNAMO_OPTION(aslr_dll_top)) {
/* FIXME: case 6739 could try to wrap around (ONCE!) */
ASSERT_CURIOSITY((ptr_uint_t)current_base <= DYNAMO_OPTION(aslr_dll_top) ||
/* case 9844: suppress for short regression for now */
check_filter("win32.reload-race.exe",
get_short_name(get_application_name())));
ASSERT_CURIOSITY(false && "exhausted DLL range" ||
/* case 9378: suppress for short regression for now */
check_filter("win32.reload-race.exe",
get_short_name(get_application_name())));
return NULL;
}
size = get_allocation_size(current_base, &allocated_base);
if (size == 0) {
/* very unusual, can't have passed into kernel ranges */
ASSERT_NOT_REACHED();
return NULL;
}
        /* note that get_allocation_size() returns the allocation size of
         * non-FREE regions, while for FREE regions it is the available
         * region size (exactly what we need)
         */
if (allocated_base != NULL /* taken, skip */) {
ASSERT(current_base < allocated_base + size);
current_base = allocated_base + size;
/* skip unusable unaligned MEM_FREE region */
current_base = (app_pc)ALIGN_FORWARD(current_base, ASLR_MAP_GRANULARITY);
available = false;
} else { /* free */
if (size < view_size) { /* we don't fit in free size, skip */
available = false;
ASSERT(size > 0);
current_base = current_base + size;
/* free blocks should end aligned at allocation granularity */
ASSERT_CURIOSITY(ALIGNED(current_base, ASLR_MAP_GRANULARITY));
/* can't be too sure - could be in the middle of freed TEB entries */
current_base = (app_pc)ALIGN_FORWARD(current_base, ASLR_MAP_GRANULARITY);
} else {
/* we can take this, unless someone beats us */
available = true;
}
}
} while (!available);
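    /* Illustration with hypothetical addresses: a request for 0x10020000 with
     * view_size 0x50000 that finds [0x10020000,0x10060000) already allocated
     * resumes at ALIGN_FORWARD(0x10060000, 64KB); a FREE block smaller than
     * view_size is likewise skipped, until a FREE block that fits is found or
     * aslr_dll_top is exceeded (in which case NULL is returned above).
     */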
if (requested_base != current_base) {
/* update our expectations, so that aslr_update_view_size()
* doesn't get surprised */
mutex_lock(&aslr_lock);
if (aslr_last_dll_bounds->start == requested_base) {
aslr_last_dll_bounds->start = current_base;
} else {
/* racy requests? */
ASSERT_CURIOSITY(false && "aslr_get_fitting_base: racy ASLR mapping");
ASSERT_NOT_TESTED();
}
mutex_unlock(&aslr_lock);
}
ASSERT(ALIGNED(current_base, ASLR_MAP_GRANULARITY));
return current_base;
}
/* update on failure; if request_new is true, we should look for a
 * better fit given the module's needed_size.  Note requested_base is
 * just a hint for what we have tried.
 */
static app_pc
aslr_update_failed(bool request_new,
app_pc requested_base,
size_t needed_size)
{
app_pc new_base = NULL; /* default to native preferred base */
LOG(THREAD_GET, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: aslr_update_failed for "PFX"\n", aslr_last_dll_bounds->start);
if (request_new) {
ASSERT(requested_base != NULL);
ASSERT(needed_size != 0);
if (requested_base != NULL && needed_size != 0) {
new_base = aslr_get_fitting_base(requested_base, needed_size);
ASSERT_CURIOSITY(new_base != NULL ||
/* case 9894: suppress for short regression for now */
check_filter("win32.reload-race.exe",
get_short_name(get_application_name())));
} else {
/* give up, something is not right, just reset */
ASSERT(new_base == NULL);
}
}
if (new_base == NULL) {
/* update old base, currently just so we can ASSERT elsewhere */
mutex_lock(&aslr_lock);
aslr_last_dll_bounds->start = NULL;
mutex_unlock(&aslr_lock);
/* just giving up, no need for new base */
}
return new_base;
}
static void
aslr_update_view_size(app_pc view_base, size_t view_size)
{
ASSERT(view_base != NULL);
ASSERT(view_size != 0);
ASSERT_CURIOSITY_ONCE((ptr_uint_t)(view_base + view_size) <=
DYNAMO_OPTION(aslr_dll_top) ||
/* case 7059: suppress for short regr for now */
EXEMPT_TEST("win32.reload-race.exe"));
/* FIXME: if aslr_dll_top is not reachable should wrap around, or
* know not to try anymore. Currently we'll just keep trying to
* rebase and giving up all the time.
*/
if (TEST(ASLR_INTERNAL_SAME_STRESS, INTERNAL_OPTION(aslr_internal))) {
return;
}
/* NOTE we don't have a lock for the actual system call so we can
* get out of order here
*/
mutex_lock(&aslr_lock);
if (aslr_last_dll_bounds->start == view_base) {
aslr_last_dll_bounds->end = view_base + view_size;
} else {
/* racy requests? */
ASSERT_CURIOSITY(false && "racy ASLR mapping");
/* when the last known request is not the same we just bump to
* largest value to resynch, although it is more likely that a
* collision would have prevented one from reaching here
*/
aslr_last_dll_bounds->end = MAX(aslr_last_dll_bounds->end, view_base + view_size);
ASSERT_NOT_TESTED();
}
mutex_unlock(&aslr_lock);
}
/* used for tracking potential violations in ASLR_TRACK_AREAS */
static void
aslr_track_randomized_dlls(dcontext_t *dcontext, app_pc base, size_t size, bool map,
bool our_shared_file)
{
if (map) {
/* note can't use get_module_preferred_base_safe() here, since
* not yet added to loaded_module_areas */
app_pc preferred_base;
if (our_shared_file) {
DEBUG_DECLARE(app_pc our_relocated_preferred_base =
get_module_preferred_base(base););
ASSERT(TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache)));
ASSERT(dcontext->aslr_context.original_section_base != ASLR_INVALID_SECTION_BASE);
ASSERT_CURIOSITY(our_relocated_preferred_base == base
&& "useless conflicting shared");
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 2,
"ASLR: SHARED: mapped base "PFX", preferred random "PFX
", original "PFX"\n", base,
our_relocated_preferred_base,
dcontext->aslr_context.original_section_base);
preferred_base = dcontext->aslr_context.original_section_base;
} else {
preferred_base = get_module_preferred_base(base);
}
/* FIXME: should be rare, otherwise could retry if this happens */
ASSERT_CURIOSITY(preferred_base != base && "randomly preferred base");
/* FIXME: with ASLR_SHARED_CONTENTS we now have three bases to
* consider original preferred base, shared preferred base,
* real base (our shared DLL can be rebased due to conflict).
*/
if (preferred_base != NULL
&& preferred_base != base /* for the rare case of staying at base */) {
/* FIXME: if overlap in aslr_wouldbe_areas then we cannot
* tell which DLL is the one really being targeted. Yet
* unlikely that attackers would bother targeting one of
* these, can still use the first loaded as most likely.
* Note we can't properly remove overlapping DLLs either.
* FIXME: Maybe we shouldn't flag compatibility issues and
* accidental read/write in such contested areas.
*/
DOLOG(0, LOG_SYSCALLS, {
if (vmvector_overlap(aslr_wouldbe_areas,
preferred_base, preferred_base + size)) {
LOG(THREAD_GET, LOG_SYSCALLS|LOG_VMAREAS, 1,
"aslr: conflicting preferred range "PFX"-"PFX" currently "PFX,
preferred_base, preferred_base + size, base);
}
});
vmvector_add(aslr_wouldbe_areas, preferred_base, preferred_base + size,
base /* current mapping of DLL */);
} else {
/* FIXME: shouldn't happen for ASLR_DLL */
ASSERT_CURIOSITY(false && "not a PE or no image base");
}
} else {
/* not all unmappings are to modules, and double mappings of a
* PE file both as a module and as a linear memory mapping
* exist - e.g. USER32!ExtractIconFromEXE. Would need an
* explicit MEM_IMAGE check on the area.
*/
/* It should be faster to check in loaded_module_areas.
* Ignore if unmapped view was not loaded as DLL. Called
* before process_mmap(unmap), still ok to use loaded module
* list. */
app_pc preferred_base = get_module_preferred_base_safe(base);
if (preferred_base != NULL /* tracked module */
&& preferred_base != base /* randomized by us, or simply rebased? */
) {
/* FIXME: we don't know which DLLs we have randomized
* ourselves and which have had a conflict, but not a
* significant loss if we remove the range from tracking.
* Note that simple technique for silencing the ASSERT
* doesn't work for rebased DLLs that have been loaded
* before we're loaded.
*/
DOLOG(0, LOG_SYSCALLS, {
/* case 7797 any conflicting natively DLLs may hit this */
if (!vmvector_overlap(aslr_wouldbe_areas,
preferred_base, preferred_base + size)) {
LOG(THREAD_GET, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: unmap missing preferred range "PFX"-"PFX", "
"probably conflict?", preferred_base, preferred_base + size);
}
});
            /* doublecheck the unsafe base, since the PE is still mapped in;
             * however, the preferred base from the PE is not what we want
             * in ASLR shared, see case 8507
             */
ASSERT(preferred_base == get_module_preferred_base(base)
|| TEST(ASLR_SHARED_CONTENTS,
DYNAMO_OPTION(aslr_cache)));
/* FIXME: if multiple DLLs preferred regions overlap we
* wouldn't know not to remove a hole, need refcounting, but
* since whole notification is best effort, not doing that */
vmvector_remove(aslr_wouldbe_areas,
preferred_base, preferred_base + size);
}
}
}
/* PRE hook for NtMapViewOfSection */
void
aslr_pre_process_mapview(dcontext_t *dcontext)
{
reg_t *param_base = dcontext->sys_param_base;
priv_mcontext_t *mc = get_mcontext(dcontext);
HANDLE section_handle = (HANDLE) sys_param(dcontext, param_base, 0);
HANDLE process_handle = (HANDLE) sys_param(dcontext, param_base, 1);
void **pbase_unsafe = (void *) sys_param(dcontext, param_base, 2);
uint zerobits = (uint) sys_param(dcontext, param_base, 3);
size_t commit_size = (size_t) sys_param(dcontext, param_base, 4);
LARGE_INTEGER *psection_offs_unsafe =
(LARGE_INTEGER *) sys_param(dcontext, param_base, 5); /* OPTIONAL */
size_t *pview_size_unsafe = (size_t *) sys_param(dcontext, param_base, 6);
uint inherit_disposition = (uint) sys_param(dcontext, param_base, 7);
uint allocation_type = (uint) sys_param(dcontext, param_base, 8);
uint prot = (uint) sys_param(dcontext, param_base, 9);
app_pc requested_base;
size_t requested_size;
app_pc modified_base = 0;
/* flag currently used only for MapViewOfSection */
dcontext->aslr_context.sys_aslr_clobbered = false;
if (!safe_read(pbase_unsafe, sizeof(requested_base), &requested_base) ||
!safe_read(pview_size_unsafe, sizeof(requested_size), &requested_size)) {
/* we expect the system call to fail */
DODEBUG(dcontext->expect_last_syscall_to_fail = true;);
return;
}
DOLOG(1, LOG_SYSCALLS, {
uint queried_section_attributes = 0;
bool attrib_ok =
get_section_attributes(section_handle, &queried_section_attributes, NULL);
/* Unfortunately, the loader creates sections that do not have
* Query access (SECTION_QUERY 0x1), and we can't rely on
* being able to use this
*
* windbg> !handle 0 f section
* GrantedAccess 0xe:
* None, MapWrite,MapRead,MapExecute
* vs
* GrantedAccess 0xf001f:
* Delete,ReadControl,WriteDac,WriteOwner
* Query,MapWrite,MapRead,MapExecute,Extend
* Object Specific Information
* Section base address 0
* Section attributes 0x4000000
*/
/* FIXME: unknown flag 0x20000000
* when running notepad I get
* Section attributes 0x21800000 only on two DLLs
* I:\Program Files\WIDCOMM\Bluetooth Software\btkeyind.dll (my bluetooth extension)
* I:\Program Files\Dell\QuickSet\dadkeyb.dll are using 0x20000000, why are they special?
*/
ASSERT_CURIOSITY(!TESTANY(~(SEC_BASED_UNSUPPORTED | SEC_NO_CHANGE_UNSUPPORTED
| SEC_FILE | SEC_IMAGE | SEC_VLM | SEC_RESERVE
| SEC_COMMIT | SEC_NOCACHE
/* FIXME: value is 0x20000000
* could also be IMAGE_SCN_MEM_EXECUTE , or MEM_LARGE_PAGES
*/
| GENERIC_EXECUTE
),
queried_section_attributes));
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"syscall: pre NtMapViewOfSection *base="PFX" *size="PIFX" prot=%s\n"
" sh="PIFX" zero=%d commit=%d &secoffs="PIFX" inherit=%d type=0x%x;"
"%s%x\n",
requested_base, requested_size, prot_string(prot),
section_handle, zerobits, commit_size, psection_offs_unsafe, inherit_disposition,
allocation_type,
attrib_ok ? "attrib=0x" : "unknown ", queried_section_attributes);
});
/* Reversing notes: on XP SP2
*
* o Protect - all modules are first attempted with --x- but
* DLLs that need rebasing are remapped as rw--
*
* intercept_load_dll: I:\Program Files\Dell\QuickSet\dadkeyb.dll (always conflicts)
* syscall: NtMapViewOfSection *base=0x00000000 *size=0x0 prot=--x-
* sh=1832 zero=0 commit=0 &secoffs=0 inherit=1 type=0
* syscall: NtMapViewOfSection 0x00980000 size=0x12000 prot=--x- => 0
* syscall: NtUnmapViewOfSection 0x00980000 size=0x12000
*
* syscall: NtMapViewOfSection *base=0x00000000 *size=0x0 prot=rw--
* sh=1836 zero=0 commit=0 &secoffs=0 inherit=1 type=0
* syscall: NtMapViewOfSection 0x00980000 size=0x13000 prot=rw-- => 0x40000003
* Note the size is now larger, in fact mapping is MEM_IMAGE, and
* so get STATUS_IMAGE_NOT_AT_BASE, yet we can't always even query our section,
* so we would have to track NtCreateSection to determine that.
*
* syscall: NtProtectVirtualMemory process=0xffffffff base=0x00981000 size=0x8000 prot=rw-- 0x4
*
* And most weird is a call that always fails while processing the above DLL
* syscall: NtMapViewOfSection *base=0x00980000 *size=0x13000 prot=rw--
* sh=1832 zero=0 commit=0 &secoffs=0 inherit=1 type=0
* syscall: failed NtMapViewOfSection prot=rw-- => 0xc0000018 STATUS_CONFLICTING_ADDRESSES
*/
if (is_phandle_me(process_handle)) {
ASSERT_CURIOSITY(psection_offs_unsafe == NULL || prot != PAGE_EXECUTE);
/* haven't seen a DLL mapping that specifies an offset */
/* * SectionOffset is NULL for the loader,
* kernel32!MapViewOfFileEx (on Windows XP and Win2k)
* always passes the psection_offs_unsafe as a stack
* variable, since offset is user exposed.
* DLL loading on the other hand doesn't need this argument.
*/
ASSERT_NOT_IMPLEMENTED(!TEST(ASLR_MAPPED, DYNAMO_OPTION(aslr)));
if (psection_offs_unsafe == NULL && prot != PAGE_READONLY) {
/* FIXME: should distinguish SEC_IMAGE for the
* purpose of ASLR_MAPPED in pre-processing, and
* should be able to tell MEM_IMAGE from
* MEM_MAPPED. Can do only if tracking
* NtCreateSection(), or better yet should just
* NtQuerySection() which would work for
* NtCreateSection(), but the loader uses NtOpenSection()
* without SECTION_QUERY.
*
* FIXME: see if using queried_section_attributes would
* help. There is nothing interesting in
* SectionImageInformation (other than that
* NtQuerySection() would return STATUS_SECTION_NOT_IMAGE
             * when asking for it, if not an image).  We should touch only
* SEC_IMAGE and definitely not mess with SEC_BASED
*
* Extra syscall here won't be too critical - we're
* already calling at least NtQueryVirtualMemory() in
* process_mmap(), and currently safe_read/safe_write are
* also system calls.
*/
/* FIXME: still unclear whether the loader always
* first maps as PAGE_EXECUTE and only afterwards
* it tries a rw- mapping
*/
/* on xp sp2 seen this use of NtMapViewOfSection PAGE_READONLY
* kernel32!BasepCreateActCtx+0x3d8:
* 7c8159b1 push 0x2
* 7c8159cf call dword ptr [kernel32!_imp__NtMapViewOfSection]
*/
ASSERT_CURIOSITY(zerobits == 0);
ASSERT_CURIOSITY(commit_size == 0);
/* only nodemgr and services have been observed to use
* ViewUnmap, in nodemgr it is on Module32Next from the
* ToolHelp.
* FIXME: unclear whether we'll want to do something
* different for ViewShare handle inheritance if we go
* after ASLR_SHARED_PER_USER. Unlikely that a high
* privilege service will share handles with a low
* privilege one though.
*/
ASSERT_CURIOSITY(inherit_disposition == 0x1 /* ViewShare */ ||
inherit_disposition == 0x2 /* ViewUnmap */);
            /* cygwin uses AT_ROUND_TO_PAGE but specifies a file offset,
             * not seen in DLL mappings */
ASSERT_CURIOSITY(allocation_type == 0);
ASSERT_CURIOSITY(prot == PAGE_EXECUTE || prot == PAGE_READWRITE);
DOSTATS({
if (prot == PAGE_EXECUTE)
STATS_INC(app_mmap_section_x);
else {
STATS_INC(app_mmap_section_rw);
}
});
/* seen only either both 0 or both set */
ASSERT_CURIOSITY(requested_size == 0 || requested_base != 0);
/* assumption: loader never suggests base in 1st map */
if (requested_base == 0) {
DODEBUG({
if (TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache)) &&
dcontext->aslr_context.randomized_section_handle !=
section_handle) {
STATS_INC(aslr_dlls_not_shared);
ASSERT_CURIOSITY(dcontext->aslr_context.
last_app_section_handle == section_handle);
                /* note that unusual uses of sections by other than the loader can trigger this */
if (dcontext->aslr_context.
last_app_section_handle == section_handle)
                    /* FIXME: with MapViewOfSection private
                     * ASLR processing we don't quite know
                     * whether we're dealing with an image or
                     * a mapped file.  This is always hit by
                     * LdrpCheckForLoadedDll, which suggests
                     * that only SEC_IMAGE should be bumped,
                     * not SEC_COMMIT as well.  Maybe there is
                     * nothing wrong with doing this and we
                     * should take out this warning.
                     */
SYSLOG_INTERNAL_WARNING_ONCE("non-image DLL pre-processed for private ASLR");
else {
/* could have been exempted */
SYSLOG_INTERNAL_WARNING_ONCE("image DLL ASLRed without sharing");
}
}
});
if (TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache)) &&
dcontext->aslr_context.randomized_section_handle ==
section_handle) {
/* shared DLL mapping at presumably randomized location,
* leave base unset for preferred mapping
*/
/* we may want to check whether preferred
* base+size is available, but since racy we
* anyways have to check the success afterwards
*/
STATS_INC(aslr_dlls_shared_mapped);
/* mark so that we can handle failures */
dcontext->aslr_context.sys_aslr_clobbered = true;
} else { /* private ASLR */
/* FIXME: we may want to take a hint from prot and expected size */
modified_base = aslr_get_next_base();
if (!TEST(ASLR_INTERNAL_RANGE_NONE,
INTERNAL_OPTION(aslr_internal))) {
/* really modify base now */
/* note that pbase_unsafe is an IN/OUT argument,
* so it is not likely that the application would
* have used the passed value. If we instead
* passed a pointer to our own (dcontext) variable
* we'd have to safe_write it back in aslr_post_process_mapview.
*/
DEBUG_DECLARE(bool ok = )
safe_write(pbase_unsafe, sizeof(modified_base), &modified_base);
ASSERT(ok);
STATS_INC(aslr_dlls_bumped);
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: NtMapViewOfSection prot=%s BUMPED to "PFX"\n",
prot_string(prot), modified_base);
/* mark so that we can handle failures, not allow
* detach when system call arguments are modified
* from what the application can handle if we do
* not deal with possible failures */
dcontext->aslr_context.sys_aslr_clobbered = true;
} else {
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: NtMapViewOfSection prot=%s RANGE_NONE: would be at "
PFX"\n", prot_string(prot), modified_base);
}
}
} else {
            /* Apparently the loader maps again with the known base and size.
             * Since we have modified the base already, we'll just leave it
             * alone; in the same example as noted for dadkeyb.dll above:
             * syscall: NtMapViewOfSection *base=0x00980000 *size=0x13000 prot=rw--
             * sh=1832 zero=0 commit=0 &secoffs=0 inherit=1 type=0
             * syscall: failed NtMapViewOfSection prot=rw-- => 0xc0000018
             * Since it fails and refers to an already randomized DLL there is
             * nothing to do here.
             *
             * All other yet to be seen users that set a base are
             * also assumed to not need to be randomized.  We may
             * have to revisit for MEM_MAPPED.
             */
ASSERT_CURIOSITY(aslr_last_dll_bounds->start == 0 || /* given up */
aslr_last_dll_bounds->start ==
requested_base /* may be race? */
|| TEST(ASLR_SHARED_CONTENTS,
DYNAMO_OPTION(aslr_cache))
/* not keeping keep track for shared */);
            /* FIXME: for ASLR_SHARED_CONTENTS this would be at the
             * requested shared preferred mapping address, which is
             * not the same as the private address!  Or, if it is
             * hitting a conflict, it is in fact the base of the
             * last mapping that was left to the kernel */
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: not touching NtMapViewOfSection prot=%s requested "PFX"\n",
prot_string(prot), requested_base);
STATS_INC(app_mmap_requested_base);
}
} else {
DOSTATS({
if (psection_offs_unsafe == NULL) {
if (prot == PAGE_READONLY)
STATS_INC(app_mmap_section_r);
else {
/* not seen other prot requests */
ASSERT_CURIOSITY(false && "unseen protection");
}
}
});
}
} else {
IPC_ALERT("WARNING: MapViewOfSection on another process\n");
}
}
NTSTATUS
aslr_retry_map_syscall(dcontext_t *dcontext, reg_t *param_base)
{
    /* FIXME: we could issue a system call from the app and just pass the
     * sysnum and param_base, yet we don't have the facility to handle
     * post_system_call for that case.  Instead we issue our own copy
     * of the arguments; note that all OUT arguments will be modified
     * directly in the app space anyways.  Only IN argument races
     * and overwrites won't be transparent.
     */
NTSTATUS res;
    /* Minor hit of unnecessary argument copying; allows us to work
     * with any special handling needed by NT_SYSCALL
     */
HANDLE section_handle = (HANDLE) postsys_param(dcontext, param_base, 0);
HANDLE process_handle = (HANDLE) postsys_param(dcontext, param_base, 1);
void **pbase_unsafe = (void *) postsys_param(dcontext, param_base, 2);
ULONG_PTR zerobits = (ULONG_PTR) postsys_param(dcontext, param_base, 3);
size_t commit_size = (size_t) postsys_param(dcontext, param_base, 4);
LARGE_INTEGER *section_offs = (LARGE_INTEGER *)
postsys_param(dcontext, param_base, 5);
SIZE_T *view_size = (SIZE_T *) postsys_param(dcontext, param_base, 6);
uint inherit_disposition = (uint) postsys_param(dcontext, param_base, 7);
uint type = (uint) postsys_param(dcontext, param_base, 8);
uint prot = (uint) postsys_param(dcontext, param_base, 9);
/* Atypical use of NT types in nt_map_view_of_section to reaffirm
* that we are using this on behalf of the application. */
res = nt_raw_MapViewOfSection(section_handle, /* 0 */
process_handle, /* 1 */
pbase_unsafe, /* 2 */
zerobits, /* 3 */
commit_size, /* 4 */
section_offs, /* 5 */
view_size, /* 6 */
inherit_disposition, /* 7 */
type, /* 8 */
prot); /* 9 */
LOG(THREAD_GET, LOG_SYSCALLS|LOG_VMAREAS, 1,
"syscall: aslr_retry_map_syscall NtMapViewOfSection *pbase="PFX
", prot=%s, res "PFX"\n", *pbase_unsafe, prot_string(prot), res);
ASSERT_CURIOSITY(NT_SUCCESS(res));
return res;
}
/* get mapping size needed for an application section */
bool
aslr_get_module_mapping_size(HANDLE section_handle,
size_t *module_size,
uint prot)
{
NTSTATUS res;
app_pc base = (app_pc)0x0; /* default mapping */
size_t commit_size = 0;
SIZE_T view_size = 0; /* we need to know full size */
uint type = 0; /* commit is default */
/* note the section characteristics determine whether MEM_MAPPED
* or MEM_IMAGE is needed */
/* we need protection flags given by the caller, so we can avert a
* STATUS_SECTION_PROTECTION error - A view to a section specifies
* a protection which is incompatible with the initial view's
* protection.
*/
/* FIXME: case 9669 - if we have SECTION_QUERY privilege we can
* try to get the size from SectionBasicInformation.Size, and map
* only on failure
*/
res = nt_raw_MapViewOfSection(section_handle, /* 0 */
NT_CURRENT_PROCESS, /* 1 */
&base, /* 2 */
0, /* 3 */
commit_size, /* 4 */
NULL, /* 5 */
&view_size, /* 6 */
ViewShare, /* 7 */
type, /* 8 */
prot); /* 9 */
ASSERT(NT_SUCCESS(res));
if (!NT_SUCCESS(res))
return false;
/* side note: windbg receives a ModLoad: for our temporary mapping
* at the NtMapViewOfSection(), no harm */
*module_size = view_size;
res = nt_raw_UnmapViewOfSection(NT_CURRENT_PROCESS, base);
ASSERT(NT_SUCCESS(res));
return true;
}
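/* Usage sketch (illustrative, mirroring the retry path in
 * aslr_post_process_mapview() below):
 *   size_t size_needed;
 *   if (aslr_get_module_mapping_size(section_handle, &size_needed, prot))
 *       retry_base = aslr_update_failed(true, base_requested, size_needed);
 */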
/* since always coming from dispatch now, only need to set mcontext, but we
* continue to set reg_eax in case it's read later in the routine
* FIXME: assumes local variable reg_eax
*/
#define SET_RETURN_VAL(dc, val) do {           \
        reg_eax = (reg_t) (val);               \
        get_mcontext(dc)->xax = (reg_t) (val); \
    } while (0)
/* POST processing of NtMapViewOfSection. Should be called only when
* base is clobbered by us. Potentially modifies app registers and system
* call parameters.
*/
void
aslr_post_process_mapview(dcontext_t *dcontext)
{
reg_t *param_base = dcontext->sys_param_base;
reg_t reg_eax = get_mcontext(dcontext)->xax;
NTSTATUS status = (NTSTATUS) reg_eax; /* get signed result */
HANDLE section_handle = (HANDLE) postsys_param(dcontext, param_base, 0);
HANDLE process_handle = (HANDLE) postsys_param(dcontext, param_base, 1);
void **pbase_unsafe = (void *) postsys_param(dcontext, param_base, 2);
uint zerobits = (uint) postsys_param(dcontext, param_base, 3);
size_t commit_size = (size_t) postsys_param(dcontext, param_base, 4);
uint *section_offs = (uint *) postsys_param(dcontext, param_base, 5);
size_t *view_size = (size_t *) postsys_param(dcontext, param_base, 6);
uint inherit_disposition = (uint) postsys_param(dcontext, param_base, 7);
uint type = (uint) postsys_param(dcontext, param_base, 8);
uint prot = (uint) postsys_param(dcontext, param_base, 9);
size_t size;
app_pc base;
/* retries to recover private ASLR from range conflict */
    uint retries_left = DYNAMO_OPTION(aslr_retry) + 1 /* must fall back to native */;
ASSERT(dcontext->aslr_context.sys_aslr_clobbered);
/* unlikely that a dynamic option change happened in-between */
ASSERT_CURIOSITY(TESTANY(ASLR_DLL|ASLR_MAPPED, DYNAMO_OPTION(aslr)));
ASSERT(is_phandle_me(process_handle));
/* FIXME: should distinguish SEC_IMAGE for the purpose of
* ASLR_MAPPED in pre-processing. Should be able to tell
* MEM_IMAGE from MEM_MAPPED, here at least ASSERT.
*/
/* expected attributes only when we have decided to clobber,
* under ASLR_DLL it is only loader objects.
*/
DOCHECK(1, {
uint section_attributes;
get_section_attributes(section_handle, &section_attributes, NULL);
ASSERT_CURIOSITY(section_attributes == 0 ||
TESTALL(SEC_IMAGE | SEC_FILE, section_attributes));
ASSERT_CURIOSITY(section_attributes == 0 || /* no Query access */
!TESTANY(~(SEC_IMAGE | SEC_FILE | GENERIC_EXECUTE),
section_attributes));
});
ASSERT_CURIOSITY(status == STATUS_SUCCESS ||
status == STATUS_IMAGE_NOT_AT_BASE ||
status == STATUS_CONFLICTING_ADDRESSES);
/* handle shared DLL ASLR mapping */
if (TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache)) &&
dcontext->aslr_context.randomized_section_handle == section_handle) {
if (NT_SUCCESS(status)) {
if (status == STATUS_SUCCESS) {
STATS_INC(aslr_dlls_shared_mapped_good);
} else if (status == STATUS_IMAGE_NOT_AT_BASE) {
/* we can live with not being at our choice as well,
* though it breaks all the work we did to share this
* mapping.
*/
                /* If we fail to map a shared DLL at its preferred
                 * base we're not gaining any sharing.  We should revert
                 * this DLL back to private randomization for better
                 * controlled randomization; worse, the kernel will pick
                 * the lowest possible address, which may be easier to
                 * predict.  TOFILE: currently useful to leave
                 * as is for testing full sharing.
                 */
SYSLOG_INTERNAL_WARNING("conflicting shared mapping "
"should use private instead\n");
/* FIXME: should get some systemwide stats on how
* often do we get the correct base so we can measure
* the effectiveness of the randomization mapping */
STATS_INC(aslr_dlls_shared_map_rebased);
} else
ASSERT_NOT_REACHED();
/* if successful, we'll use the original base from our records,
* not from mapped PE, so we can detect attacks.
*
* case 8507 similarly we have to register to fool hotpatching's
* timestamp/checksum. Saved on section create or open
* aslr_context.original_section_{base,checksum,timestamp}.
*/
/* add to preferred module range */
if (TEST(ASLR_TRACK_AREAS, DYNAMO_OPTION(aslr_action))) {
ASSERT(NT_SUCCESS(status));
/* we assume that since syscall succeeded these dereferences are safe
* FIXME : could always be multi-thread races though */
size = *view_size; /* ignore commit_size? */
base = *((app_pc *)pbase_unsafe);
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: SHARED NtMapViewOfSection "PFX" size="PIFX" prot=%s => "
PFX"\n", base, size, prot_string(prot), reg_eax);
/* We need to provide the original preferred address
* which was preserved at the section creation in
* aslr_context. We also keep the original base in
* the module list so that on UnMapViewOfSection we
* can remove the preferred region
*/
aslr_track_randomized_dlls(dcontext, base, size, true /* Map */,
true /* Our Shared File */);
}
} else {
            /* FIXME: we've gone too far here - we can still switch the
             * file handle to the original handle for creating a new
             * section, and then map that instead and recover the
             * application's intent.  Or should we have kept the
             * original_section_handle open until here?
             */
STATS_INC(aslr_dlls_shared_map_failed);
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: unexpected failure on shared NtMapViewOfSection"
" prot=%s => "PFX"\n",
prot_string(prot), reg_eax);
/* we can't simply restore application request below, and retry */
ASSERT_CURIOSITY(false && "unexpected error status");
/* FIXME: return error to app hoping it would have been a
* native error as well. Would we be out of virtual
* address space?
*/
ASSERT_NOT_IMPLEMENTED(false);
}
dcontext->aslr_context.randomized_section_handle =
INVALID_HANDLE_VALUE;
dcontext->aslr_context.sys_aslr_clobbered = false;
return;
} else if (TEST(ASLR_SHARED_CONTENTS, DYNAMO_OPTION(aslr_cache)) &&
dcontext->aslr_context.randomized_section_handle != section_handle) {
/* flag that private mapping should be processed in
* update_module_list()
*/
dcontext->aslr_context.original_section_base = ASLR_INVALID_SECTION_BASE;
}
/* handle private rebasing on ASLR mapping */
    /* FIXME: STATUS_ILLEGAL_DLL_RELOCATION: what triggers
     * "The system DLL %hs was relocated in memory.  The
     * application will not run properly.  The relocation
     * occurred because the DLL %hs occupied an address range
     * reserved for Windows system DLLs."?
     */
/* Three potential problems that prevent us from randomizing all
* mappings: DLL exemptions by name, DLL FIXED, and races due to
* non-atomic interposition on system calls.
*
* Three approaches to solving them:
* 1) keep track of handles from file to section -
* may be able to do exemption on file name
* 2) presyscall
* o may grab a lock to deal with in-process races
* o extra map/unmap - can figure out FIXED or PE name
* 3) postsyscall undo and redo
* o can handle racy allocation failure by retrying on failure
* o can figure out PE name and FIXED exemption, unmap & retry
*
* Currently using 3) to avoid adding the Map/UnMap on the normal
* path, assuming exemptions are exceptions not the norm, and also
* allows dealing with IPC allocations.
*/
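    /* Sketch of the retry flow implemented below (approach 3):
     *   while the syscall failed and retries remain:
     *     on STATUS_CONFLICTING_ADDRESSES, measure the module via
     *     aslr_get_module_mapping_size(), pick a new base with
     *     aslr_update_failed(true, ...), and reissue the syscall;
     *     otherwise (or when out of retries) reset the base to 0 and
     *     fall back to the application's native request.
     */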
if (!NT_SUCCESS(status)) {
app_pc retry_base = 0;
        NTSTATUS retry_result = 0;
/* Need to handle failures and retry. For sure we can cause
* STATUS_CONFLICTING_ADDRESSES and since the loader doesn't
* retry we have to retry for it. Conservatively we should
* retry on any other unknown failure.
*/
/* FIXME: should we look for the end/beginning of the current
* mapping at the conflicting location and try one more time?
* mostly needed for ASLR_RANGE_BOTTOM_UP/ASLR_RANGE_TOP_DOWN.
* ASLR_RANGE_RANDOM should have a full address space map to
* allow it to choose any location.
*/
/* Note that SQL server is grabbing a lot of virtual
* address space in the example I've seen it has taken
* everything from after sqlsort.dll 42b70000 and
* reserves all the memory until rpcrt4.dll 77d30000
* So a scheme that simply gives up randomizing after
* hitting these will not do us much good here.
* Should wrap around and continue looking for good
* ranges.
*
* Side note that due to the above reservation some
* dynamically loaded DLLs are not at predictable
* locations, since loaded by multiple threads. SQL
* Slammer used a stable location in the statically
* linked sqlsort.dll as a trampoline.
*/
/* FIXME: alternative solution is to retry with no
* base address - and use the returned mapping as a
* hint where the OS would rather have us, then unmap,
* add jitter and try again. The problem is that most
* DLLs in the usual case will prefer to be at their
* preferred base.
*/
ASSERT_CURIOSITY(status == STATUS_CONFLICTING_ADDRESSES);
ASSERT(*pbase_unsafe != 0); /* ASSERT can take a risk */
ASSERT(*pbase_unsafe == aslr_last_dll_bounds->start);
        ASSERT(retries_left > 0); /* uint; initialized to aslr_retry + 1 */
/* possibly several ASLR attempts, and a final native base retry */
/* retry syscall */
do {
if (status == STATUS_CONFLICTING_ADDRESSES) {
/* we can modify the arguments and give it another shot */
if (retries_left > 1) {
/* note aslr_last_dll_bounds->start is global so
* subject to race, while the *pbase_unsafe is app
* memory similarly beyond our control, so neither
* one can really be trusted to be what the
* syscall really used. We choose to use the app
* for the base_requested hint.
*/
app_pc base_requested = 0;
size_t size_needed;
TRY_EXCEPT(dcontext, {
base_requested = *pbase_unsafe;
}, { /* nothing */ });
/* although we could skip the first MEM_FREE block
* and assume we were too big, we're not
* guaranteed we'd find enough room in the next
* hole either in a small number of retries, so
* we're doing a full NtMapViewOfSection() to
* obtain the actual size needed
*/
if (aslr_get_module_mapping_size(section_handle, &size_needed, prot)) {
retry_base = aslr_update_failed(true /* request a better fit */,
base_requested,
size_needed);
ASSERT_CURIOSITY(retry_base != 0 ||
/* case 9893: suppress for short regr for now */
check_filter("win32.reload-race.exe",
get_short_name(get_application_name())));
} else {
retry_base = NULL;
}
if (retry_base == NULL) {
SYSLOG_INTERNAL_WARNING_ONCE("ASLR conflict at "PFX", "
"no good fit, giving up",
*pbase_unsafe);
/* couldn't find a better match */
STATS_INC(aslr_dll_conflict_giveup);
/* if giving up we just process as if application request */
retries_left = 0;
/* same as handling any other error */
} else {
SYSLOG_INTERNAL_WARNING_ONCE("ASLR conflict at "PFX
", retrying at "PFX,
*pbase_unsafe, retry_base);
/* we'll give it another shot at the new address
* although it may still fail there due to races,
* so we have to be ready to retry the original app
*/
ASSERT(dcontext->aslr_context.sys_aslr_clobbered);
retries_left--;
ASSERT(retries_left > 0);
STATS_INC(aslr_dll_conflict_fit_retry);
}
} else {
/* first solution: give up our randomization and move on */
SYSLOG_INTERNAL_WARNING_ONCE("ASLR conflict at "PFX", giving up",
*pbase_unsafe);
/* if giving up we just process as if application request */
retries_left = 0;
retry_base = aslr_update_failed(false /* no new request */, NULL, 0);
STATS_INC(aslr_dll_conflict_giveup);
}
/* side note: WinDbg seems to get notified even when the system call fails
* so when executing this under a debugger a sequence like this is seen:
* when run with ASLR_RANGE_SAME_STRESS
*
* WARNING: WS2HELP overlaps Msi
* ModLoad: 43b40000 43b40000 I:\WINDOWS\system32\WS2HELP.dll
* ModLoad: 71aa0000 71aa8000 I:\WINDOWS\system32\WS2HELP.dll
*
* WARNING: WSOCK32 overlaps IMAGEHLP
* WARNING: WSOCK32 overlaps urlmon
* WARNING: WSOCK32 overlaps appHelp
* WARNING: WSOCK32 overlaps btkeyind
* ModLoad: 43aa0000 43aeb000 I:\WINDOWS\system32\WSOCK32.dll
* ModLoad: 71ad0000 71ad9000 I:\WINDOWS\system32\WSOCK32.dll
*/
} else {
ASSERT_NOT_TESTED();
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: unexpected failure on NtMapViewOfSection prot=%s => "PFX"\n",
prot_string(prot), reg_eax);
/* FIXME: note that we may be able to retry on out of
* page file memory if there was a transient memory
* use, though success is unlikely to be worthwhile
*/
/* just restore application request below, and retry */
ASSERT_CURIOSITY(false && "unexpected error status");
/* directly pass retried result to the application */
retries_left = 0;
retry_base = aslr_update_failed(false /* no retry */, NULL, 0);
}
if (retries_left == 0) {
dcontext->aslr_context.sys_aslr_clobbered = false;
ASSERT(retry_base == NULL);
/* we get here only when aslr_pre_process_mapview()
* has verified the app request was for base 0
*/
}
safe_write(pbase_unsafe, sizeof(retry_base), &retry_base);
/* here we reset all IN/OUT arguments */
/* make sure that even on syscall failure OUT arguments aren't set */
ASSERT(*view_size == 0); /* we handle only when not set */
ASSERT(section_offs == NULL); /* optional, we handle only when not set */
/* we have to be able to handle failure of new base */
ASSERT(retry_base == 0 || dcontext->aslr_context.sys_aslr_clobbered);
ASSERT(*pbase_unsafe == retry_base); /* retry at base,
* unsafe ASSERT can take a risk */
/* retry with new mapping base - passing arguments */
retry_result = aslr_retry_map_syscall(dcontext, param_base);
        SET_RETURN_VAL(dcontext, retry_result); /* sets reg_eax */
        status = retry_result; /* the loop condition and the stats below must
                                * see the retried result, not the stale one */
/* reread all OUT arguments since we have to handle
* the retried system call as if that's what really happened
*/
ASSERT(section_handle == (HANDLE) postsys_param(dcontext, param_base, 0));
ASSERT(process_handle == (HANDLE) postsys_param(dcontext, param_base, 1));
pbase_unsafe = (void *) postsys_param(dcontext, param_base, 2);/* OUT */
ASSERT(zerobits == (uint) postsys_param(dcontext, param_base, 3));
ASSERT(commit_size == (size_t) postsys_param(dcontext, param_base, 4));
section_offs = (uint *) postsys_param(dcontext, param_base, 5);/* OUT */
view_size = (size_t *) postsys_param(dcontext, param_base, 6); /* OUT */
ASSERT(inherit_disposition == (uint) postsys_param(dcontext, param_base, 7));
ASSERT(type == (uint) postsys_param(dcontext, param_base, 8));
ASSERT(prot == (uint) postsys_param(dcontext, param_base, 9));
STATS_INC(aslr_error_retry);
DOSTATS({
if (!NT_SUCCESS(status)) {
STATS_INC(aslr_error_on_retry);
} else {
if (status == STATUS_SUCCESS)
STATS_INC(aslr_retry_at_base);
else if (status == STATUS_IMAGE_NOT_AT_BASE)
STATS_INC(aslr_retry_not_at_base);
else
ASSERT_NOT_REACHED();
}
});
/* we retry further only if we tried a different base, and
* otherwise leave to the application as it was
*/
} while (!NT_SUCCESS(status) &&
(retries_left > 0));
        /* the last retry is native, hence the implication: */
ASSERT(!(retries_left == 0) ||
!dcontext->aslr_context.sys_aslr_clobbered);
ASSERT(!dcontext->aslr_context.sys_aslr_clobbered ||
NT_SUCCESS(status));
}
DOCHECK(1, {
if (dcontext->aslr_context.sys_aslr_clobbered
&& NT_SUCCESS(status)) {
/* really handle success later, after safe read of base and size */
/* verify that we always get a (success) code */
/* STATUS_IMAGE_NOT_AT_BASE ((NTSTATUS)0x40000003L)
*
* FIXME: I presume the loader maps MEM_MAPPED as
* MapViewOfSection(--x) and maybe it just reads the PE
* headers? Only the MapViewOfSection(rw-) in fact returns
* STATUS_IMAGE_NOT_AT_BASE
*/
/* Note the confusing mapping of MEM_MAPPED as --x, and of MEM_IMAGE as rw-! */
ASSERT_CURIOSITY((prot == PAGE_EXECUTE && status == STATUS_SUCCESS) ||
                 (prot == PAGE_READWRITE && status == STATUS_IMAGE_NOT_AT_BASE));
/* FIXME: case 6736 is hitting this as well - assumed
* SEC_RESERVE 0x4000000, prot = RW, inherit_disposition = ViewUnmap
* and should simply allow that to get STATUS_SUCCESS
*/
/* FIXME: case 2298 needs to check for /FIXED DLLs - are they
 * going to fail above, or will the loader fail when presented
 * with them?
*
* FIXME: -exempt_aslr_list needs to be handled here
* FIXME: need to reset all IN/OUT arguments
*/
}
});
/* note this is a failure after retrying at the default base,
 * so if it fails it is not our fault
 */
if (!NT_SUCCESS(status)) {
ASSERT_NOT_TESTED();
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: retry failed NtMapViewOfSection prot=%s => "PFX"\n",
prot_string(prot), reg_eax);
ASSERT_CURIOSITY(false);
/* directly pass retried result to the application */
return;
}
ASSERT(NT_SUCCESS(status));
/* we assume that since the syscall succeeded these dereferences are safe,
 * FIXME: though there could always be multi-thread races */
size = *view_size; /* ignore commit_size? */
base = *((app_pc *)pbase_unsafe);
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: NtMapViewOfSection "PFX" size="PIFX" prot=%s => "PFX"\n",
base, size, prot_string(prot), reg_eax);
/* verify if need to exempt, only if we are still processing our randomization */
/* we are exempting only after the fact here */
/* keep in synch with is_aslr_exempted_file_name() */
if (dcontext->aslr_context.sys_aslr_clobbered &&
(!IS_STRING_OPTION_EMPTY(exempt_aslr_default_list) ||
!IS_STRING_OPTION_EMPTY(exempt_aslr_list) ||
!IS_STRING_OPTION_EMPTY(exempt_aslr_extra_list))) {
MEMORY_BASIC_INFORMATION mbi;
bool exempt = false; /* NOTE: we do not give up if we can't find
                      * a name, case 3858 if no name */
/* -exempt_aslr_list '*' is really only interesting as a
* stress test option, otherwise should just turn off ASLR_DLL
*/
if (IS_LISTSTRING_OPTION_FORALL(exempt_aslr_list))
exempt = true;
if (query_virtual_memory(base, &mbi, sizeof(mbi)) == sizeof(mbi)) {
ASSERT(mbi.Type == MEM_IMAGE || mbi.Type == MEM_MAPPED);
LOG(THREAD, LOG_SYSCALLS, 2, "ASLR: !vprot "PFX"\n", base);
DOLOG(2, LOG_SYSCALLS, {
dump_mbi(THREAD, &mbi, false);
});
} else
ASSERT_NOT_REACHED();
if (is_readable_pe_base(base)) {
/* Note that the loader first maps an image as MEM_MAPPED */
/* FIXME: in those allocations RVAs have to be converted
 * for our reads of the export table (and thus the PE name)
 * to work properly!
 */
/*
* 0:000> !vprot 0x43ab0000
* BaseAddress: 43ab0000
* AllocationBase: 43ab0000
* AllocationProtect: 00000010 PAGE_EXECUTE
* RegionSize: 00048000
* State: 00001000 MEM_COMMIT
* Protect: 00000010 PAGE_EXECUTE
* Type: 00040000 MEM_MAPPED
*/
if (mbi.Type == MEM_IMAGE) {
/* For MEM_IMAGE can properly get PE name. We haven't yet added
* to the loaded_module_areas so we can't use
* get_module_short_name(). We could use
* get_module_short_name_uncached(), but
* is_aslr_exempted_file_name() uses file name only, so we use
* that as well. (For example, in IE we have the browselc.dll
* file name vs the BROWSEUI.DLL resource name, and we don't want
* the user to have to specify a different name for private vs
* shared exemptions.)
*/
const char *module_name = NULL;
bool alloc = false;
uint module_characteristics;
if (DYNAMO_OPTION(track_module_filenames)) {
const char *path = section_to_file_lookup(section_handle);
if (path != NULL) {
module_name = get_short_name(path);
if (module_name != NULL)
module_name = dr_strdup(module_name HEAPACCT(ACCT_OTHER));
dr_strfree(path HEAPACCT(ACCT_VMAREAS));
}
}
if (module_name == NULL) {
alloc = true;
module_name =
get_module_short_name_uncached(dcontext, base, true/*at map*/
HEAPACCT(ACCT_OTHER));
}
LOG(THREAD, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: NtMapViewOfSection prot=%s mapped %s\n",
prot_string(prot), module_name ? module_name : "<noname>");
/* note that although we are undoing randomization
* of the MEM_IMAGE mapping (usually done in
* ntdll!LdrpMapDll), we don't handle it when
* loaded as MEM_MAPPED earlier */
ASSERT(module_name != NULL);
if (module_name != NULL &&
check_list_default_and_append(dynamo_options.exempt_aslr_default_list,
dynamo_options.exempt_aslr_list,
module_name)) {
SYSLOG_INTERNAL_WARNING("ASLR exempted DLL %s",
module_name);
exempt = true;
}
if (module_name != NULL &&
DYNAMO_OPTION(aslr_extra) &&
check_list_default_and_append("", /* no default list */
dynamo_options.exempt_aslr_extra_list,
module_name)) {
SYSLOG_INTERNAL_WARNING("ASLR exempted extra DLL %s",
module_name);
exempt = true;
}
module_characteristics = get_module_characteristics(base);
if (TEST(IMAGE_FILE_DLL, module_characteristics) &&
TEST(IMAGE_FILE_RELOCS_STRIPPED, module_characteristics)) {
/* Note that we still privately ASLR EXEs that are
 * presumed not to be executed but only loaded
 * for their resources */
/* FIXME: case 2298 this test doesn't really work
* for one version of /FIXED in our test suite as
* security-win32/secalign-fixed.dll.c, yet works
* for sec-fixed.dll.c*/
SYSLOG_INTERNAL_WARNING("ASLR exempted /FIXED DLL %s",
module_name ? module_name : "noname");
exempt = true;
}
DODEBUG({
if (!exempt && !TEST(IMAGE_FILE_DLL, module_characteristics)) {
/* EXEs usually have no PE name, and note that,
 * for example, in notepad.exe help (on XP SP2)
 * we get helpctr.exe loaded as
 * C:\WINDOWS\PCHealth\HelpCtr\Binaries\HelpCtr.exe
* LDRP_ENTRY_PROCESSED
* LDRP_IMAGE_NOT_AT_BASE
*/
SYSLOG_INTERNAL_INFO("ASLR note randomizing mapped EXE %s",
module_name != NULL ? module_name :
"noname");
}
});
/* add to preferred module range only if MEM_IMAGE */
if (TEST(ASLR_TRACK_AREAS, DYNAMO_OPTION(aslr_action))
&& !exempt) {
/* FIXME: only DLLs that are randomized by us get added,
* not any DLL rebased due to other conflicts (even if
* due to overlap our own allocations we don't take blame)
*/
/* FIXME: case 8490 on moving out */
aslr_track_randomized_dlls(dcontext, base, size, true /* Map */,
false /* Original File */);
}
if (alloc && module_name != NULL)
dr_strfree(module_name HEAPACCT(ACCT_OTHER));
} else {
ASSERT(mbi.Type == MEM_MAPPED);
/* FIXME: case 5325 still have to call a get_dll_short_name()
 * alternative that knows to use our ImageRvaToVa() (FIXME: case 6766)
 * to get the PE name and properly exempt these mappings
 */
/* Note: Although ntdll!LdrpCheckForLoadedDll maps DLL
* as MEM_MAPPED and we'll currently randomize that,
* it in fact doesn't depend on this mapping being at
* the normal DLL location. We will not exempt here.
*/
LOG(THREAD, LOG_SYSCALLS, 1,
"ASLR: NtMapViewOfSection "PFX" module not mapped as image!\n", base);
STATS_INC(app_mmap_PE_as_MAPPED);
/* FIXME: we do not check nor set exempt here! */
}
} else {
/* FIXME: case 6737 ASLR_MAPPED should we rebase other
* mappings that are not PEs? Reversing note: seen in
* notepad help, and currently rebased even for ASLR_DLL
*
* <?xml version="1.0" ...>
* <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
* <assemblyIdentity processorArchitecture="*" version="5.1.0.0"
* type="win32" name="Microsoft.Windows.Shell.shell32"/>
* <description>Windows Shell</description>
*
* 00b664e4 7c91659e ntdll!LdrGetDllHandleEx+0x258
* 00b66500 7c801d1f ntdll!LdrGetDllHandle+0x18
* 00b66568 7c816f55 kernel32!LoadLibraryExW+0x161 "I:\WINDOWS\WindowsShell.manifest"
* 00b66594 7c816ed5 kernel32!BasepSxsFindSuitableManifestResourceFor+0x51
* 00b66894 7d58f157 kernel32!CreateActCtxW+0x69e
* 00b66acc 7d58f0a8 mshtml!DllGetClassObject+0x1291
*/
LOG(THREAD, LOG_SYSCALLS, 1,
"ASLR: NtMapViewOfSection "PFX" not a module!\n", base);
STATS_INC(app_mmap_not_PE_rebased);
}
if (exempt) {
/* have to undo and redo app mapping */
app_pc redo_base = 0;
size_t redo_size = 0;
int redo_result;
/* undo: issue unmap on what we have bumped */
NTSTATUS res = nt_raw_UnmapViewOfSection(process_handle, base);
LOG(THREAD_GET, LOG_SYSCALLS|LOG_VMAREAS, 1,
"syscall: aslr exempt: NtUnmapViewOfSection base="PFX", res "PFX"\n",
base, res);
ASSERT(NT_SUCCESS(res));
/* if we cannot unmap our own mapping we're in trouble, but the app
 * should be ok: it will just have some wasted memory, and we can continue
 */
/* here we reset IN/OUT arguments in our current param_base
* (currently only pbase_unsafe and view_size),
* then retry just as above to remap at good base. */
safe_write(pbase_unsafe, sizeof(redo_base), &redo_base);
/* redo OUT argument view_size, whose value would have changed */
ASSERT_CURIOSITY(*view_size != 0);
safe_write(view_size, sizeof(redo_size), &redo_size);
ASSERT(*view_size == 0); /* we handle only when not set originally */
ASSERT(section_offs == NULL); /* optional, we handle only when not set */
/* no plans on trying a different base */
ASSERT(*pbase_unsafe == 0); /* retry at base, unsafe ASSERT can take a risk */
/* retry with new mapping base - passing arguments */
redo_result = aslr_retry_map_syscall(dcontext, param_base);
SET_RETURN_VAL(dcontext, redo_result); /* sets reg_eax */
LOG(THREAD_GET, LOG_SYSCALLS|LOG_VMAREAS, 1,
    "syscall: aslr exempt: NtMapViewOfSection got base="PFX", res "PFX"\n",
    *pbase_unsafe, redo_result);
/* no further processing of arguments here */
/* this worked for us, it should succeed for app, though there
* may be a conflict at the original base, while ours was good
*/
ASSERT_CURIOSITY(NT_SUCCESS((NTSTATUS)redo_result));
ASSERT(dcontext->aslr_context.sys_aslr_clobbered);
aslr_update_failed(false /* no retry */, NULL, 0);
dcontext->aslr_context.sys_aslr_clobbered = false;
STATS_INC(aslr_dlls_exempted);
}
}
/* update if randomized, but not if we had to retry on conflict, or if exempted */
if (dcontext->aslr_context.sys_aslr_clobbered) {
aslr_update_view_size(base, size);
dcontext->aslr_context.sys_aslr_clobbered = false;
}
}
/* PRE hook for NtUnmapViewOfSection */
void aslr_pre_process_unmapview(dcontext_t *dcontext,
app_pc base, size_t size)
{
reg_t *param_base = dcontext->sys_param_base;
/* remove from preferred module range */
if (TEST(ASLR_TRACK_AREAS, DYNAMO_OPTION(aslr_action))) {
/* FIXME: should move to post processing in
* aslr_post_process_mapview, for the unlikely case
* NtUnmapViewOfSection fails, and so that we remove only when
* really removed. We need to preserve all our data across
* system call.
*/
aslr_track_randomized_dlls(dcontext, base, size, false /* Unmap */,
false);
}
/* FIXME: need to mark in our range or vmmap that memory
 * is available. Note that the loader always does a
 * MapViewOfSection(--x);UnmapViewOfSection();MapViewOfSection(rw-);
 * so we'll leave a growing hole in case of DLL churn -
 * see case 6739 about virtual memory reclamation;
 * this also affects case 6729 on committed memory leaks and optimizations
 */
ASSERT_NOT_IMPLEMENTED(true);
}
/* POST processing of NtUnmapViewOfSection with possibly clobbered by us base */
reg_t
aslr_post_process_unmapview(dcontext_t *dcontext)
{
reg_t *param_base = dcontext->sys_param_base;
reg_t reg_eax = get_mcontext(dcontext)->xax;
NTSTATUS status = (NTSTATUS) reg_eax; /* get signed result */
ASSERT_NOT_IMPLEMENTED(false);
return reg_eax;
}
#ifdef DEBUG
/* doublecheck that the wouldbe areas are a subset of the loaded
 * modules' preferred ranges, by removing all known loaded modules'
 * preferred ranges;
 * returns true if vmvector_empty(aslr_wouldbe_areas).
 * Call once only!
 */
static bool
aslr_doublecheck_wouldbe_areas(void)
{
module_iterator_t *iter = module_iterator_start();
while (module_iterator_hasnext(iter)) {
size_t size;
module_area_t *ma = module_iterator_next(iter);
ASSERT(ma != NULL);
size = (ma->end - ma->start);
/* not all modules are randomized, ok not to find an overlapping one */
vmvector_remove(aslr_wouldbe_areas,
ma->os_data.preferred_base,
ma->os_data.preferred_base + size);
}
module_iterator_stop(iter);
return vmvector_empty(aslr_wouldbe_areas);
}
#endif /* DEBUG */
bool
aslr_is_possible_attack(app_pc target_addr)
{
/* FIXME: split by ASLR_DETECT_EXECUTE, ASLR_DETECT_READ,
* ASLR_DETECT_WRITE */
/* FIXME: case 7017 case 6287 check aslr_heap_pad_areas - less
* clear that this is an attack rather than stray execution, so
* we'd want that check under a different flag.
*
* FIXME: case TOFILE: should have a flag to detect any read/write
* exceptions in the aslr_wouldbe_areas or aslr_heap_pad_areas
* areas and make sure they are incompatibilities or real
* application bugs, although maybe present only with
* randomization so considered incompatibilities.
*/
return TESTALL(ASLR_TRACK_AREAS | ASLR_DETECT_EXECUTE, DYNAMO_OPTION(aslr_action)) &&
vmvector_overlap(aslr_wouldbe_areas, target_addr, target_addr + 1);
}
/* returns NULL if not relevant or not found */
app_pc
aslr_possible_preferred_address(app_pc target_addr)
{
if (TESTALL(ASLR_TRACK_AREAS | ASLR_DETECT_EXECUTE, DYNAMO_OPTION(aslr_action))) {
app_pc wouldbe_module_current_base =
vmvector_lookup(aslr_wouldbe_areas, target_addr);
app_pc wouldbe_preferred_base;
if (wouldbe_module_current_base == NULL) {
/* note we check according to aslr_action (on by default, so
 * effectively always) even if ASLR was never enabled,
 * to be able to handle -aslr being dynamically disabled.
 * We add areas only when ASLR is enabled.
 */
return NULL;
}
/* note that we don't have a vmvector interface to get the
 * base of the wouldbe area from which we got this */
/* but we double-check against the loaded_module_areas anyway */
/* FIXME: such an interface is being added on the Marlin branch, use when ready */
wouldbe_preferred_base =
get_module_preferred_base_safe(wouldbe_module_current_base);
ASSERT(vmvector_lookup(aslr_wouldbe_areas, wouldbe_preferred_base)
== wouldbe_module_current_base ||
/* FIXME case 10727: if serious then let's fix this */
check_filter("win32.reload-race.exe",
get_short_name(get_application_name())));
return target_addr - wouldbe_preferred_base
+ wouldbe_module_current_base;
} else {
ASSERT_NOT_TESTED();
return NULL;
}
}
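/* Illustrative sketch of the translation above (hypothetical numbers):
 * if a DLL with preferred base 0x71ab0000 was randomized by us to
 * 0x48cd0000, aslr_wouldbe_areas maps the would-be region starting at
 * 0x71ab0000 to the current base 0x48cd0000. An access targeting
 * target_addr = 0x71ab1234 then translates to
 * 0x71ab1234 - 0x71ab0000 + 0x48cd0000 = 0x48cd1234,
 * i.e., the same offset within the module at its randomized base.
 */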
static bool
aslr_reserve_remote_random_pad(HANDLE process_handle, size_t pad_size)
{
NTSTATUS res;
HANDLE child_handle = process_handle;
void *early_reservation_base = NULL; /* allocate earliest possible address */
size_t early_reservation_delta = get_random_offset(pad_size);
size_t early_reservation_size = ALIGN_FORWARD(early_reservation_delta,
ASLR_MAP_GRANULARITY);
ASSERT(!is_phandle_me(process_handle));
res = nt_remote_allocate_virtual_memory(child_handle,
&early_reservation_base,
early_reservation_size, PAGE_NOACCESS,
MEMORY_RESERVE_ONLY);
ASSERT(NT_SUCCESS(res));
/* not a critical failure if reservation has failed */
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: initial padding in child "PIFX", pad base="PFX", size="PIFX", res=0x%x\n",
child_handle,
early_reservation_base, early_reservation_size, res);
/* FIXME: case 7017 should pass the early
* reservation region to child for detecting exploits
* targeting a predictable stack (for ASLR_STACK); need to identify a
* mechanism that is stable across core versions. Once that's there the
* child should ASSERT its stack immediately follows this.
*
* Alternatively, for case 5366 we may choose to free this
* padding, and if freeing we can use a lot larger initial one,
* risking only fragmentation
*/
return NT_SUCCESS(res);
}
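/* Worked example of the pad computation above (hypothetical values):
 * with pad_size = 0x1000000 (16MB), suppose get_random_offset(0x1000000)
 * returns 0x123456; then early_reservation_size =
 * ALIGN_FORWARD(0x123456, ASLR_MAP_GRANULARITY) = 0x130000, so the child
 * gets a PAGE_NOACCESS, MEMORY_RESERVE_ONLY reservation of 0x130000 bytes
 * at the earliest available address, shifting its subsequent allocations
 * (including the first thread stack) by that random amount.
 */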
/* FIXME: this routine bases its decisions on the parent's options
 * instead of the target process's; these are currently controlled by
 * string options, which are too much effort to check remotely.
 */
/* may decide that the target is not a stack */
void
aslr_maybe_pad_stack(dcontext_t *dcontext, HANDLE process_handle)
{
/* note that we should be careful to ensure this is done only
 * before the very first thread injection in a newly created
 * process; otherwise we'd risk a virtual memory leak
 *
 * FIXME: case 7682 tracks correctly identifying remote thread
 * injectors other than the parent process
 */
ASSERT(!is_phandle_me(process_handle));
/* we should only handle remote reservation from parent to
* child */
/* we check whether the child is configured at all; note that by
 * doing this check only for a presumed thread stack, we can rely on
 * ProcessParameters having been created. FIXME: Since the
 * ProcessParameters will get normalized from offsets to pointers
 * only when the child starts running, if this is not a first
 * child we may read a random or incorrect value - e.g. the global
 * settings if the name we read is not good enough.
 */
/* Remotely injected threads should not need this since they will
 * get their padding from the general ASLR_HEAP in the child.
 */
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: check if thread is in new child "PIFX"\n", process_handle);
if (TEST(ASLR_STACK, DYNAMO_OPTION(aslr)) &&
DYNAMO_OPTION(aslr_parent_offset) > 0 &&
should_inject_into_process(get_thread_private_dcontext(),
process_handle,
NULL, NULL)) {
/* Case 9173: ensure we only do this once, as 3rd party
* hookers allocating memory can cause this routine to be
* invoked many times for the same child
*/
process_id_t pid = process_id_from_handle(process_handle);
if (pid == dcontext->aslr_context.last_child_padded) {
SYSLOG_INTERNAL_WARNING_ONCE("extra memory allocations for child "PIFX
" %d: hooker?", process_handle, pid);
} else {
bool ok = aslr_reserve_remote_random_pad(process_handle,
DYNAMO_OPTION(aslr_parent_offset));
ASSERT(ok);
if (ok)
dcontext->aslr_context.last_child_padded = pid;
}
} else {
DODEBUG({
if (TEST(ASLR_STACK, DYNAMO_OPTION(aslr)) &&
DYNAMO_OPTION(aslr_parent_offset) > 0) {
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: child not configured for protection, not padding\n");
}
});
}
}
#define LOADBLOCK_PAGE_PROTECT (PAGE_READWRITE | PAGE_GUARD)
/* Prevents the loader from mapping our DLL at its preferred base, by
 * allocating a random initial pad before we're loaded; the pad is
 * added even if ASLR_STACK or ASLR_PROCESS_PARAM isn't enabled.
 *
 * Child process should call aslr_free_dynamorio_loadblock() to free it.
 * FIXME: may want to make this routine available for pre_inject.c
 *
 * Note the option is active in the parent and applies to its children.
 * FIXME: Eventually should share views similarly to ASLR_SHARED_CONTENT.
*/
void
aslr_force_dynamorio_rebase(HANDLE process_handle)
{
/* We'll assume that the preferred base is the same in parent and
* child */
app_pc preferred_base = get_dynamorio_dll_preferred_base();
NTSTATUS res;
DEBUG_DECLARE(bool ok;)
LOG(THREAD_GET, LOG_SYSCALLS|LOG_THREADS, 1, "\ttaking over expected DLL base\n");
ASSERT(DYNAMO_OPTION(aslr_dr));
ASSERT(!is_phandle_me(process_handle));
res = nt_remote_allocate_virtual_memory(process_handle,
&preferred_base,
1*PAGE_SIZE, LOADBLOCK_PAGE_PROTECT,
MEM_RESERVE);
ASSERT(NT_SUCCESS(res));
/* not critical if we fail, though failure expected only if
* the target executable is also at our preferred base */
/* child process should free the page at preferred base if it
* looks like what we have created to not fragment the address
* space.
*/
/* no need to do both */
if (!(TEST(ASLR_STACK, DYNAMO_OPTION(aslr)))) {
/* random padding to have the loader load us at a not so
 * deterministic location */
DEBUG_DECLARE(ok = )
aslr_reserve_remote_random_pad(process_handle,
DYNAMO_OPTION(aslr_parent_offset));
ASSERT(ok);
} else {
/* do nothing, ASLR_STACK will add a pad */
}
/* FIXME: note that we should pass this region just as ASLR_STACK
* is supposed to so that the child can free that region, yet only
* at beginning of address space, and can double as extra heap
* randomization
*/
}
void
aslr_free_dynamorio_loadblock(void)
{
/* we don't want the loadblock to be a tombstone and get in the
 * way of other allocations, so we'll try to clean it up.
 */
/* we also need to make sure that we use the preferred base
 * collected earlier */
app_pc preferred_base = get_dynamorio_dll_preferred_base();
NTSTATUS res;
MEMORY_BASIC_INFORMATION mbi;
/* note that parent may have had different settings */
ASSERT(DYNAMO_OPTION(aslr_dr));
if (get_dynamorio_dll_start() == preferred_base) {
/* not rebased, no loadblock to free */
return;
}
/* first check whether we have allocated this */
if (query_virtual_memory(preferred_base, &mbi, sizeof(mbi)) == sizeof(mbi)) {
/* FIXME: the only way to get a strong guarantee that no other
 * block is allocated at our preferred base is by passing the
 * loadblock information to the child. This check, using an
 * unusual combination of State and AllocationProtect, makes it
 * very unlikely we'd accidentally free something else. */
if (mbi.RegionSize == PAGE_SIZE &&
mbi.State == MEM_RESERVE &&
mbi.Type == MEM_PRIVATE &&
mbi.AllocationProtect == LOADBLOCK_PAGE_PROTECT) {
LOG(GLOBAL, LOG_SYSCALLS|LOG_THREADS, 1, "\t freeing loadblock at preferred base\n");
res = nt_free_virtual_memory(preferred_base);
ASSERT(NT_SUCCESS(res));
} else {
/* We'd expect mbi.State==MEM_FREE, or the large reserved block
* that cygwin apps use if we come in late, or an executable
* at our preferred base (for which this will fire).
*/
ASSERT_CURIOSITY(mbi.State == MEM_FREE || !dr_early_injected);
LOG(GLOBAL, LOG_SYSCALLS|LOG_THREADS, 1,
"something other than loadblock, leaving as is\n");
}
}
}
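/* For reference, a loadblock left by aslr_force_dynamorio_rebase() is
 * expected to show up in the child roughly as this sketch (the actual
 * base depends on the build's preferred base):
 *
 *   BaseAddress:       <preferred_base>
 *   AllocationBase:    <preferred_base>
 *   AllocationProtect: PAGE_READWRITE|PAGE_GUARD (LOADBLOCK_PAGE_PROTECT)
 *   RegionSize:        PAGE_SIZE
 *   State:             MEM_RESERVE
 *   Type:              MEM_PRIVATE
 *
 * which is precisely the unusual combination tested above before freeing.
 */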
/* post processing of successful application reservations */
void
aslr_post_process_allocate_virtual_memory(dcontext_t *dcontext,
app_pc last_allocation_base,
size_t last_allocation_size)
{
ASSERT(ALIGNED(last_allocation_base, PAGE_SIZE));
ASSERT(ALIGNED(last_allocation_size, PAGE_SIZE));
ASSERT(TEST(ASLR_HEAP_FILL, DYNAMO_OPTION(aslr)));
if (DYNAMO_OPTION(aslr_reserve_pad) > 0) {
/* We need to randomly pad memory around each memory
* allocation as well. Conservatively, we reserve a new
* region after each successful native reservation and would
* have to free it whenever the target region itself is freed.
* Assumption: one can't free separately allocated regions
* with a single NtFreeVirtualMemory.
*
* Alternatively we can increase the size of the allocation,
* at the risk of breaking some application. Further even
* more risky, within a larger reservation, we could return a
* base that is not at the allocation granularity (but I
* wouldn't consider not returning at page granularity).
* Instead of actually keeping the reservation we could just
* forcefully reserve at a slightly padded address without
* really keeping the reservation ourselves.
*/
heap_error_code_t error_code;
size_t heap_pad_delta =
get_random_offset(DYNAMO_OPTION(aslr_reserve_pad));
size_t heap_pad_size = ALIGN_FORWARD(heap_pad_delta,
ASLR_MAP_GRANULARITY);
app_pc heap_pad_base;
app_pc append_heap_pad_base = (app_pc)
ALIGN_FORWARD(last_allocation_base + last_allocation_size,
ASLR_MAP_GRANULARITY);
bool immediate_taken =
get_memory_info(append_heap_pad_base, NULL, NULL, NULL);
/* there may be an allocation immediately following us, or a
 * hole too small for our request.
 *
 * FIXME: get_memory_info() should provide the size of the hole, but
 * we can't change the interface on Linux easily, so we're not using
 * that for now; we just try
 */
if (immediate_taken) {
STATS_INC(aslr_heap_giveup_filling);
/* FIXME: TOFILE we shouldn't give up here if we also want
* to fill uniformly */
/* currently not adding a pad if the immediately next region
 * is already allocated (e.g. MEM_MAPPED, or because with best-fit
 * allocation/fragmentation virtual memory allocation
 * is in non-linear order)
 */
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: ASLR_HEAP: giving up since next region "PFX" is taken\n",
append_heap_pad_base);
return;
}
/* memory reservation is racy with respect to other threads, but
 * if we didn't get one where we wanted it to be, it's not
 * deterministic and hence unlikely to be useful to attackers.
 */
heap_pad_base = os_heap_reserve(append_heap_pad_base,
heap_pad_size,
&error_code,
false/*ignored on Windows*/);
if (heap_pad_base == NULL) {
/* unable to get preferred, let the os pick a spot */
/* FIXME - remove this - no real reason to reserve if we can't get our
* preferred, but the old os_heap_reserve implementation automatically
* tried again for us and the code below assumes so. */
heap_pad_base = os_heap_reserve(NULL, heap_pad_size, &error_code,
false/*ignored on Windows*/);
}
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 2,
"ASLR: ASLR_HEAP: reserved pad base="PFX", size="PIFX", err=%x, after "PFX"\n",
heap_pad_base, heap_pad_size, error_code, append_heap_pad_base);
ASSERT_CURIOSITY(NT_SUCCESS(error_code) ||
check_filter("win32.oomtest.exe",
get_short_name(get_application_name())));
/* not critical functionality loss if we have failed to
* reserve this memory, but shouldn't happen */
if (NT_SUCCESS(error_code)) {
/* FIXME: currently nt_remote_allocate_virtual_memory()
* automatically retries for the next available region and for
* dual meaning of padding to mean waste some memory to detect
* brute force fill attacks, we can keep the allocation.
*
* However, we'd need a way to quickly lookup a region getting
* freed to find its corresponding pad.
* FIXME: for now, on a race we immediately give up the padding.
*/
/* FIXME: we checked earlier only whether the immediately next
 * region was already allocated, but when the hole is too small
 * for our allocation we also end up here
 */
if (heap_pad_base != append_heap_pad_base) {
size_t existing_size = 0;
bool now_immediate_taken =
get_memory_info(append_heap_pad_base, NULL, &existing_size, NULL);
/* FIXME: possible to simply not have enough room in current hole */
/* or somebody else already got the immediate next region */
ASSERT_CURIOSITY(!now_immediate_taken && "racy allocate");
/* FIXME: get_memory_info() DOESN'T fill in the size when MEM_FREE,
 * so this DOESN'T actually check existing_size; it's just 0 */
if (!now_immediate_taken && existing_size < heap_pad_size) {
/* FIXME: should we at least fill the hole? */
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 2,
"ASLR: ASLR_HEAP: giving up, hole after region "PFX
" is too small, req "PIFX" hole\n",
append_heap_pad_base, heap_pad_size);
/* FIXME: need to keep track of these - is there too much fragmentation? */
}
STATS_INC(aslr_heap_giveup_filling);
os_heap_free(heap_pad_base, heap_pad_size, &error_code);
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 2,
"ASLR: ASLR_HEAP: giving up, freed pad base="PFX", size="
PIFX", err=0x%x\n",
heap_pad_base, heap_pad_size, error_code);
ASSERT(NT_SUCCESS(error_code));
} else {
/* register this allocation
* 1) so that we can lookup free from original base and return the memory
*
* 2) so that we can detect attempted execution from it and flag
*/
ASSERT(!vmvector_overlap(aslr_heap_pad_areas,
heap_pad_base,
heap_pad_base + heap_pad_size));
/* FIXME: case 7017 should check the reservation region
* for detecting attacks targeting predictable heaps or
* brute force heap fill style attacks */
vmvector_add(aslr_heap_pad_areas,
heap_pad_base,
heap_pad_base + heap_pad_size,
last_allocation_base /* tag to match reservations */);
ASSERT(vmvector_overlap(aslr_heap_pad_areas,
heap_pad_base,
heap_pad_base + heap_pad_size));
ASSERT(vmvector_lookup(aslr_heap_pad_areas,
heap_pad_base) == last_allocation_base);
STATS_ADD_PEAK(aslr_heap_total_reservation, heap_pad_size/1024);
STATS_ADD_PEAK(aslr_heap_pads, 1);
STATS_INC(ever_aslr_heap_pads);
}
} else {
SYSLOG_INTERNAL_WARNING("ASLR_HEAP_FILL: error "PIFX" on ("PFX","PFX")\n",
error_code, append_heap_pad_base,
append_heap_pad_base + heap_pad_size);
/* FIXME: should try to flag if out of memory - could be
 * an application incompatible with too aggressive ASLR_HEAP_FILL
 *
 * (NTSTATUS) 0xc00000f2 - An invalid parameter was passed to a
 * service or function as the fourth argument.
 *
 * This was the result of 0x7ff90000+0x80000 = 0x80010000, which
 * of course is an invalid region.
 */
/* or
 * Error code: (NTSTATUS) 0xc0000017 (3221225495) - {Not Enough
 * Quota} Not enough virtual memory or paging file quota is
 * available to complete the specified operation.
 */
ASSERT_CURIOSITY(error_code == STATUS_INVALID_PARAMETER_4 ||
error_code == STATUS_NO_MEMORY);
}
}
}
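/* Worked example of the padding above (hypothetical addresses): for an
 * app reservation at last_allocation_base = 0x00350000 with
 * last_allocation_size = 0x5000, the pad is placed at
 * ALIGN_FORWARD(0x00350000 + 0x5000, ASLR_MAP_GRANULARITY) = 0x00360000,
 * its size is get_random_offset(DYNAMO_OPTION(aslr_reserve_pad)) rounded
 * up to 64KB, and it is tagged in aslr_heap_pad_areas with 0x00350000 so
 * it can be found and freed when the app frees its own region.
 */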
/* should be called before an application memory reservation is released.
 * Note that currently, in addition to explicit memory frees, it is also
 * called for implicit stack release on XP+.
 * If the application's system call fails it is not a critical failure
 * that we have freed a pad.
 */
void
aslr_pre_process_free_virtual_memory(dcontext_t *dcontext,
app_pc freed_base, size_t freed_size)
{
/* properly adjusted base and size for next allocation unit */
app_pc expected_pad_base = (app_pc)
ALIGN_FORWARD(freed_base + freed_size,
ASLR_MAP_GRANULARITY);
app_pc heap_pad_base, heap_pad_end;
size_t heap_pad_size;
heap_error_code_t error_code;
ASSERT(ALIGNED(freed_base, PAGE_SIZE));
ASSERT(ALIGNED(freed_size, PAGE_SIZE));
/* should have had a pad */
if (vmvector_lookup(aslr_heap_pad_areas,
expected_pad_base) != NULL) {
/* case 6287: due to handling MEM_COMMIT on stack allocations
 * it is now possible that the original MEM_RESERVE allocation
 * fails to pad (e.g. due to a MEM_MAPPED allocation), yet the
 * later MEM_RESERVE|MEM_COMMIT has a second chance. Rare, so
 * leaving for now. */
ASSERT_CURIOSITY(vmvector_lookup(aslr_heap_pad_areas,
expected_pad_base) == freed_base);
/* need to remove atomically to make sure that nobody else is
* freeing the same region at this point, otherwise on an
* application double free race, we may attempt to double free
* a region that may have been given back to the application.
*/
vmvector_remove_containing_area(aslr_heap_pad_areas, expected_pad_base,
&heap_pad_base, &heap_pad_end);
ASSERT(heap_pad_base == expected_pad_base);
ASSERT_CURIOSITY(!vmvector_overlap(aslr_heap_pad_areas,
expected_pad_base,
expected_pad_base + 1));
/* have to free it: even if we picked the wrong pad, we have
 * already removed it from the vmvector */
heap_pad_size = heap_pad_end - heap_pad_base;
os_heap_free(heap_pad_base, heap_pad_size, &error_code);
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 2,
"ASLR: ASLR_HEAP: freed pad base="PFX", size="PIFX", err=0x%x\n",
heap_pad_base, heap_pad_size, error_code);
ASSERT(NT_SUCCESS(error_code));
STATS_SUB(aslr_heap_total_reservation, (heap_pad_size/1024));
STATS_DEC(aslr_heap_pads);
} else {
/* no overlap */
ASSERT_CURIOSITY(!vmvector_overlap(aslr_heap_pad_areas,
expected_pad_base,
expected_pad_base + 1));
}
}
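/* Mirror of the worked example after
 * aslr_post_process_allocate_virtual_memory() (hypothetical addresses):
 * when the app frees base 0x00350000 of size 0x5000, expected_pad_base
 * is again ALIGN_FORWARD(0x00350000 + 0x5000, ASLR_MAP_GRANULARITY) =
 * 0x00360000, and the vmvector lookup is expected to return the tag
 * 0x00350000, tying the pad to the freed region before we release it.
 */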
/* called at startup to randomize immediately after known fixed
 * addresses. Note that if a hole at preferred_base is not available
 * we let the OS choose the allocation.
 */
static
app_pc
aslr_reserve_initial_heap_pad(app_pc preferred_base, size_t reserve_offset)
{
size_t heap_initial_delta =
get_random_offset(reserve_offset);
heap_error_code_t error_code;
size_t heap_reservation_size = ALIGN_FORWARD(heap_initial_delta,
ASLR_MAP_GRANULARITY);
app_pc heap_reservation_base = os_heap_reserve(preferred_base,
heap_reservation_size,
&error_code,
false/*ignored on Windows*/);
if (heap_reservation_base == NULL) {
/* unable to get a preferred, let the os pick a spot */
/* FIXME - remove this - no real reason to reserve if we can't get our
* preferred, but the old os_heap_reserve implementation automatically
* tried again for us and the code below assumes so. */
heap_reservation_base = os_heap_reserve(NULL, heap_reservation_size, &error_code,
false/*ignored on Windows*/);
}
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: ASLR_HEAP: requested random offset="PFX",\n"
"ASLR: reservation base="PFX", real size="PFX", err=0x%x\n",
reserve_offset,
heap_reservation_base, heap_reservation_size, error_code);
ASSERT_CURIOSITY(NT_SUCCESS(error_code));
/* not critical functionality loss if we have failed to
* reserve this memory, but shouldn't happen */
if (NT_SUCCESS(error_code)) {
/* register this allocation */
STATS_ADD(aslr_heap_initial_reservation, heap_reservation_size/1024);
STATS_ADD_PEAK(aslr_heap_total_reservation, heap_reservation_size/1024);
STATS_ADD_PEAK(aslr_heap_pads, 1);
STATS_INC(ever_aslr_heap_pads);
ASSERT(!vmvector_overlap(aslr_heap_pad_areas,
heap_reservation_base,
heap_reservation_base + heap_reservation_size));
/* FIXME: case 7017 should check the reservation region
* for detecting attacks targeting predictable heaps or
* brute force heap fill style attacks */
vmvector_add(aslr_heap_pad_areas,
heap_reservation_base,
heap_reservation_base + heap_reservation_size,
preferred_base);
/* Note we break the invariant for the custom field - this is
 * not the base of a previous allocation - but the initial
 * padding and the executable are not supposed to be freed, and
 * in case a smaller region in front of our pad gets freed we
 * still get to keep the pad
 */
}
return heap_reservation_base;
}
/* release all heap reservation pads by going through
 * aslr_heap_pad_areas; used on exit or detach. There will still
 * be lasting effects due to fragmentation.
 *
 * FIXME: case 6287: on application (!) or DR out-of-reservation-memory
 * conditions we should release all heap pads as well - the big initial
 * reservations should help free up some. Should do if case 6498 can be
 * reproduced with inflated reservation sizes. Yet attackers may control
 * the reservation sizes and could force a failing large request, or may
 * be able to fill all available heap with smaller requests.
 */
static
void
aslr_free_heap_pads(void)
{
DEBUG_DECLARE(uint count_freed = 0;)
vmvector_iterator_t vmvi;
vmvector_iterator_start(aslr_heap_pad_areas, &vmvi);
while (vmvector_iterator_hasnext(&vmvi)) {
app_pc start, end;
app_pc previous_base = vmvector_iterator_next(&vmvi, &start, &end);
app_pc heap_pad_base = start; /* assuming not overlapping */
size_t heap_pad_size = (end - start);
heap_error_code_t error_code;
os_heap_free(heap_pad_base, heap_pad_size, &error_code);
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 2,
"ASLR: ASLR_HEAP: final cleanup pad base="PFX", size="PIFX
", app_base="PFX", err=0x%x\n",
heap_pad_base, heap_pad_size, previous_base, error_code);
ASSERT(NT_SUCCESS(error_code));
STATS_SUB(aslr_heap_total_reservation, (heap_pad_size/1024));
STATS_DEC(aslr_heap_pads);
DODEBUG({count_freed++;});
}
vmvector_iterator_stop(&vmvi);
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1, "aslr_free_heap_pads: %d freed\n", count_freed);
}
/* ASLR_SHARED_CONTENTS related functions */
/* File backing seems to be unavoidable if we are to use our own private
 * section of a file, that is, to share the relocated version. See
 * comments in aslr_experiment_with_section_handle() about the
 * different layers of file, section, and view content backing and
 * sharing.
 */
/* currently doesn't require callers to close_handle().
 * Note: subject to change if we support impersonation
 */
HANDLE
get_relocated_dlls_filecache_directory(bool write_access)
{
/* FIXME: today both publishers and producers are getting full access */
/* the file cache may be per user (though impersonation may make
 * this harder); we'll assume that only the cache of the initial caller
 * should be used, while all other later impersonations will
 * simply fail to share anything. (The FTP problem still exists
 * if the FTP user is allowed to create files.)
 */
/* FIXME: currently only a single shared one, with full permissions */
return relocated_dlls_filecache_initial;
}
/* opens the DLL cache directory for this user,
* for now assuming both read and write privileges and opening a shared directory.
*/
/* FIXME: if per user we may keep it in e.g. \??\Program
* Files\Determina\Determina Agent\cache\USER or some other better
* per-USER location not under \Program Files. Note nodemgr may not
* be able to create these directories in advance, e.g. in a domain
* where a new user may log in at any time. For such a scenario,
* maybe we really wouldn't want the per-user directory at all...
*/
static HANDLE
open_relocated_dlls_filecache_directory(void)
{
char base_directory[MAXIMUM_PATH];
wchar_t wbuf[MAXIMUM_PATH];
HANDLE directory_handle;
int retval;
bool per_user = TEST(ASLR_SHARED_PER_USER, DYNAMO_OPTION(aslr_cache));
/* FIXME: note a lot of overlap with code in os_create_dir() yet
* that assumes we'll deal with file names, while I want to avoid
* further string concatenation. It also goes through a lot more
* hoops for unique and not yet created paths, while here we
* assume proper installer.
*/
/* If not per user we use the SHARED directory which requires
* content validation. FIXME: note that ASLR_SHARED_INHERIT may
* ask for opening two directories as trusted sources:
* DYNAMORIO_VAR_CACHE_ROOT (\cache) in addition to a per-USER
* subdirectory \cache\SID
*/
retval = get_parameter((per_user ?
PARAM_STR(DYNAMORIO_VAR_CACHE_ROOT) :
PARAM_STR(DYNAMORIO_VAR_CACHE_SHARED)),
base_directory, sizeof(base_directory));
if (IS_GET_PARAMETER_FAILURE(retval) ||
strchr(base_directory, DIRSEP) == NULL) {
SYSLOG_INTERNAL_ERROR(" %s not set!"
" ASLR sharing ineffective.\n",
(per_user ?
DYNAMORIO_VAR_CACHE_ROOT :
DYNAMORIO_VAR_CACHE_SHARED));
return INVALID_HANDLE_VALUE;
}
NULL_TERMINATE_BUFFER(base_directory);
LOG(GLOBAL, LOG_ALL, 1, "ASLR_SHARED: Opening file cache directory %s\n",
base_directory);
if (per_user) {
/* for now we'll always create directory, since without
* ASLR_SHARED_INHERIT almost every process will need to
* create some non-exempt from sharing DLLs
*/
bool res = os_current_user_directory(base_directory,
BUFFER_SIZE_ELEMENTS(base_directory),
true /* create if missing */);
if (!res) {
/* directory may be set even on failure */
LOG(GLOBAL, LOG_CACHE, 2, "\terror creating per-user dir %s\n", base_directory);
return INVALID_HANDLE_VALUE;
}
}
/* now using potentially modified base_directory per-user */
snwprintf(wbuf, BUFFER_SIZE_ELEMENTS(wbuf), GLOBAL_NT_PREFIX L"%hs",
base_directory);
NULL_TERMINATE_BUFFER(wbuf);
/* the shared directory is supposed to be created by nodemgr as
 * world writable. We should not create it with FILE_OPEN_IF if it
 * doesn't exist (if we did, it would inherit the permissions
 * of the parent, which are too restrictive).
 */
directory_handle = create_file(wbuf, true /* is_dir */,
READ_CONTROL /* generic rights */,
FILE_SHARE_READ
/* case 10255: allow persisted cache files
* in same directory */
| FILE_SHARE_WRITE,
FILE_OPEN, true);
if (directory_handle == INVALID_HANDLE_VALUE) {
SYSLOG_INTERNAL_ERROR("%s=%s is invalid!"
" ASLR sharing is ineffective.\n",
(per_user ?
DYNAMORIO_VAR_CACHE_ROOT :
DYNAMORIO_VAR_CACHE_SHARED), base_directory);
} else {
/* note that now that we have the actual handle open, we can validate */
if (per_user &&
(DYNAMO_OPTION(validate_owner_dir) || DYNAMO_OPTION(validate_owner_file))) {
/* see os_current_user_directory() for details */
if (!os_validate_user_owned(directory_handle)) {
/* we could report in release, but it's unlikely that
* it will get reported */
SYSLOG_INTERNAL_ERROR("%s -> %s is OWNED by an impostor!"
" ASLR sharing is disabled.",
(per_user ?
DYNAMORIO_VAR_CACHE_ROOT :
DYNAMORIO_VAR_CACHE_SHARED),
base_directory);
close_handle(directory_handle);
directory_handle = INVALID_HANDLE_VALUE;
} else {
/* either FAT32 or we are the proper owner */
/* FIXME: case 10504: we have to verify that the final permissions
 * and sharing attributes for cache/ and for the current
 * directory do NOT allow anyone to rename our directory
 * while in use and replace it. Otherwise we'd still
 * have to verify the owner of each file as well with
 * -validate_owner_file.
 */
}
}
}
return directory_handle;
}
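/* Example of the NT name constructed above (hypothetical install path):
 * with base_directory "C:\Program Files\Determina\cache\shared", the
 * snwprintf() yields L"\??\C:\Program Files\Determina\cache\shared",
 * which create_file() opens as a directory with READ_CONTROL and
 * FILE_SHARE_READ|FILE_SHARE_WRITE using FILE_OPEN (not FILE_OPEN_IF),
 * so a missing directory is an error rather than being created with
 * inherited, too restrictive permissions.
 */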
/* note that this is currently done mostly as a hack, to allow fast
* first level checksum comparison just based on a file handle.
* Returns true if the files were the same size, or we have
* successfully made them so.
*/
static bool
aslr_module_force_size(IN HANDLE app_file_handle,
IN HANDLE randomized_file_handle,
const wchar_t *file_name,
OUT uint64 *final_file_size)
{
uint64 app_file_size;
uint64 randomized_file_size;
bool ok;
ok = os_get_file_size_by_handle(app_file_handle,
&app_file_size);
if (!ok) {
ASSERT_NOT_TESTED();
return false;
}
ok = os_get_file_size_by_handle(randomized_file_handle,
&randomized_file_size);
if (!ok) {
ASSERT_NOT_TESTED();
return false;
}
if (randomized_file_size != app_file_size) {
ASSERT(randomized_file_size < app_file_size);
SYSLOG_INTERNAL_WARNING("aslr_module_force_size: "
"forcing %ls, padding %d bytes\n", file_name,
(app_file_size - randomized_file_size));
/* note that the Certificates Directory or debugging information
 * are the usual sources of such data not loaded by
 * NtMapViewOfSection. Since we pass such a file handle
 * only to SEC_IMAGE NtCreateSection() calls, we don't need to
 * call os_copy_file() to fill in the missing data. The
 * SEC_COMMIT use by the loader in ntdll!LdrpCheckForLoadedDll
 * will be given the original file.
 */
ok = os_set_file_size(randomized_file_handle, app_file_size);
if (!ok) {
ASSERT_NOT_TESTED();
return false;
}
ok = os_get_file_size_by_handle(randomized_file_handle,
final_file_size);
if (!ok) {
ASSERT_NOT_TESTED();
return false;
}
ASSERT(*final_file_size == app_file_size);
if (*final_file_size != app_file_size) {
ASSERT_NOT_TESTED();
return false;
}
/* note we don't care whether we have had to force */
} else {
*final_file_size = randomized_file_size;
}
return true;
}
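/* Example with hypothetical sizes: if the application's DLL is 0x2f600
 * bytes on disk but the image data loaded by NtMapViewOfSection ends at
 * 0x2f000 (the tail being, e.g., an Authenticode certificate), our
 * produced file may be only 0x2f000 bytes; os_set_file_size() extends
 * it to 0x2f600 (read back as zeroes) so that the cheap size comparison
 * against the app's file handle still matches.
 */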
/* we expect produced_file_pointer to be a location where the file's
* signature can be written
*/
static bool
aslr_module_append_signature(HANDLE produced_file,
uint64* produced_file_pointer,
aslr_persistent_digest_t *persistent_digest)
{
bool ok;
size_t num_written;
persistent_digest->version = ASLR_PERSISTENT_CACHE_VERSION;
persistent_digest->magic = ASLR_PERSISTENT_CACHE_MAGIC;
/* Note we do not preclude having aslr_module_force_size() always
 * force the size to be the final size
 * |app size|+|aslr_persistent_digest_t|, but it's unlikely we'd
 * care to do this
 */
DOLOG(1, LOG_SYSCALLS|LOG_VMAREAS, {
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"ASLR: aslr_module_append_signature:");
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"\n\t source.full :");
/* FIXME: should abstract out the md5sum style printing from
* get_md5_for_file() */
dump_buffer_as_bytes(GLOBAL,
persistent_digest->original_source.full_MD5,
MD5_RAW_BYTES, DUMP_RAW);
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"\n\t source.short:");
dump_buffer_as_bytes(GLOBAL,
persistent_digest->original_source.short_MD5,
MD5_RAW_BYTES, DUMP_RAW);
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"\n\t target.full :");
dump_buffer_as_bytes(GLOBAL,
persistent_digest->relocated_target.full_MD5,
MD5_RAW_BYTES, DUMP_RAW);
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1,
"\n\t target.short:");
dump_buffer_as_bytes(GLOBAL,
persistent_digest->relocated_target.short_MD5,
MD5_RAW_BYTES, DUMP_RAW);
LOG(GLOBAL, LOG_SYSCALLS|LOG_VMAREAS, 1, "\n");
});
ok = write_file(produced_file,
persistent_digest, sizeof(aslr_persistent_digest_t),
produced_file_pointer,
&num_written);
ASSERT(ok);
ASSERT(num_written == sizeof(aslr_persistent_digest_t));
ok = ok && (num_written == sizeof(aslr_persistent_digest_t));
return ok;
}
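/* Resulting on-disk layout of a produced file (sketch):
 *
 * +----------------------------------------+ offset 0
 * | relocated image contents               |
 * +----------------------------------------+
 * | zero padding (certificates/debug data  |
 * | area, see aslr_module_force_size())    |
 * +----------------------------------------+ original app file size
 * | aslr_persistent_digest_t:              |
 * |   original_source, relocated_target,   |
 * |   magic, version                       |
 * +----------------------------------------+ end of file
 *
 * aslr_module_read_signature() expects exactly this trailer at the
 * corresponding file pointer when validating a published file.
 */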
static bool
aslr_module_read_signature(HANDLE randomized_file,
uint64* randomized_file_pointer,
OUT aslr_persistent_digest_t *persistent_digest)
{
size_t num_read;
bool ok;
ok = read_file(randomized_file,
persistent_digest, sizeof(*persistent_digest),
randomized_file_pointer,
&num_read);
ASSERT(ok);
ok = ok && (num_read == sizeof(aslr_persistent_digest_t));
ASSERT(ok);
if (ok) {
ok = persistent_digest->version == ASLR_PERSISTENT_CACHE_VERSION;
ASSERT_CURIOSITY(ok && "invalid version");
}
if (ok) {
ok = persistent_digest->magic == ASLR_PERSISTENT_CACHE_MAGIC;
ASSERT_CURIOSITY(ok && "bad magic");
}
/* Where can we store any additional checksums and metadata:
* - [CURRENTLY] after the end of the file - just like certificates and
* debugging information are in PEs; we could replace the
* existing certificates, but we simply pad the file with 0 for those,
* and add our signature after the end of the file.
* Alternatives:
* - in a PE field - good enough if using only a 32bit Checksum
* - NTFS streams - no, since we need to support FAT32
* - in a separate file (.chksum), or for many files in a .cat -
* most flexible though it adds overhead. We could throw it in the
* registry, but we already have to secure the files so it's easier
* to use the same permissions.
*
* <metadata> <!-- not really going to be in XML -->
* name=""
* <original>
* <checksum md5|crc32|sha1= /> <-- staleness -->
* <rebased>
* <checksum md5|crc32|sha1= /> <-- corruption -->
* </metadata>
* <hash>md5(metadata)</hash>
*
* - append to file name - content-based addressing
* possible only for data based on original application file
*/
/* FIXME: for unique name we can add the PE section Image.Checksum
* to generate different ID's. Note we do not keep different
* possible mappings for the same name. So we hope no two
* simultaneously needed files will clobber each other due to name
* collision.
*
* FIXME: yet we still need to verify any calculated checksum between our
* generated file and the file that it purportedly backs,
* or better yet fully compare them
*/
/* see reactos/0.2.9/lib/ntdll/ldr/utils.c for the original
* LdrpCheckImageChecksum, though we could produce our own crc32()
* checksum on original file as well and store it as checksum of
* our generated file in some PE orifice.
*/
/* see pecoff v6 Appendix B, or pecoff v8 Appendix A: Calculating
* Authenticode PE Image Hash for reference
* where Checksum and Certificate Tables are excluded
*/
return ok;
}
/* For our relocated version we should be validating a private section
 * before publishing one. Note that when calculating the digest on the
 * original application section we already have a section handle that
 * is assumed to be private.
 */
static
bool
aslr_get_section_digest(OUT module_digest_t *digest,
HANDLE section_handle,
bool short_digest_only)
{
NTSTATUS res;
app_pc base = (app_pc)0x0;
size_t commit_size = 0;
SIZE_T view_size = 0;
/* full file view, since even our short digest includes both
* header and footer */
uint type = 0; /* commit not needed for original DLL */
uint prot = PAGE_READONLY;
res = nt_raw_MapViewOfSection(section_handle, /* 0 */
NT_CURRENT_PROCESS, /* 1 */
&base, /* 2 */
0, /* 3 */
commit_size, /* 4 */
NULL, /* 5 */
&view_size, /* 6 */
ViewShare, /* 7 */
type, /* 8 */
prot); /* 9 */
ASSERT(NT_SUCCESS(res));
if (!NT_SUCCESS(res))
return false;
/* side note: windbg receives a ModLoad: for our temporary mapping
* at the NtMapViewOfSection(), no harm */
module_calculate_digest(digest,
base,
view_size,
!short_digest_only, /* full */
short_digest_only, /* short */
DYNAMO_OPTION(aslr_short_digest),
UINT_MAX/*all secs*/, 0/*all secs*/);
res = nt_raw_UnmapViewOfSection(NT_CURRENT_PROCESS, base);
ASSERT(NT_SUCCESS(res));
return true;
}
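/* Usage sketch (hypothetical caller, error handling elided):
 *
 *   module_digest_t digest;
 *   if (aslr_get_section_digest(&digest, section_handle,
 *                               true))     short digest only
 *       ... compare digest.short_MD5 against a persisted
 *           original_source digest ...
 *
 * A short digest covers only the first and last -aslr_short_digest
 * bytes of the view (header and footer), while a full digest hashes
 * the entire mapping.
 */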
/* returns a private image section:
 * a simple wrapper around nt_create_section() with common attributes.
 * On success callers need to close_handle() after use.
 */
static inline
bool
aslr_create_private_module_section(OUT HANDLE private_section,
HANDLE file_handle)
{
NTSTATUS res;
res = nt_create_section(private_section,
SECTION_ALL_ACCESS, /* FIXME: maybe fewer privileges needed */
NULL, /* full file size */
PAGE_EXECUTE,
/* PAGE_EXECUTE gives us COW in readers
* but can't share any changes.
* Unmodified pages are always shared.
*/
/* PAGE_EXECUTE_READWRITE - gives us true
* overwrite ability only in SEC_COMMIT */
/* PAGE_EXECUTE_WRITECOPY is still COW,
 * though it also needs FILE_READ_DATA
 * privileges to create the section at all,
 * which the loader doesn't use */
SEC_IMAGE,
file_handle,
/* process private - no security needed */
/* object name attributes */
NULL /* unnamed */, 0, NULL, NULL);
ASSERT_CURIOSITY(NT_SUCCESS(res) && "create failed - maybe invalid PE");
/* seen STATUS_INVALID_IMAGE_FORMAT when testing non-aligned PE base */
if (!NT_SUCCESS(res))
return false;
return true;
}
static
bool
aslr_get_file_digest(OUT module_digest_t *digest,
HANDLE relocated_file_handle,
bool short_only)
{
/* Keep in mind that we have to create a private section mapping
* before we publish it for other consumers to use
* in aslr_publish_section_handle
*/
/* note we produce all of these on MEM_IMAGE versions so that the
* kernel doesn't have to page in both MEM_IMAGE and MEM_MAPPED
* copies, and the only cost of these is the extra virtual address
* remappings
*/
/* see comments in aslr_get_original_metadata() about sharing some
* of the extraneous mappings
*/
HANDLE private_section;
bool ok;
ok = aslr_create_private_module_section(&private_section,
relocated_file_handle);
if (!ok)
return false;
ok = aslr_get_section_digest(digest, private_section,
short_only);
close_handle(private_section);
/* Note: we may need to keep this handle OPEN if that is to
* guarantee that the file cannot be overwritten. Assuming that
* effect is already achieved by the flags we use to open the file
* and we will not close the file handle until finished.
*/
return ok;
}
/* Caller must unmap mapping if original_mapped_base != NULL
* regardless of return value.
*
* Also see notes in aslr_generate_relocated_section() which this
* routine mostly borrows from. Comparing in place avoids the
* CopyOnWrite faults and associated page copies.
*/
static bool
aslr_compare_in_place(IN HANDLE original_section,
OUT app_pc *original_mapped_base,
OUT size_t *original_mapped_size,
app_pc suspect_mapped_base,
size_t suspect_mapped_size,
app_pc suspect_preferred_base,
size_t validation_prefix
)
{
bool ok;
NTSTATUS res;
HANDLE section_handle = original_section;
app_pc base = (app_pc)0x0;
size_t commit_size = 0;
SIZE_T view_size = 0; /* full file view */
uint type = 0; /* commit not needed for original DLL */
uint prot = PAGE_READWRITE;
/* PAGE_READWRITE would allow us to update the backing section */
/* PAGE_WRITECOPY - will only provide the current mapping */
app_pc original_preferred_base;
ASSERT(*original_mapped_base == NULL);
res = nt_raw_MapViewOfSection(section_handle, /* 0 */
NT_CURRENT_PROCESS, /* 1 */
&base, /* 2 */
0, /* 3 */
commit_size, /* 4 */
NULL, /* 5 */
&view_size, /* 6 */
ViewShare, /* 7 */
type, /* 8 */
prot); /* 9 */
ASSERT_CURIOSITY(NT_SUCCESS(res));
if (!NT_SUCCESS(res)) {
*original_mapped_base = NULL;
return false;
}
*original_mapped_base = base;
*original_mapped_size = view_size;
/* Be aware of LdrVerifyImageMatchesChecksum() for our relocations
* - but that maps in as SEC_COMMIT based on the original file, so
* even if it is called for anything other than what is exported
* in KnownDlls we'd be ok. If we want to match that checksum we
* can follow suit and process the file image, or we can emulate
* that on a mapped image Section.
*
* FIXME: check what is the meaning of
* "IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY 0x0080 Code
* Integrity checks are enforced", documented in PECOFF v8.0
*/
original_preferred_base = get_module_preferred_base(base);
if (original_preferred_base == NULL) {
ASSERT_CURIOSITY(false && "base at 0, bad PE?");
/* maybe not a PE */
ASSERT_NOT_TESTED();
return false;
}
if (suspect_preferred_base == original_preferred_base) {
/* note we don't really care */
ASSERT_CURIOSITY(false && "old and new base the same!");
ASSERT_NOT_TESTED();
/* FIXME: we may want to force the new base in
* aslr_generate_relocated_section() to never be the same as
* original, but that may or may not be generally good,
* remember Enigma */
}
ok = (*original_mapped_size == suspect_mapped_size) &&
module_contents_compare(*original_mapped_base,
suspect_mapped_base, *original_mapped_size,
false /* not relocated */,
suspect_preferred_base - original_preferred_base,
validation_prefix);
return ok;
}
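/* For instance (hypothetical bases): if the original module prefers
 * 0x71ab0000 and the suspect file was produced for 0x48cd0000, the
 * comparison above passes a relocation delta of
 * 0x48cd0000 - 0x71ab0000 to module_contents_compare(), so every
 * relocation target in the original is expected to differ by exactly
 * that delta in the suspect, while all bytes between relocations must
 * match verbatim (up to validation_prefix bytes).
 */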
/* paranoid mode check that a provided memory area is what it claims
* to be. FIXME: note the relocated file should have such permissions
* that its contents cannot be overwritten after this point.
*
*/
static bool
aslr_module_verify_relocated_contents(HANDLE original_file_handle,
HANDLE suspect_file_handle)
{
/* In paranoid mode: should verify that the image is exactly the
* same as the original except for the relocations which should be
* exactly what we expect.
*/
HANDLE original_file_section;
app_pc relocated_original_mapped_base = NULL;
size_t relocated_original_size = 0;
HANDLE suspect_file_section;
app_pc suspect_base = NULL; /* any base */
SIZE_T suspect_size = 0; /* request full file view */
app_pc suspect_preferred_base;
bool ok;
NTSTATUS res;
size_t validation_prefix = (TEST(ASLR_PERSISTENT_PARANOID_PREFIX,
DYNAMO_OPTION(aslr_validation)) ?
DYNAMO_OPTION(aslr_section_prefix) : POINTER_MAX);
/* create a private section for suspect */
ok = aslr_create_private_module_section(&suspect_file_section,
suspect_file_handle);
if (!ok) {
return false;
}
/* map relocated suspect copy */
res = nt_raw_MapViewOfSection(suspect_file_section, /* 0 */
NT_CURRENT_PROCESS, /* 1 */
&suspect_base, /* 2 */
0, /* 3 */
0, /* 4 commit_size*/
NULL, /* 5 */
&suspect_size, /* 6 */
ViewShare, /* 7 */
0, /* 8 type */
PAGE_READWRITE); /* 9 prot */
/* FIXME: we are asking for PAGE_READWRITE on the whole file -
* affecting commit memory case 10251 */
/* we can close the handle as soon as we have a mapping */
close_handle(suspect_file_section);
ASSERT_CURIOSITY(NT_SUCCESS(res) && "map failed - maybe invalid PE");
if (!NT_SUCCESS(res)) {
ASSERT_NOT_TESTED();
return false;
}
/* FIXME: [minor perf] we should pass a handle to original section
* which is available to all publishers
*/
ok = aslr_create_private_module_section(&original_file_section,
original_file_handle);
if (!ok) {
nt_raw_UnmapViewOfSection(NT_CURRENT_PROCESS, suspect_base);
return false;
}
/* Compare relocated files byte by byte
* 1.1) memcmp() relocated copy at the same base
*
* 1.1.1) [CURRENTLY] apply relocations to original DLL as if
* going to the relocated DLL location
* memcmp(original privately relocated, relocated DLL)
* Note that our changes to a mapping of the original are only
* temporary (even if we did reuse the application section).
* 1.1.2) alternatively we could transform the relocated section
* back into the original. We would also have to be extra
* careful when processing a potentially rogue PE.
*
* Note that the MD5 sum of the relocated DLL may be more expensive
* than comparing the DLLs adjusting for relocations.
* Note we can't trust MD5s saved in the file.
* 1.2) (MD5(relocated DLL) == MD5(original privately relocated)
*
* FIXME: [perf] Need to do some perf testing to see if 1.1 is good enough -
* note we will have to check this only once for the publisher, not the consumer
*
* 2.1) one relocation at a time: we'd save the extra private copies of
* the pages we need to touch if we do this smarter. We need to
* provide a compare function that for each section traverses
* relocations to do the proper match and compares verbatim all
* bytes between relocations.
*/
suspect_preferred_base = get_module_preferred_base(suspect_base);
ASSERT_CURIOSITY(suspect_preferred_base != NULL && "bad PE file");
DODEBUG({
if (suspect_preferred_base != suspect_base) {
/* this is the earliest we know this DLL won't fit in this process */
SYSLOG_INTERNAL_WARNING("DLL mapping is not shareable");
/* of course we may have a conflict, and so DLL won't really be
* shared if not loaded at preferred base
*/
}
});
if (TEST(ASLR_PERSISTENT_PARANOID_TRANSFORM_EXPLICITLY,
DYNAMO_OPTION(aslr_validation))) {
KSTART(aslr_validate_relocate);
/* note we're transforming our good section into the relocated one
* including any header modifications
*/
ok = (suspect_preferred_base != NULL) &&
aslr_generate_relocated_section(original_file_section,
&suspect_preferred_base,
false,
&relocated_original_mapped_base,
&relocated_original_size,
NULL /* no digest */);
KSTOP(aslr_validate_relocate);
if (!ok) {
ASSERT(relocated_original_mapped_base == NULL);
} else {
ASSERT(relocated_original_mapped_base != NULL);
}
ASSERT_CURIOSITY(ok && "invalid source file!");
if (ok) {
KSTART(aslr_compare);
ok = (relocated_original_size == suspect_size) &&
module_contents_compare(relocated_original_mapped_base,
suspect_base, relocated_original_size,
true /* already relocated */,
0,
validation_prefix);
KSTOP(aslr_compare);
}
} else {
/* we must do the comparison in place */
KSTART(aslr_compare);
ok = aslr_compare_in_place(original_file_section,
&relocated_original_mapped_base,
&relocated_original_size,
suspect_base,
suspect_size,
suspect_preferred_base,
validation_prefix);
KSTOP(aslr_compare);
/* note we don't keep track whether failed due to bad original