/* **********************************************************
* Copyright (c) 2014 Google, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* file "emit_utils.c"
* The ARM processors does not maintain cache consistency in hardware,
* so we need be careful about getting stale cache entries.
*/
#include "../globals.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "instrlist.h"
#include "instrument.h" /* for dr_insert_call() */
#define APP instrlist_meta_append
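/* Because the I-cache is not coherent with the D-cache on ARM (see the
 * comment at the top of this file), every code-cache write must be
 * followed by an explicit flush before the new code is executed.  A
 * minimal standalone sketch of that flush, assuming the GCC/Clang
 * __builtin___clear_cache builtin (DR uses its own cache-sync routines;
 * this helper is illustrative only):
 */
static inline void
sketch_sync_icache(void *start, void *end)
{
    /* Cleans the D-cache to the point of unification and invalidates
     * the I-cache over [start, end).
     */
    __builtin___clear_cache((char *)start, (char *)end);
}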
/***************************************************************************/
/* EXIT STUB */
/***************************************************************************/
byte *
insert_relative_target(byte *pc, cache_pc target, bool hot_patch)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
byte *
insert_relative_jump(byte *pc, cache_pc target, bool hot_patch)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
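/* The two routines above will need to materialize a pc-relative branch.
 * A standalone sketch (hypothetical helper, not DR's eventual
 * implementation) of encoding an unconditional A32 "B <target>" at pc:
 * the hardware reads the pc as the branch address + 8, and the signed
 * 24-bit immediate counts 4-byte words.
 */
static byte *
sketch_insert_arm_branch(byte *pc, cache_pc target)
{
    int disp = (int)(target - (pc + 8));                    /* A32 pc bias */
    uint enc = 0xea000000 | (((uint)disp >> 2) & 0xffffff); /* cond=AL, B */
    *(uint *)pc = enc;                                      /* one 4-byte store */
    return pc + 4;
}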
/* Inserts any nop padding needed to ensure that patchable branch offsets
 * don't cross cache line boundaries.  If emitting, sets the offset field
 * of all instructions; else, sets the translation for the added nops (for
 * recreating).  If emitting and -pad_jmps_shift_{bb,trace} is set, returns
 * the number of bytes to shift the start_pc by (this avoids putting a nop
 * before the first exit cti); else, returns 0.
 */
uint
nop_pad_ilist(dcontext_t *dcontext, fragment_t *f, instrlist_t *ilist, bool emitting)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return 0;
}
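/* The padding decision above reduces to a line-crossing test.  Sketch
 * (illustrative only): a 4-byte patchable word starting at offset offs
 * crosses a cache line iff its first and last bytes fall on different
 * lines.
 */
static inline bool
sketch_crosses_cache_line(uint offs, uint line_size)
{
    return (offs / line_size) != ((offs + 3) / line_size);
}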
/* Emit code for the exit stub at stub_pc. Return the size of the
 * emitted code in bytes. This routine assumes that the caller will
 * take care of any cache synchronization necessary (which, unlike on
 * x86, is required on ARM; see the comment at the top of this file).
 * The stub is unlinked initially, except for coarse-grain indirect
 * exits, which are always linked.
 */
int
insert_exit_stub_other_flags(dcontext_t *dcontext, fragment_t *f,
linkstub_t *l, cache_pc stub_pc, ushort l_flags)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return 0;
}
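/* An exit stub must get a 32-bit constant (e.g., the &linkstub_t
 * pointer) into a register without a nearby literal pool.  Sketch of
 * the ARMv7 MOVW/MOVT pair that does this (hypothetical encoding
 * helpers, not DR's emitter):
 */
static inline uint
sketch_encode_movw(uint rd, uint imm16) /* movw rd, #imm16: low half */
{
    return 0xe3000000 | ((imm16 & 0xf000) << 4) | (rd << 12) | (imm16 & 0xfff);
}
static inline uint
sketch_encode_movt(uint rd, uint imm16) /* movt rd, #imm16: high half */
{
    return 0xe3400000 | ((imm16 & 0xf000) << 4) | (rd << 12) | (imm16 & 0xfff);
}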
cache_pc
exit_cti_disp_pc(cache_pc branch_pc)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
void
link_indirect_exit_arch(dcontext_t *dcontext, fragment_t *f,
linkstub_t *l, bool hot_patch,
app_pc target_tag)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
cache_pc
indirect_linkstub_stub_pc(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
cache_pc
cbr_fallthrough_exit_cti(cache_pc prev_cti_pc)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
/* This is an atomic operation with respect to a thread executing in the
 * cache (barring ifdef NATIVE_RETURN, which is now removed).  For inlined
 * indirect exits, the unlinked path of the ibl routine detects the race
 * condition between the two patching writes and handles it appropriately,
 * unless the atomic_inlined_linking option is in use, in which case there
 * is only one patching write (since the tail is duplicated).
 */
void
unlink_indirect_exit(dcontext_t *dcontext, fragment_t *f, linkstub_t *l)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
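/* The patching writes discussed above are single aligned 4-byte stores,
 * which ARM guarantees other cores observe atomically.  Sketch of one
 * such patch, assuming GCC atomics and __builtin___clear_cache
 * (illustrative; DR has its own patching and cache-sync paths):
 */
static void
sketch_atomic_patch_word(uint *patch_site, uint new_instr)
{
    __atomic_store_n(patch_site, new_instr, __ATOMIC_RELEASE);
    /* The stale copy must still be flushed out of the I-cache. */
    __builtin___clear_cache((char *)patch_site, (char *)patch_site + 4);
}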
/*******************************************************************************
* COARSE-GRAIN FRAGMENT SUPPORT
*/
cache_pc
entrance_stub_jmp(cache_pc stub)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
/* Returns whether stub is an entrance stub as opposed to a fragment
 * or a coarse indirect stub.  FIXME: if we separate coarse indirect
 * stubs from bodies, we'll need to put them somewhere else, or fix up
 * decode_fragment() to be able to distinguish them in some other way,
 * such as by the first instruction's TLS slot.
 */
bool
coarse_is_entrance_stub(cache_pc stub)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return false;
}
/*###########################################################################
*
* fragment_t Prefixes
*
* Two types: indirect branch target, which restores eflags and xcx, and
* normal prefix, which just restores xcx
*/
void
insert_fragment_prefix(dcontext_t *dcontext, fragment_t *f)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
/***************************************************************************/
/* THREAD-PRIVATE/SHARED ROUTINE GENERATION */
/***************************************************************************/
/* helper functions for emit_fcache_enter_common */
void
append_call_exit_dr_hook(dcontext_t *dcontext, instrlist_t *ilist,
bool absolute, bool shared)
{
/* i#1551: DR_HOOK is not supported on ARM */
ASSERT_NOT_IMPLEMENTED(EXIT_DR_HOOK == NULL);
}
void
append_restore_xflags(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
void
append_restore_simd_reg(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
/* s16-s31 (d8-d15, q4-q7) are callee-save */
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
void
append_restore_gpr(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
/* FIXME i#1551: do not restore the register for TLS */
}
/* helper functions for append_fcache_return_common */
void
append_save_gpr(dcontext_t *dcontext, instrlist_t *ilist, bool ibl_end, bool absolute,
generated_code_t *code, linkstub_t *linkstub, bool coarse_info)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
/* FIXME i#1551: we steal register (R10) for TLS access,
* so we need special handling on R10 save/restore here.
*/
}
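/* Per the comment above, r10 is stolen as the TLS base, so a spill-slot
 * access is a plain load or store relative to r10.  Sketch (hypothetical
 * encoding helper): "ldr rT, [r10, #offs]" in A32.
 */
static inline uint
sketch_encode_ldr_tls(uint rt, uint offs)
{
    /* cond=AL LDR (immediate, P=1, U=1, W=0), base register r10 */
    return 0xe5900000 | (10 << 16) | (rt << 12) | (offs & 0xfff);
}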
void
append_save_simd_reg(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
/* s16-s31 (d8-d15, q4-q7) are callee-save */
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
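/* Under AAPCS only d8-d15 (s16-s31) are callee-save, so the transparent
 * context switch must itself preserve the caller-save SIMD registers.
 * A sketch for the low caller-save bank, assuming ARM inline asm and a
 * VFP/NEON-enabled build (hypothetical helper):
 */
static void
sketch_save_caller_save_simd(void *buf)
{
    /* Stores d0-d7 (64 bytes) at buf, advancing the pointer. */
    __asm__ volatile("vstmia %0!, {d0-d7}" : "+r"(buf) : : "memory");
}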
void
append_save_clear_xflags(dcontext_t *dcontext, instrlist_t *ilist, bool absolute)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
bool
append_call_enter_dr_hook(dcontext_t *dcontext, instrlist_t *ilist,
bool ibl_end, bool absolute)
{
/* i#1551: DR_HOOK is not supported on ARM */
ASSERT_NOT_IMPLEMENTED(EXIT_DR_HOOK == NULL);
return false;
}
void
insert_save_eflags(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
uint flags, bool tls, bool absolute _IF_X64(bool x86_to_x64_ibl_opt))
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
void
insert_restore_eflags(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where,
uint flags, bool tls, bool absolute
_IF_X64(bool x86_to_x64_ibl_opt))
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
}
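/* ARM's analogue of the eflags save/restore above is a pair of MRS/MSR
 * operations on the APSR condition flags.  Sketch, assuming ARM inline
 * asm (hypothetical helpers; the real routines will operate on spill
 * slots via the instrlist):
 */
static inline uint
sketch_read_flags(void)
{
    uint apsr;
    __asm__ volatile("mrs %0, apsr" : "=r"(apsr)); /* NZCVQ + GE bits */
    return apsr;
}
static inline void
sketch_write_flags(uint apsr)
{
    __asm__ volatile("msr apsr_nzcvq, %0" : : "r"(apsr)); /* NZCVQ only */
}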
/* create the inlined ibl exit stub template */
byte *
emit_inline_ibl_stub(dcontext_t *dcontext, byte *pc,
ibl_code_t *ibl_code, bool target_trace_table)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
byte *
emit_indirect_branch_lookup(dcontext_t *dcontext, generated_code_t *code, byte *pc,
byte *fcache_return_pc,
bool target_trace_table,
bool inline_ibl_head,
ibl_code_t *ibl_code /* IN/OUT */)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return NULL;
}
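/* What the generated IBL code does in spirit, rendered as plain C
 * (hypothetical names; the real routine is emitted machine code that
 * keeps everything in registers): mask the target tag into an
 * open-address table and probe until a hit or a null-tag miss.
 */
typedef struct sketch_ibl_entry_t {
    app_pc tag;     /* application address of the indirect target */
    cache_pc start; /* code-cache entry point for that target */
} sketch_ibl_entry_t;

static cache_pc
sketch_ibl_lookup(sketch_ibl_entry_t *table, ptr_uint_t hash_mask, app_pc tag)
{
    ptr_uint_t i = (ptr_uint_t)tag & hash_mask;
    while (table[i].tag != NULL) {
        if (table[i].tag == tag)
            return table[i].start;
        i = (i + 1) & hash_mask; /* linear probe */
    }
    return NULL; /* miss: control would go back through fcache_return */
}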
bool
is_jmp_rel32(byte *code_buf, app_pc app_loc, app_pc *jmp_target /* OUT */)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return false;
}
bool
is_jmp_rel8(byte *code_buf, app_pc app_loc, app_pc *jmp_target /* OUT */)
{
/* FIXME i#1551: NYI on ARM */
ASSERT_NOT_IMPLEMENTED(false);
return false;
}
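/* The ARM counterpart of the x86 rel32/rel8 checks above would
 * recognize a single A32 B instruction.  Standalone sketch (hypothetical
 * helper): decode the signed 24-bit word offset and apply the +8 pc
 * bias.
 */
static bool
sketch_is_arm_b(byte *code_buf, app_pc app_loc, app_pc *jmp_target /* OUT */)
{
    uint enc = *(uint *)code_buf;
    if ((enc & 0x0f000000) == 0x0a000000) { /* B (any condition, not BL) */
        int imm24 = (int)(enc << 8) >> 8;   /* sign-extend the 24-bit field */
        *jmp_target = app_loc + 8 + imm24 * 4;
        return true;
    }
    return false;
}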