// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/wasm/baseline/liftoff-compiler.h"
#include "src/base/enum-set.h"
#include "src/base/optional.h"
#include "src/base/platform/wrappers.h"
#include "src/codegen/assembler-inl.h"
// TODO(clemensb): Remove dependencies on compiler stuff.
#include "src/codegen/external-reference.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/smi.h"
#include "src/tracing/trace-event.h"
#include "src/utils/ostreams.h"
#include "src/utils/utils.h"
#include "src/wasm/baseline/liftoff-assembler.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/memory-tracing.h"
#include "src/wasm/object-access.h"
#include "src/wasm/simd-shuffle.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes-inl.h"
namespace v8 {
namespace internal {
namespace wasm {
constexpr auto kRegister = LiftoffAssembler::VarState::kRegister;
constexpr auto kIntConst = LiftoffAssembler::VarState::kIntConst;
constexpr auto kStack = LiftoffAssembler::VarState::kStack;
namespace {
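// {__} expands to {asm_.}, so assembler calls throughout this file are
// written as {__ Method(...)}.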
#define __ asm_.
#define TRACE(...) \
do { \
if (FLAG_trace_liftoff) PrintF("[liftoff] " __VA_ARGS__); \
} while (false)
#define WASM_INSTANCE_OBJECT_FIELD_OFFSET(name) \
ObjectAccess::ToTagged(WasmInstanceObject::k##name##Offset)
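// Compile-time check that an instance field has the size we load from it;
// used by {LOAD_INSTANCE_FIELD} below.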
template <int expected_size, int actual_size>
struct assert_field_size {
static_assert(expected_size == actual_size,
"field in WasmInstance does not have the expected size");
static constexpr int size = actual_size;
};
#define WASM_INSTANCE_OBJECT_FIELD_SIZE(name) \
FIELD_SIZE(WasmInstanceObject::k##name##Offset)
#define LOAD_INSTANCE_FIELD(dst, name, load_size, pinned) \
__ LoadFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
WASM_INSTANCE_OBJECT_FIELD_OFFSET(name), \
assert_field_size<WASM_INSTANCE_OBJECT_FIELD_SIZE(name), \
load_size>::size);
#define LOAD_TAGGED_PTR_INSTANCE_FIELD(dst, name, pinned) \
static_assert(WASM_INSTANCE_OBJECT_FIELD_SIZE(name) == kTaggedSize, \
"field in WasmInstance does not have the expected size"); \
__ LoadTaggedPointerFromInstance(dst, LoadInstanceIntoRegister(pinned, dst), \
WASM_INSTANCE_OBJECT_FIELD_OFFSET(name));
#ifdef V8_CODE_COMMENTS
#define DEBUG_CODE_COMMENT(str) \
do { \
__ RecordComment(str); \
} while (false)
#else
#define DEBUG_CODE_COMMENT(str) ((void)0)
#endif
constexpr LoadType::LoadTypeValue kPointerLoadType =
kSystemPointerSize == 8 ? LoadType::kI64Load : LoadType::kI32Load;
constexpr ValueKind kPointerKind = LiftoffAssembler::kPointerKind;
constexpr ValueKind kSmiKind = LiftoffAssembler::kSmiKind;
constexpr ValueKind kTaggedKind = LiftoffAssembler::kTaggedKind;
// Used to construct fixed-size signatures: MakeSig::Returns(...).Params(...);
using MakeSig = FixedSizeSignature<ValueKind>;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
// branches to distant targets. Moving labels would confuse the Assembler,
// thus store the label on the heap and keep a unique_ptr.
class MovableLabel {
public:
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(MovableLabel);
MovableLabel() : label_(new Label()) {}
Label* get() { return label_.get(); }
private:
std::unique_ptr<Label> label_;
};
#else
// On all other platforms, just store the Label directly.
class MovableLabel {
public:
MOVE_ONLY_WITH_DEFAULT_CONSTRUCTORS(MovableLabel);
Label* get() { return &label_; }
private:
Label label_;
};
#endif
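// On 32-bit platforms (kSystemPointerSize == 4), the call descriptor is
// lowered via {GetI32WasmCallDescriptor} so that i64 values are passed as
// pairs of i32 values.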
compiler::CallDescriptor* GetLoweredCallDescriptor(
Zone* zone, compiler::CallDescriptor* call_desc) {
return kSystemPointerSize == 4
? compiler::GetI32WasmCallDescriptor(zone, call_desc)
: call_desc;
}
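// Maps an i32 comparison opcode to the corresponding {LiftoffCondition}.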
constexpr LiftoffCondition GetCompareCondition(WasmOpcode opcode) {
switch (opcode) {
case kExprI32Eq:
return kEqual;
case kExprI32Ne:
return kUnequal;
case kExprI32LtS:
return kSignedLessThan;
case kExprI32LtU:
return kUnsignedLessThan;
case kExprI32GtS:
return kSignedGreaterThan;
case kExprI32GtU:
return kUnsignedGreaterThan;
case kExprI32LeS:
return kSignedLessEqual;
case kExprI32LeU:
return kUnsignedLessEqual;
case kExprI32GeS:
return kSignedGreaterEqual;
case kExprI32GeU:
return kUnsignedGreaterEqual;
default:
#if V8_HAS_CXX14_CONSTEXPR
UNREACHABLE();
#else
// We need to return something for old compilers here.
return kEqual;
#endif
}
}
// Builds a {DebugSideTable}.
class DebugSideTableBuilder {
using Entry = DebugSideTable::Entry;
using Value = Entry::Value;
public:
enum AssumeSpilling {
// All register values will be spilled before the pc covered by the debug
// side table entry. Register slots will be marked as stack slots in the
// generated debug side table entry.
kAssumeSpilling,
// Register slots will be written out as they are.
kAllowRegisters,
// Register slots cannot appear since we already spilled.
kDidSpill
};
class EntryBuilder {
public:
explicit EntryBuilder(int pc_offset, int stack_height,
std::vector<Value> changed_values)
: pc_offset_(pc_offset),
stack_height_(stack_height),
changed_values_(std::move(changed_values)) {}
Entry ToTableEntry() {
return Entry{pc_offset_, stack_height_, std::move(changed_values_)};
}
void MinimizeBasedOnPreviousStack(const std::vector<Value>& last_values) {
auto dst = changed_values_.begin();
auto end = changed_values_.end();
for (auto src = dst; src != end; ++src) {
if (src->index < static_cast<int>(last_values.size()) &&
*src == last_values[src->index]) {
continue;
}
if (dst != src) *dst = *src;
++dst;
}
changed_values_.erase(dst, end);
}
int pc_offset() const { return pc_offset_; }
void set_pc_offset(int new_pc_offset) { pc_offset_ = new_pc_offset; }
private:
int pc_offset_;
int stack_height_;
std::vector<Value> changed_values_;
};
// Adds a new entry in regular code.
void NewEntry(int pc_offset, Vector<DebugSideTable::Entry::Value> values) {
entries_.emplace_back(pc_offset, static_cast<int>(values.size()),
GetChangedStackValues(last_values_, values));
}
// Adds a new entry for OOL code, and returns a pointer to a builder for
// modifying that entry.
EntryBuilder* NewOOLEntry(Vector<DebugSideTable::Entry::Value> values) {
constexpr int kNoPcOffsetYet = -1;
ool_entries_.emplace_back(kNoPcOffsetYet, static_cast<int>(values.size()),
GetChangedStackValues(last_ool_values_, values));
return &ool_entries_.back();
}
void SetNumLocals(int num_locals) {
DCHECK_EQ(-1, num_locals_);
DCHECK_LE(0, num_locals);
num_locals_ = num_locals;
}
std::unique_ptr<DebugSideTable> GenerateDebugSideTable() {
DCHECK_LE(0, num_locals_);
// Connect {entries_} and {ool_entries_} by removing redundant stack
// information from the first {ool_entries_} entry (based on
// {last_values_}).
if (!entries_.empty() && !ool_entries_.empty()) {
ool_entries_.front().MinimizeBasedOnPreviousStack(last_values_);
}
std::vector<Entry> entries;
entries.reserve(entries_.size() + ool_entries_.size());
for (auto& entry : entries_) entries.push_back(entry.ToTableEntry());
for (auto& entry : ool_entries_) entries.push_back(entry.ToTableEntry());
DCHECK(std::is_sorted(
entries.begin(), entries.end(),
[](Entry& a, Entry& b) { return a.pc_offset() < b.pc_offset(); }));
return std::make_unique<DebugSideTable>(num_locals_, std::move(entries));
}
private:
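// Computes the values that changed relative to {last_values}, and updates
// {last_values} in place. This implements the delta encoding of stack values
// between consecutive entries.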
static std::vector<Value> GetChangedStackValues(
std::vector<Value>& last_values,
Vector<DebugSideTable::Entry::Value> values) {
std::vector<Value> changed_values;
int old_stack_size = static_cast<int>(last_values.size());
last_values.resize(values.size());
int index = 0;
for (const auto& value : values) {
if (index >= old_stack_size || last_values[index] != value) {
changed_values.push_back(value);
last_values[index] = value;
}
++index;
}
return changed_values;
}
int num_locals_ = -1;
// Keep a snapshot of the stack of the last entry, to generate a delta to the
// next entry.
std::vector<Value> last_values_;
std::vector<EntryBuilder> entries_;
// Keep OOL code entries separate so we can do proper delta-encoding (more
// entries might be added between the existing {entries_} and the
// {ool_entries_}). Store the entries in a list so the pointer is not
// invalidated by adding more entries.
std::vector<Value> last_ool_values_;
std::list<EntryBuilder> ool_entries_;
};
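// Checks whether bailing out of Liftoff with the given reason is acceptable
// in the current configuration; if not, compilation is aborted with FATAL.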
void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
const CompilationEnv* env) {
// Decode errors are ok.
if (reason == kDecodeError) return;
// Missing CPU features are also generally OK for now.
if (reason == kMissingCPUFeature) return;
// --liftoff-only ensures that tests actually exercise the Liftoff path
// without bailing out. Bailing out due to (simulated) lack of CPU support
// is okay though (see above).
if (FLAG_liftoff_only) {
FATAL("--liftoff-only: treating bailout as fatal error. Cause: %s", detail);
}
// If --enable-testing-opcode-in-wasm is set, we are expected to bail out with
// "testing opcode".
if (FLAG_enable_testing_opcode_in_wasm &&
strcmp(detail, "testing opcode") == 0) {
return;
}
// Some externally maintained architectures don't fully implement Liftoff yet.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
return;
#endif
// TODO(11235): On arm and arm64 there is still a limit on the size of
// supported stack frames.
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
if (strstr(detail, "Stack limited to 512 bytes")) return;
#endif
#define LIST_FEATURE(name, ...) kFeature_##name,
constexpr WasmFeatures kExperimentalFeatures{
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(LIST_FEATURE)};
constexpr WasmFeatures kStagedFeatures{
FOREACH_WASM_STAGING_FEATURE_FLAG(LIST_FEATURE)};
#undef LIST_FEATURE
// Bailout is allowed if any experimental feature is enabled.
if (env->enabled_features.contains_any(kExperimentalFeatures)) return;
// Staged features should be feature complete in Liftoff according to
// https://v8.dev/docs/wasm-shipping-checklist. Some are not though. They are
// listed here explicitly, with a bug assigned to each of them.
// TODO(7581): Fully implement reftypes in Liftoff.
STATIC_ASSERT(kStagedFeatures.has_reftypes());
if (reason == kRefTypes) {
DCHECK(env->enabled_features.has_reftypes());
return;
}
// Otherwise, bailout is not allowed.
FATAL("Liftoff bailout should not happen. Cause: %s\n", detail);
}
class LiftoffCompiler {
public:
// TODO(clemensb): Make this a template parameter.
static constexpr Decoder::ValidateFlag validate = Decoder::kBooleanValidation;
using Value = ValueBase<validate>;
struct ElseState {
MovableLabel label;
LiftoffAssembler::CacheState state;
};
struct TryInfo {
TryInfo() = default;
LiftoffAssembler::CacheState catch_state;
Label catch_label;
bool catch_reached = false;
bool in_handler = false;
};
struct Control : public ControlBase<Value, validate> {
std::unique_ptr<ElseState> else_state;
LiftoffAssembler::CacheState label_state;
MovableLabel label;
std::unique_ptr<TryInfo> try_info;
// Number of exceptions on the stack below this control.
int num_exceptions = 0;
MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(Control);
template <typename... Args>
explicit Control(Args&&... args) V8_NOEXCEPT
: ControlBase(std::forward<Args>(args)...) {}
};
using FullDecoder = WasmFullDecoder<validate, LiftoffCompiler>;
using ValueKindSig = LiftoffAssembler::ValueKindSig;
// For debugging, we need to spill registers before a trap or a stack check to
// be able to inspect them.
struct SpilledRegistersForInspection : public ZoneObject {
struct Entry {
int offset;
LiftoffRegister reg;
ValueKind kind;
};
ZoneVector<Entry> entries;
explicit SpilledRegistersForInspection(Zone* zone) : entries(zone) {}
};
struct OutOfLineSafepointInfo {
ZoneVector<int> slots;
LiftoffRegList spills;
explicit OutOfLineSafepointInfo(Zone* zone) : slots(zone) {}
};
struct OutOfLineCode {
MovableLabel label;
MovableLabel continuation;
WasmCode::RuntimeStubId stub;
WasmCodePosition position;
LiftoffRegList regs_to_save;
Register cached_instance;
OutOfLineSafepointInfo* safepoint_info;
uint32_t pc; // for trap handler.
// These two pointers will only be used for debug code:
SpilledRegistersForInspection* spilled_registers;
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder;
// Named constructors:
static OutOfLineCode Trap(
WasmCode::RuntimeStubId s, WasmCodePosition pos,
SpilledRegistersForInspection* spilled_registers,
OutOfLineSafepointInfo* safepoint_info, uint32_t pc,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
DCHECK_LT(0, pos);
return {
{}, // label
{}, // continuation
s, // stub
pos, // position
{}, // regs_to_save
no_reg, // cached_instance
safepoint_info, // safepoint_info
pc, // pc
spilled_registers, // spilled_registers
debug_sidetable_entry_builder // debug_side_table_entry_builder
};
}
static OutOfLineCode StackCheck(
WasmCodePosition pos, LiftoffRegList regs_to_save,
Register cached_instance, SpilledRegistersForInspection* spilled_regs,
OutOfLineSafepointInfo* safepoint_info,
DebugSideTableBuilder::EntryBuilder* debug_sidetable_entry_builder) {
return {
{}, // label
{}, // continuation
WasmCode::kWasmStackGuard, // stub
pos, // position
regs_to_save, // regs_to_save
cached_instance, // cached_instance
safepoint_info, // safepoint_info
0, // pc
spilled_regs, // spilled_registers
debug_sidetable_entry_builder // debug_side_table_entry_builder
};
}
};
LiftoffCompiler(compiler::CallDescriptor* call_descriptor,
CompilationEnv* env, Zone* compilation_zone,
std::unique_ptr<AssemblerBuffer> buffer,
DebugSideTableBuilder* debug_sidetable_builder,
ForDebugging for_debugging, int func_index,
Vector<const int> breakpoints = {}, int dead_breakpoint = 0)
: asm_(std::move(buffer)),
descriptor_(
GetLoweredCallDescriptor(compilation_zone, call_descriptor)),
env_(env),
debug_sidetable_builder_(debug_sidetable_builder),
for_debugging_(for_debugging),
func_index_(func_index),
out_of_line_code_(compilation_zone),
source_position_table_builder_(compilation_zone),
protected_instructions_(compilation_zone),
compilation_zone_(compilation_zone),
safepoint_table_builder_(compilation_zone_),
next_breakpoint_ptr_(breakpoints.begin()),
next_breakpoint_end_(breakpoints.end()),
dead_breakpoint_(dead_breakpoint),
handlers_(compilation_zone) {
if (breakpoints.empty()) {
next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
}
}
bool did_bailout() const { return bailout_reason_ != kSuccess; }
LiftoffBailoutReason bailout_reason() const { return bailout_reason_; }
void GetCode(CodeDesc* desc) {
asm_.GetCode(nullptr, desc, &safepoint_table_builder_,
handler_table_offset_);
}
OwnedVector<uint8_t> GetSourcePositionTable() {
return source_position_table_builder_.ToSourcePositionTableVector();
}
OwnedVector<uint8_t> GetProtectedInstructionsData() const {
return OwnedVector<uint8_t>::Of(
Vector<const uint8_t>::cast(VectorOf(protected_instructions_)));
}
uint32_t GetTotalFrameSlotCountForGC() const {
return __ GetTotalFrameSlotCountForGC();
}
void unsupported(FullDecoder* decoder, LiftoffBailoutReason reason,
const char* detail) {
DCHECK_NE(kSuccess, reason);
if (did_bailout()) return;
bailout_reason_ = reason;
TRACE("unsupported: %s\n", detail);
decoder->errorf(decoder->pc_offset(), "unsupported liftoff operation: %s",
detail);
UnuseLabels(decoder);
CheckBailoutAllowed(reason, detail, env_);
}
bool DidAssemblerBailout(FullDecoder* decoder) {
if (decoder->failed() || !__ did_bailout()) return false;
unsupported(decoder, __ bailout_reason(), __ bailout_detail());
return true;
}
V8_INLINE bool CheckSupportedType(FullDecoder* decoder, ValueKind kind,
const char* context) {
if (V8_LIKELY(supported_types_.contains(kind))) return true;
return MaybeBailoutForUnsupportedType(decoder, kind, context);
}
V8_NOINLINE bool MaybeBailoutForUnsupportedType(FullDecoder* decoder,
ValueKind kind,
const char* context) {
DCHECK(!supported_types_.contains(kind));
// Lazily update {supported_types_}; then check again.
if (CpuFeatures::SupportsWasmSimd128()) supported_types_.Add(kS128);
if (FLAG_experimental_liftoff_extern_ref) {
supported_types_.Add(kExternRefSupported);
}
if (supported_types_.contains(kind)) return true;
LiftoffBailoutReason bailout_reason;
switch (kind) {
case kS128:
bailout_reason = kMissingCPUFeature;
break;
case kRef:
case kOptRef:
case kRtt:
case kRttWithDepth:
case kI8:
case kI16:
bailout_reason = kRefTypes;
break;
default:
UNREACHABLE();
}
EmbeddedVector<char, 128> buffer;
SNPrintF(buffer, "%s %s", name(kind), context);
unsupported(decoder, bailout_reason, buffer.begin());
return false;
}
void UnuseLabels(FullDecoder* decoder) {
#ifdef DEBUG
auto Unuse = [](Label* label) {
label->Unuse();
label->UnuseNear();
};
// Unuse all labels now, otherwise their destructor will fire a DCHECK error
// if they were referenced before.
uint32_t control_depth = decoder ? decoder->control_depth() : 0;
for (uint32_t i = 0; i < control_depth; ++i) {
Control* c = decoder->control_at(i);
Unuse(c->label.get());
if (c->else_state) Unuse(c->else_state->label.get());
if (c->try_info != nullptr) Unuse(&c->try_info->catch_label);
}
for (auto& ool : out_of_line_code_) Unuse(ool.label.get());
#endif
}
void StartFunction(FullDecoder* decoder) {
if (FLAG_trace_liftoff && !FLAG_trace_wasm_decoder) {
StdoutStream{} << "hint: add --trace-wasm-decoder to also see the wasm "
"instructions being decoded\n";
}
int num_locals = decoder->num_locals();
__ set_num_locals(num_locals);
for (int i = 0; i < num_locals; ++i) {
ValueKind kind = decoder->local_type(i).kind();
__ set_local_kind(i, kind);
}
}
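// Returns the set of GP cache registers that are not used for passing
// parameters.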
// TODO(ahaas): Make this function constexpr once GCC allows it.
LiftoffRegList RegsUnusedByParams() {
LiftoffRegList regs = kGpCacheRegList;
for (auto reg : kGpParamRegisters) {
regs.clear(reg);
}
return regs;
}
// Returns the number of inputs processed (1 or 2).
uint32_t ProcessParameter(ValueKind kind, uint32_t input_idx) {
const bool needs_pair = needs_gp_reg_pair(kind);
const ValueKind reg_kind = needs_pair ? kI32 : kind;
const RegClass rc = reg_class_for(reg_kind);
auto LoadToReg = [this, reg_kind, rc](compiler::LinkageLocation location,
LiftoffRegList pinned) {
if (location.IsRegister()) {
DCHECK(!location.IsAnyRegister());
return LiftoffRegister::from_external_code(rc, reg_kind,
location.AsRegister());
}
DCHECK(location.IsCallerFrameSlot());
// For reference type parameters we have to use registers that were not
// used for parameters because some reference type stack parameters may
// get processed before some value type register parameters.
LiftoffRegister reg = is_reference(reg_kind)
? __ GetUnusedRegister(RegsUnusedByParams())
: __ GetUnusedRegister(rc, pinned);
__ LoadCallerFrameSlot(reg, -location.AsCallerFrameSlot(), reg_kind);
return reg;
};
LiftoffRegister reg =
LoadToReg(descriptor_->GetInputLocation(input_idx), {});
if (needs_pair) {
LiftoffRegister reg2 =
LoadToReg(descriptor_->GetInputLocation(input_idx + 1),
LiftoffRegList::ForRegs(reg));
reg = LiftoffRegister::ForPair(reg.gp(), reg2.gp());
}
__ PushRegister(kind, reg);
return needs_pair ? 2 : 1;
}
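// Emits a stack check against the stack limit stored in the instance; the
// slow path (a call to the WasmStackGuard runtime stub) is generated out of
// line.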
void StackCheck(FullDecoder* decoder, WasmCodePosition position) {
DEBUG_CODE_COMMENT("stack check");
if (!FLAG_wasm_stack_checks || !env_->runtime_exception_support) return;
// Loading the limit address can change the stack state, hence do this
// before storing information about registers.
Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize,
{});
LiftoffRegList regs_to_save = __ cache_state()->used_registers;
// The cached instance will be reloaded separately.
if (__ cache_state()->cached_instance != no_reg) {
DCHECK(regs_to_save.has(__ cache_state()->cached_instance));
regs_to_save.clear(__ cache_state()->cached_instance);
}
SpilledRegistersForInspection* spilled_regs = nullptr;
OutOfLineSafepointInfo* safepoint_info =
compilation_zone_->New<OutOfLineSafepointInfo>(compilation_zone_);
__ cache_state()->GetTaggedSlotsForOOLCode(
&safepoint_info->slots, &safepoint_info->spills,
for_debugging_
? LiftoffAssembler::CacheState::SpillLocation::kStackSlots
: LiftoffAssembler::CacheState::SpillLocation::kTopOfStack);
if (V8_UNLIKELY(for_debugging_)) {
regs_to_save = {};
spilled_regs = GetSpilledRegistersForInspection();
}
out_of_line_code_.push_back(OutOfLineCode::StackCheck(
position, regs_to_save, __ cache_state()->cached_instance, spilled_regs,
safepoint_info, RegisterOOLDebugSideTableEntry(decoder)));
OutOfLineCode& ool = out_of_line_code_.back();
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
}
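// Decides whether locals beyond the parameters are initially spilled to the
// stack (and zero-filled) instead of being tracked as constants.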
bool SpillLocalsInitially(FullDecoder* decoder, uint32_t num_params) {
int actual_locals = __ num_locals() - num_params;
DCHECK_LE(0, actual_locals);
constexpr int kNumCacheRegisters = NumRegs(kLiftoffAssemblerGpCacheRegs);
// If we have many locals, we put them on the stack initially. This avoids
// having to spill them on merge points. Use of these initial values should
// be rare anyway.
if (actual_locals > kNumCacheRegisters / 2) return true;
// If there are locals which are not i32 or i64, we also spill all locals,
// because other types cannot be initialized to constants.
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
ValueKind kind = __ local_kind(param_idx);
if (kind != kI32 && kind != kI64) return true;
}
return false;
}
void TierUpFunction(FullDecoder* decoder) {
__ CallRuntimeStub(WasmCode::kWasmTriggerTierUp);
DefineSafepoint();
}
void TraceFunctionEntry(FullDecoder* decoder) {
DEBUG_CODE_COMMENT("trace function entry");
__ SpillAllRegisters();
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
__ CallRuntimeStub(WasmCode::kWasmTraceEnter);
DefineSafepoint();
}
void StartFunctionBody(FullDecoder* decoder, Control* block) {
for (uint32_t i = 0; i < __ num_locals(); ++i) {
if (!CheckSupportedType(decoder, __ local_kind(i), "param")) return;
}
// Parameter 0 is the instance parameter.
uint32_t num_params =
static_cast<uint32_t>(decoder->sig_->parameter_count());
__ CodeEntry();
DEBUG_CODE_COMMENT("enter frame");
__ EnterFrame(StackFrame::WASM);
__ set_has_frame(true);
pc_offset_stack_frame_construction_ = __ PrepareStackFrame();
// {PrepareStackFrame} is the first platform-specific assembler method.
// If this failed, we can bail out immediately, avoiding runtime overhead
// and potential failures because of other unimplemented methods.
// A platform implementing {PrepareStackFrame} must ensure that we can
// finish compilation without errors even if we hit unimplemented
// LiftoffAssembler methods.
if (DidAssemblerBailout(decoder)) return;
// Input 0 is the call target, the instance is at 1.
constexpr int kInstanceParameterIndex = 1;
// Check that {kWasmInstanceRegister} matches our call descriptor.
DCHECK_EQ(kWasmInstanceRegister,
Register::from_code(
descriptor_->GetInputLocation(kInstanceParameterIndex)
.AsRegister()));
// Store the instance parameter to a special stack slot.
__ SpillInstance(kWasmInstanceRegister);
__ cache_state()->SetInstanceCacheRegister(kWasmInstanceRegister);
if (for_debugging_) __ ResetOSRTarget();
// Process parameters.
if (num_params) DEBUG_CODE_COMMENT("process parameters");
// Input 0 is the code target, 1 is the instance. First parameter at 2.
uint32_t input_idx = kInstanceParameterIndex + 1;
for (uint32_t param_idx = 0; param_idx < num_params; ++param_idx) {
input_idx += ProcessParameter(__ local_kind(param_idx), input_idx);
}
int params_size = __ TopSpillOffset();
DCHECK_EQ(input_idx, descriptor_->InputCount());
// Initialize locals beyond parameters.
if (num_params < __ num_locals()) DEBUG_CODE_COMMENT("init locals");
if (SpillLocalsInitially(decoder, num_params)) {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
ValueKind kind = __ local_kind(param_idx);
__ PushStack(kind);
}
int spill_size = __ TopSpillOffset() - params_size;
__ FillStackSlotsWithZero(params_size, spill_size);
} else {
for (uint32_t param_idx = num_params; param_idx < __ num_locals();
++param_idx) {
ValueKind kind = __ local_kind(param_idx);
__ PushConstant(kind, int32_t{0});
}
}
if (FLAG_experimental_liftoff_extern_ref) {
// Initialize all reference type locals with ref.null.
Register null_ref_reg = no_reg;
for (uint32_t local_index = num_params; local_index < __ num_locals();
++local_index) {
ValueKind kind = __ local_kind(local_index);
if (is_reference(kind)) {
if (null_ref_reg == no_reg) {
null_ref_reg = __ GetUnusedRegister(kGpReg, {}).gp();
LoadNullValue(null_ref_reg, {});
}
__ Spill(__ cache_state()->stack_state[local_index].offset(),
LiftoffRegister(null_ref_reg), kind);
}
}
}
DCHECK_EQ(__ num_locals(), __ cache_state()->stack_height());
if (V8_UNLIKELY(debug_sidetable_builder_)) {
debug_sidetable_builder_->SetNumLocals(__ num_locals());
}
// The function-prologue stack check is associated with position 0, which
// is never a position of any instruction in the function.
StackCheck(decoder, 0);
if (FLAG_wasm_dynamic_tiering) {
// TODO(arobin): Avoid spilling registers unconditionally.
__ SpillAllRegisters();
DEBUG_CODE_COMMENT("dynamic tiering");
LiftoffRegList pinned;
// Load the address of the array counting the number of Liftoff calls.
LiftoffRegister array_address =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_INSTANCE_FIELD(array_address.gp(), NumLiftoffFunctionCallsArray,
kSystemPointerSize, pinned);
// Compute the correct offset in the array.
uint32_t offset =
kInt32Size * declared_function_index(env_->module, func_index_);
// Get the number of calls and update it.
LiftoffRegister old_number_of_calls =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LiftoffRegister new_number_of_calls =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ Load(old_number_of_calls, array_address.gp(), no_reg, offset,
LoadType::kI32Load, pinned);
__ emit_i32_addi(new_number_of_calls.gp(), old_number_of_calls.gp(), 1);
__ Store(array_address.gp(), no_reg, offset, new_number_of_calls,
StoreType::kI32Store, pinned);
// Emit the runtime call if necessary.
Label no_tierup;
// Check if the number of calls is a power of 2.
__ emit_i32_and(old_number_of_calls.gp(), old_number_of_calls.gp(),
new_number_of_calls.gp());
// Unary "unequal" means "different from zero".
__ emit_cond_jump(kUnequal, &no_tierup, kI32, old_number_of_calls.gp());
TierUpFunction(decoder);
// After the runtime call, the instance cache register is clobbered (we
// reset it already in {SpillAllRegisters} above, but then we still access
// the instance afterwards).
__ cache_state()->ClearCachedInstanceRegister();
__ bind(&no_tierup);
}
if (FLAG_trace_wasm) TraceFunctionEntry(decoder);
}
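// Emits the code for one out-of-line entry (a trap or the stack check slow
// path), including the runtime stub call and its safepoint.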
void GenerateOutOfLineCode(OutOfLineCode* ool) {
DEBUG_CODE_COMMENT(
(std::string("out of line: ") + GetRuntimeStubName(ool->stub)).c_str());
__ bind(ool->label.get());
const bool is_stack_check = ool->stub == WasmCode::kWasmStackGuard;
const bool is_mem_out_of_bounds =
ool->stub == WasmCode::kThrowWasmTrapMemOutOfBounds;
if (is_mem_out_of_bounds && env_->use_trap_handler) {
uint32_t pc = static_cast<uint32_t>(__ pc_offset());
DCHECK_EQ(pc, __ pc_offset());
protected_instructions_.emplace_back(
trap_handler::ProtectedInstructionData{ool->pc, pc});
}
if (!env_->runtime_exception_support) {
// We cannot test calls to the runtime in cctest/test-run-wasm.
// Therefore we emit a call to C here instead of a call to the runtime.
// In this mode, we never generate stack checks.
DCHECK(!is_stack_check);
__ CallTrapCallbackForTesting();
DEBUG_CODE_COMMENT("leave frame");
__ LeaveFrame(StackFrame::WASM);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
return;
}
// We cannot both push and spill registers.
DCHECK(ool->regs_to_save.is_empty() || ool->spilled_registers == nullptr);
if (!ool->regs_to_save.is_empty()) {
__ PushRegisters(ool->regs_to_save);
} else if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
for (auto& entry : ool->spilled_registers->entries) {
__ Spill(entry.offset, entry.reg, entry.kind);
}
}
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(ool->position), true);
__ CallRuntimeStub(ool->stub);
Safepoint safepoint = safepoint_table_builder_.DefineSafepoint(&asm_);
if (ool->safepoint_info) {
for (auto index : ool->safepoint_info->slots) {
safepoint.DefinePointerSlot(index);
}
int total_frame_size = __ GetTotalFrameSize();
LiftoffRegList gp_regs = ool->regs_to_save & kGpCacheRegList;
// {total_frame_size} is the highest offset from the FP that is used to
// store a value. The offset of the first spill slot should therefore be
// {(total_frame_size / kSystemPointerSize) + 1}. However, spill slots
// don't start at offset '0' but at offset '-1' (or
// {-kSystemPointerSize}). Therefore we have to add another '+ 1' to the
// index of the first spill slot.
int index = (total_frame_size / kSystemPointerSize) + 2;
__ RecordSpillsInSafepoint(safepoint, gp_regs,
ool->safepoint_info->spills, index);
}
DCHECK_EQ(!debug_sidetable_builder_, !ool->debug_sidetable_entry_builder);
if (V8_UNLIKELY(ool->debug_sidetable_entry_builder)) {
ool->debug_sidetable_entry_builder->set_pc_offset(__ pc_offset());
}
DCHECK_EQ(ool->continuation.get()->is_bound(), is_stack_check);
if (is_stack_check) {
MaybeOSR();
}
if (!ool->regs_to_save.is_empty()) __ PopRegisters(ool->regs_to_save);
if (is_stack_check) {
if (V8_UNLIKELY(ool->spilled_registers != nullptr)) {
DCHECK(for_debugging_);
for (auto& entry : ool->spilled_registers->entries) {
__ Fill(entry.reg, entry.offset, entry.kind);
}
}
if (ool->cached_instance != no_reg) {
__ LoadInstanceFromFrame(ool->cached_instance);
}
__ emit_jump(ool->continuation.get());
} else {
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
}
void FinishFunction(FullDecoder* decoder) {
if (DidAssemblerBailout(decoder)) return;
__ AlignFrameSize();
#if DEBUG
int frame_size = __ GetTotalFrameSize();
#endif
for (OutOfLineCode& ool : out_of_line_code_) {
GenerateOutOfLineCode(&ool);
}
DCHECK_EQ(frame_size, __ GetTotalFrameSize());
__ PatchPrepareStackFrame(pc_offset_stack_frame_construction_);
__ FinishCode();
safepoint_table_builder_.Emit(&asm_, __ GetTotalFrameSlotCountForGC());
// Emit the handler table.
if (!handlers_.empty()) {
handler_table_offset_ = HandlerTable::EmitReturnTableStart(&asm_);
for (auto& handler : handlers_) {
HandlerTable::EmitReturnEntry(&asm_, handler.pc_offset,
handler.handler.get()->pos());
}
}
__ MaybeEmitOutOfLineConstantPool();
// The previous calls may have also generated a bailout.
DidAssemblerBailout(decoder);
DCHECK_EQ(num_exceptions_, 0);
}
void OnFirstError(FullDecoder* decoder) {
if (!did_bailout()) bailout_reason_ = kDecodeError;
UnuseLabels(decoder);
asm_.AbortCompilation();
}
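// Emits breakpoints and the check for the "hook on function call" flag; only
// called when compiling for debugging.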
V8_NOINLINE void EmitDebuggingInfo(FullDecoder* decoder, WasmOpcode opcode) {
DCHECK(for_debugging_);
if (!WasmOpcodes::IsBreakable(opcode)) return;
bool has_breakpoint = false;
if (next_breakpoint_ptr_) {
if (*next_breakpoint_ptr_ == 0) {
// A single breakpoint at offset 0 indicates stepping.
DCHECK_EQ(next_breakpoint_ptr_ + 1, next_breakpoint_end_);
has_breakpoint = true;
} else {
while (next_breakpoint_ptr_ != next_breakpoint_end_ &&
*next_breakpoint_ptr_ < decoder->position()) {
// Skip unreachable breakpoints.
++next_breakpoint_ptr_;
}
if (next_breakpoint_ptr_ == next_breakpoint_end_) {
next_breakpoint_ptr_ = next_breakpoint_end_ = nullptr;
} else if (*next_breakpoint_ptr_ == decoder->position()) {
has_breakpoint = true;
}
}
}
if (has_breakpoint) {
EmitBreakpoint(decoder);
// Once we have emitted an unconditional breakpoint, we don't need to check
// for function-entry breaks any more.
did_function_entry_break_checks_ = true;
} else if (!did_function_entry_break_checks_) {
did_function_entry_break_checks_ = true;
DEBUG_CODE_COMMENT("check function entry break");
Label do_break;
Label no_break;
Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
// Check the "hook on function call" flag. If set, trigger a break.
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize,
{});
__ Load(LiftoffRegister{flag}, flag, no_reg, 0, LoadType::kI32Load8U, {});
// Unary "unequal" means "not equals zero".
__ emit_cond_jump(kUnequal, &do_break, kI32, flag);
// Check if we should stop on "script entry".
LOAD_INSTANCE_FIELD(flag, BreakOnEntry, kUInt8Size, {});
// Unary "equal" means "equals zero".
__ emit_cond_jump(kEqual, &no_break, kI32, flag);
__ bind(&do_break);
EmitBreakpoint(decoder);
__ bind(&no_break);
} else if (dead_breakpoint_ == decoder->position()) {
DCHECK(!next_breakpoint_ptr_ ||
*next_breakpoint_ptr_ != dead_breakpoint_);
// The top frame is paused at this position, but the breakpoint was
// removed. Adding a dead breakpoint here ensures that the source
// position exists, and that the offset to the return address is the
// same as in the old code.
Label cont;
__ emit_jump(&cont);
EmitBreakpoint(decoder);
__ bind(&cont);
}
}
void NextInstruction(FullDecoder* decoder, WasmOpcode opcode) {
// Add a single check, so that the fast path can be inlined while
// {EmitDebuggingInfo} stays outlined.
if (V8_UNLIKELY(for_debugging_)) EmitDebuggingInfo(decoder, opcode);
TraceCacheState(decoder);
#ifdef DEBUG
SLOW_DCHECK(__ ValidateCacheState());
if (WasmOpcodes::IsPrefixOpcode(opcode)) {
opcode = decoder->read_prefixed_opcode<Decoder::kFullValidation>(
decoder->pc());
}
DEBUG_CODE_COMMENT(WasmOpcodes::OpcodeName(opcode));
#endif
}
void EmitBreakpoint(FullDecoder* decoder) {
DEBUG_CODE_COMMENT("breakpoint");
DCHECK(for_debugging_);
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), true);
__ CallRuntimeStub(WasmCode::kWasmDebugBreak);
DefineSafepointWithCalleeSavedRegisters();
RegisterDebugSideTableEntry(decoder,
DebugSideTableBuilder::kAllowRegisters);
MaybeOSR();
}
void PushControl(Control* block) {
// The Liftoff stack includes implicit exception refs stored for catch
// blocks, so that they can be rethrown.
block->num_exceptions = num_exceptions_;
}
void Block(FullDecoder* decoder, Control* block) { PushControl(block); }
void Loop(FullDecoder* decoder, Control* loop) {
// Before entering a loop, spill all locals to the stack, in order to free
// the cache registers, and to avoid unnecessarily reloading stack values
// into registers at branches.
// TODO(clemensb): Come up with a better strategy here, involving
// pre-analysis of the function.
__ SpillLocals();
__ PrepareLoopArgs(loop->start_merge.arity);
// Loop labels bind at the beginning of the block.
__ bind(loop->label.get());
// Save the current cache state for the merge when jumping to this loop.
loop->label_state.Split(*__ cache_state());
PushControl(loop);
// Execute a stack check in the loop header.
StackCheck(decoder, decoder->position());
}
void Try(FullDecoder* decoder, Control* block) {
block->try_info = std::make_unique<TryInfo>();
PushControl(block);
}
// Loads the requested exception property into {kReturnRegister0}.
LiftoffRegister GetExceptionProperty(LiftoffAssembler::VarState& exception,
RootIndex root_index) {
DCHECK(root_index == RootIndex::kwasm_exception_tag_symbol ||
root_index == RootIndex::kwasm_exception_values_symbol);
LiftoffRegList pinned;
LiftoffRegister tag_symbol_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LoadExceptionSymbol(tag_symbol_reg.gp(), pinned, root_index);
LiftoffRegister context_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
LOAD_TAGGED_PTR_INSTANCE_FIELD(context_reg.gp(), NativeContext, pinned);
LiftoffAssembler::VarState tag_symbol(kPointerKind, tag_symbol_reg, 0);
LiftoffAssembler::VarState context(kPointerKind, context_reg, 0);
CallRuntimeStub(WasmCode::kWasmGetOwnProperty,
MakeSig::Returns(kPointerKind)
.Params(kPointerKind, kPointerKind, kPointerKind),
{exception, tag_symbol, context}, kNoSourcePosition);
return LiftoffRegister(kReturnRegister0);
}
void CatchException(FullDecoder* decoder,
const ExceptionIndexImmediate<validate>& imm,
Control* block, Vector<Value> values) {
DCHECK(block->is_try_catch());
__ emit_jump(block->label.get());
// The catch block is unreachable if nothing in the try block can throw. We
// only build a landing pad if some instruction in the try block can
// (possibly) throw; otherwise the catch environments remain empty.
if (!block->try_info->catch_reached) {
block->reachability = kSpecOnlyReachable;
return;
}
// This is the last use of this label. Re-use the field for the label of the
// next catch block, and jump there if the tag does not match.
__ bind(&block->try_info->catch_label);
new (&block->try_info->catch_label) Label();
__ cache_state()->Split(block->try_info->catch_state);
DEBUG_CODE_COMMENT("load caught exception tag");
DCHECK_EQ(__ cache_state()->stack_state.back().kind(), kRef);
LiftoffRegister caught_tag =
GetExceptionProperty(__ cache_state()->stack_state.back(),
RootIndex::kwasm_exception_tag_symbol);
LiftoffRegList pinned;
pinned.set(caught_tag);
DEBUG_CODE_COMMENT("load expected exception tag");
Register imm_tag = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_TAGGED_PTR_INSTANCE_FIELD(imm_tag, ExceptionsTable, pinned);
__ LoadTaggedPointer(
imm_tag, imm_tag, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(imm.index), {});
DEBUG_CODE_COMMENT("compare tags");
Label caught;
__ emit_cond_jump(kEqual, &caught, kI32, imm_tag, caught_tag.gp());
// The tags don't match, merge the current state into the catch state and
// jump to the next handler.
__ MergeFullStackWith(block->try_info->catch_state, *__ cache_state());
__ emit_jump(&block->try_info->catch_label);
__ bind(&caught);
if (!block->try_info->in_handler) {
block->try_info->in_handler = true;
num_exceptions_++;
}
GetExceptionValues(decoder, __ cache_state()->stack_state.back(),
imm.exception);
}
void Rethrow(FullDecoder* decoder,
const LiftoffAssembler::VarState& exception) {
DCHECK_EQ(exception.kind(), kRef);
CallRuntimeStub(WasmCode::kWasmRethrow, MakeSig::Params(kPointerKind),
{exception}, decoder->position());
}
void Delegate(FullDecoder* decoder, uint32_t depth, Control* block) {
DCHECK_EQ(block, decoder->control_at(0));
Control* target = decoder->control_at(depth);
DCHECK(block->is_incomplete_try());
__ bind(&block->try_info->catch_label);
if (block->try_info->catch_reached) {
__ cache_state()->Steal(block->try_info->catch_state);
if (depth == decoder->control_depth() - 1) {
// Delegate to the caller, do not emit a landing pad.
Rethrow(decoder, __ cache_state()->stack_state.back());
MaybeOSR();
} else {
DCHECK(target->is_incomplete_try());
if (!target->try_info->catch_reached) {
target->try_info->catch_state.InitMerge(
*__ cache_state(), __ num_locals(), 1,
target->stack_depth + target->num_exceptions);
target->try_info->catch_reached = true;
}
__ MergeStackWith(target->try_info->catch_state, 1,
LiftoffAssembler::kForwardJump);
__ emit_jump(&target->try_info->catch_label);
}
}
}
void Rethrow(FullDecoder* decoder, Control* try_block) {
int index = try_block->try_info->catch_state.stack_height() - 1;
auto& exception = __ cache_state()->stack_state[index];
Rethrow(decoder, exception);
int pc_offset = __ pc_offset();
MaybeOSR();
EmitLandingPad(decoder, pc_offset);
}
void CatchAll(FullDecoder* decoder, Control* block) {
DCHECK(block->is_try_catchall() || block->is_try_catch() ||
block->is_try_unwind());
DCHECK_EQ(decoder->control_at(0), block);
// The catch block is unreachable if nothing in the try block can throw. We
// only build a landing pad if some instruction in the try block can
// (possibly) throw; otherwise the catch environments remain empty.
if (!block->try_info->catch_reached) {
decoder->SetSucceedingCodeDynamicallyUnreachable();
return;
}
__ bind(&block->try_info->catch_label);
__ cache_state()->Steal(block->try_info->catch_state);
if (!block->try_info->in_handler) {
block->try_info->in_handler = true;
num_exceptions_++;
}
}
void If(FullDecoder* decoder, const Value& cond, Control* if_block) {
DCHECK_EQ(if_block, decoder->control_at(0));
DCHECK(if_block->is_if());
// Allocate the else state.
if_block->else_state = std::make_unique<ElseState>();
// Test the condition, jump to else if zero.
Register value = __ PopToRegister().gp();
__ emit_cond_jump(kEqual, if_block->else_state->label.get(), kI32, value);
// Store the state (after popping the value) for executing the else branch.
if_block->else_state->state.Split(*__ cache_state());
PushControl(if_block);
}
void FallThruTo(FullDecoder* decoder, Control* c) {
if (!c->end_merge.reached) {
c->label_state.InitMerge(*__ cache_state(), __ num_locals(),
c->end_merge.arity,
c->stack_depth + c->num_exceptions);
}
DCHECK(!c->is_try_catchall());
if (c->is_try_catch()) {
// Drop the implicit exception ref.
DCHECK_EQ(c->label_state.stack_height() + 1,
__ cache_state()->stack_height());
__ MergeStackWith(c->label_state, c->br_merge()->arity,
LiftoffAssembler::kForwardJump);
} else {
__ MergeFullStackWith(c->label_state, *__ cache_state());
}
__ emit_jump(c->label.get());
TraceCacheState(decoder);
}
void FinishOneArmedIf(FullDecoder* decoder, Control* c) {
DCHECK(c->is_onearmed_if());
if (c->end_merge.reached) {
// Someone already merged to the end of the if. Merge both arms into that.
if (c->reachable()) {
// Merge the if state into the end state.
__ MergeFullStackWith(c->label_state, *__ cache_state());
__ emit_jump(c->label.get());
}
// Merge the else state into the end state.
__ bind(c->else_state->label.get());
__ MergeFullStackWith(c->label_state, c->else_state->state);
__ cache_state()->Steal(c->label_state);
} else if (c->reachable()) {
// No merge yet at the end of the if, but we need to create a merge for
// both arms of this if. Thus initialize the merge point from the else
// state, then merge the if state into that.
DCHECK_EQ(c->start_merge.arity, c->end_merge.arity);
c->label_state.InitMerge(c->else_state->state, __ num_locals(),
c->start_merge.arity,
c->stack_depth + c->num_exceptions);
__ MergeFullStackWith(c->label_state, *__ cache_state());
__ emit_jump(c->label.get());
// Merge the else state into the end state.
__ bind(c->else_state->label.get());
__ MergeFullStackWith(c->label_state, c->else_state->state);
__ cache_state()->Steal(c->label_state);
} else {
// No merge needed, just continue with the else state.
__ bind(c->else_state->label.get());
__ cache_state()->Steal(c->else_state->state);
}
}
void FinishTry(FullDecoder* decoder, Control* c) {
DCHECK(c->is_try_catch() || c->is_try_catchall() || c->is_try_unwind());
if (!c->end_merge.reached) {
if (c->try_info->catch_reached) {
// Drop the implicit exception ref.
__ DropValue(__ num_locals() + c->stack_depth + c->num_exceptions);
}
// Otherwise we did not enter the catch state; continue with the current
// state.
} else {
if (c->reachable()) {
__ MergeStackWith(c->label_state, c->br_merge()->arity,
LiftoffAssembler::kForwardJump);
}
__ cache_state()->Steal(c->label_state);
}
if (c->try_info->catch_reached) {
num_exceptions_--;
}
}
void PopControl(FullDecoder* decoder, Control* c) {
if (c->is_loop()) return; // A loop just falls through.
if (c->is_onearmed_if()) {
// Special handling for one-armed ifs.
FinishOneArmedIf(decoder, c);
} else if (c->is_try_catch() || c->is_try_catchall() ||
c->is_try_unwind()) {
FinishTry(decoder, c);
} else if (c->end_merge.reached) {
// There is a merge already. Merge our state into that, then continue with
// that state.
if (c->reachable()) {
__ MergeFullStackWith(c->label_state, *__ cache_state());
}
__ cache_state()->Steal(c->label_state);
} else {
// No merge, just continue with our current state.
}
if (!c->label.get()->is_bound()) __ bind(c->label.get());
}
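// Spills all cache registers and emits a call to a C function taking the
// given arguments; results are written to {result_regs}.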
void GenerateCCall(const LiftoffRegister* result_regs,
const ValueKindSig* sig, ValueKind out_argument_kind,
const LiftoffRegister* arg_regs,
ExternalReference ext_ref) {
// Before making a call, spill all cache registers.
__ SpillAllRegisters();
// Store arguments on our stack, then align the stack for the C call.
int param_bytes = 0;
for (ValueKind param_kind : sig->parameters()) {
param_bytes += element_size_bytes(param_kind);
}
int out_arg_bytes =
out_argument_kind == kVoid ? 0 : element_size_bytes(out_argument_kind);
int stack_bytes = std::max(param_bytes, out_arg_bytes);
__ CallC(sig, arg_regs, result_regs, out_argument_kind, stack_bytes,
ext_ref);
}
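// {CallEmitFn} dispatches to either a plain callable (e.g. a lambda) or a
// {LiftoffAssembler} member function pointer.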
template <typename EmitFn, typename... Args>
typename std::enable_if<!std::is_member_function_pointer<EmitFn>::value>::type
CallEmitFn(EmitFn fn, Args... args) {
fn(args...);
}
template <typename EmitFn, typename... Args>
typename std::enable_if<std::is_member_function_pointer<EmitFn>::value>::type
CallEmitFn(EmitFn fn, Args... args) {
(asm_.*fn)(ConvertAssemblerArg(args)...);
}
// Wrap a {LiftoffRegister} with implicit conversions to {Register} and
// {DoubleRegister}.
struct AssemblerRegisterConverter {
LiftoffRegister reg;
operator LiftoffRegister() { return reg; }
operator Register() { return reg.gp(); }
operator DoubleRegister() { return reg.fp(); }
};
// Convert {LiftoffRegister} to {AssemblerRegisterConverter}, other types stay
// unchanged.
template <typename T>
typename std::conditional<std::is_same<LiftoffRegister, T>::value,
AssemblerRegisterConverter, T>::type
ConvertAssemblerArg(T t) {
return {t};
}
template <typename EmitFn, typename ArgType>
struct EmitFnWithFirstArg {
EmitFn fn;
ArgType first_arg;
};
template <typename EmitFn, typename ArgType>
EmitFnWithFirstArg<EmitFn, ArgType> BindFirst(EmitFn fn, ArgType arg) {
return {fn, arg};
}
template <typename EmitFn, typename T, typename... Args>
void CallEmitFn(EmitFnWithFirstArg<EmitFn, T> bound_fn, Args... args) {
CallEmitFn(bound_fn.fn, bound_fn.first_arg, ConvertAssemblerArg(args)...);
}
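// Pops one operand, calls {fn} with a destination register of the result
// class, and pushes the result.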
template <ValueKind src_kind, ValueKind result_kind, class EmitFn>
void EmitUnOp(EmitFn fn) {
constexpr RegClass src_rc = reg_class_for(src_kind);
constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src}, {})
: __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
__ PushRegister(result_kind, dst);
}
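// Like {EmitUnOp}, but falls back to a C call if the assembler does not
// implement the operation natively.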
template <ValueKind kind>
void EmitFloatUnOpWithCFallback(
bool (LiftoffAssembler::*emit_fn)(DoubleRegister, DoubleRegister),
ExternalReference (*fallback_fn)()) {
auto emit_with_c_fallback = [=](LiftoffRegister dst, LiftoffRegister src) {
if ((asm_.*emit_fn)(dst.fp(), src.fp())) return;
ExternalReference ext_ref = fallback_fn();
auto sig = MakeSig::Params(kind);
GenerateCCall(&dst, &sig, kind, &src, ext_ref);
};
EmitUnOp<kind, kind>(emit_with_c_fallback);
}
enum TypeConversionTrapping : bool { kCanTrap = true, kNoTrap = false };
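// Emits a type conversion. For conversions that can trap, an out-of-line trap
// is registered; if no native instruction is available, a C call is emitted
// instead.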
template <ValueKind dst_kind, ValueKind src_kind,
TypeConversionTrapping can_trap>
void EmitTypeConversion(FullDecoder* decoder, WasmOpcode opcode,
ExternalReference (*fallback_fn)()) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass dst_rc = reg_class_for(dst_kind);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == dst_rc
? __ GetUnusedRegister(dst_rc, {src}, {})
: __ GetUnusedRegister(dst_rc, {});
Label* trap =
can_trap ? AddOutOfLineTrap(
decoder, WasmCode::kThrowWasmTrapFloatUnrepresentable)
: nullptr;
if (!__ emit_type_conversion(opcode, dst, src, trap)) {
DCHECK_NOT_NULL(fallback_fn);
ExternalReference ext_ref = fallback_fn();
if (can_trap) {
// External references for potentially trapping conversions return int.
auto sig = MakeSig::Returns(kI32).Params(src_kind);
LiftoffRegister ret_reg =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister dst_regs[] = {ret_reg, dst};
GenerateCCall(dst_regs, &sig, dst_kind, &src, ext_ref);
__ emit_cond_jump(kEqual, trap, kI32, ret_reg.gp());
} else {
ValueKind sig_kinds[] = {src_kind};
ValueKindSig sig(0, 1, sig_kinds);
GenerateCCall(&dst, &sig, dst_kind, &src, ext_ref);
}
}
__ PushRegister(dst_kind, dst);
}
void UnOp(FullDecoder* decoder, WasmOpcode opcode, const Value& value,
Value* result) {
#define CASE_I32_UNOP(opcode, fn) \
case kExpr##opcode: \
return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_##fn);
#define CASE_I64_UNOP(opcode, fn) \
case kExpr##opcode: \
return EmitUnOp<kI64, kI64>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP(opcode, kind, fn) \
case kExpr##opcode: \
return EmitUnOp<k##kind, k##kind>(&LiftoffAssembler::emit_##fn);
#define CASE_FLOAT_UNOP_WITH_CFALLBACK(opcode, kind, fn) \
case kExpr##opcode: \
return EmitFloatUnOpWithCFallback<k##kind>(&LiftoffAssembler::emit_##fn, \
&ExternalReference::wasm_##fn);
#define CASE_TYPE_CONVERSION(opcode, dst_kind, src_kind, ext_ref, can_trap) \
case kExpr##opcode: \
return EmitTypeConversion<k##dst_kind, k##src_kind, can_trap>( \
decoder, kExpr##opcode, ext_ref);
switch (opcode) {
CASE_I32_UNOP(I32Clz, i32_clz)
CASE_I32_UNOP(I32Ctz, i32_ctz)
CASE_FLOAT_UNOP(F32Abs, F32, f32_abs)
CASE_FLOAT_UNOP(F32Neg, F32, f32_neg)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Ceil, F32, f32_ceil)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Floor, F32, f32_floor)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F32Trunc, F32, f32_trunc)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F32NearestInt, F32, f32_nearest_int)
CASE_FLOAT_UNOP(F32Sqrt, F32, f32_sqrt)
CASE_FLOAT_UNOP(F64Abs, F64, f64_abs)
CASE_FLOAT_UNOP(F64Neg, F64, f64_neg)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Ceil, F64, f64_ceil)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Floor, F64, f64_floor)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F64Trunc, F64, f64_trunc)
CASE_FLOAT_UNOP_WITH_CFALLBACK(F64NearestInt, F64, f64_nearest_int)
CASE_FLOAT_UNOP(F64Sqrt, F64, f64_sqrt)
CASE_TYPE_CONVERSION(I32ConvertI64, I32, I64, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I32SConvertF32, I32, F32, nullptr, kCanTrap)
CASE_TYPE_CONVERSION(I32UConvertF32, I32, F32, nullptr, kCanTrap)
CASE_TYPE_CONVERSION(I32SConvertF64, I32, F64, nullptr, kCanTrap)
CASE_TYPE_CONVERSION(I32UConvertF64, I32, F64, nullptr, kCanTrap)
CASE_TYPE_CONVERSION(I32ReinterpretF32, I32, F32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I64SConvertI32, I64, I32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I64UConvertI32, I64, I32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I64SConvertF32, I64, F32,
&ExternalReference::wasm_float32_to_int64, kCanTrap)
CASE_TYPE_CONVERSION(I64UConvertF32, I64, F32,
&ExternalReference::wasm_float32_to_uint64, kCanTrap)
CASE_TYPE_CONVERSION(I64SConvertF64, I64, F64,
&ExternalReference::wasm_float64_to_int64, kCanTrap)
CASE_TYPE_CONVERSION(I64UConvertF64, I64, F64,
&ExternalReference::wasm_float64_to_uint64, kCanTrap)
CASE_TYPE_CONVERSION(I64ReinterpretF64, I64, F64, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F32SConvertI32, F32, I32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F32UConvertI32, F32, I32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F32SConvertI64, F32, I64,
&ExternalReference::wasm_int64_to_float32, kNoTrap)
CASE_TYPE_CONVERSION(F32UConvertI64, F32, I64,
&ExternalReference::wasm_uint64_to_float32, kNoTrap)
CASE_TYPE_CONVERSION(F32ConvertF64, F32, F64, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F32ReinterpretI32, F32, I32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F64SConvertI32, F64, I32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F64UConvertI32, F64, I32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F64SConvertI64, F64, I64,
&ExternalReference::wasm_int64_to_float64, kNoTrap)
CASE_TYPE_CONVERSION(F64UConvertI64, F64, I64,
&ExternalReference::wasm_uint64_to_float64, kNoTrap)
CASE_TYPE_CONVERSION(F64ConvertF32, F64, F32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(F64ReinterpretI64, F64, I64, nullptr, kNoTrap)
CASE_I32_UNOP(I32SExtendI8, i32_signextend_i8)
CASE_I32_UNOP(I32SExtendI16, i32_signextend_i16)
CASE_I64_UNOP(I64SExtendI8, i64_signextend_i8)
CASE_I64_UNOP(I64SExtendI16, i64_signextend_i16)
CASE_I64_UNOP(I64SExtendI32, i64_signextend_i32)
CASE_I64_UNOP(I64Clz, i64_clz)
CASE_I64_UNOP(I64Ctz, i64_ctz)
CASE_TYPE_CONVERSION(I32SConvertSatF32, I32, F32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I32UConvertSatF32, I32, F32, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I32SConvertSatF64, I32, F64, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I32UConvertSatF64, I32, F64, nullptr, kNoTrap)
CASE_TYPE_CONVERSION(I64SConvertSatF32, I64, F32,
&ExternalReference::wasm_float32_to_int64_sat,
kNoTrap)
CASE_TYPE_CONVERSION(I64UConvertSatF32, I64, F32,
&ExternalReference::wasm_float32_to_uint64_sat,
kNoTrap)
CASE_TYPE_CONVERSION(I64SConvertSatF64, I64, F64,
&ExternalReference::wasm_float64_to_int64_sat,
kNoTrap)
CASE_TYPE_CONVERSION(I64UConvertSatF64, I64, F64,
&ExternalReference::wasm_float64_to_uint64_sat,
kNoTrap)
case kExprI32Eqz:
DCHECK(decoder->lookahead(0, kExprI32Eqz));
if (decoder->lookahead(1, kExprBrIf) && !for_debugging_) {
DCHECK(!has_outstanding_op());
outstanding_op_ = kExprI32Eqz;
break;
}
return EmitUnOp<kI32, kI32>(&LiftoffAssembler::emit_i32_eqz);
case kExprI64Eqz:
return EmitUnOp<kI64, kI32>(&LiftoffAssembler::emit_i64_eqz);
case kExprI32Popcnt:
return EmitUnOp<kI32, kI32>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i32_popcnt(dst.gp(), src.gp())) return;
auto sig = MakeSig::Returns(kI32).Params(kI32);
GenerateCCall(&dst, &sig, kVoid, &src,
ExternalReference::wasm_word32_popcnt());
});
case kExprI64Popcnt:
return EmitUnOp<kI64, kI64>(
[=](LiftoffRegister dst, LiftoffRegister src) {
if (__ emit_i64_popcnt(dst, src)) return;
// The C function returns i32. We will zero-extend later.
auto sig = MakeSig::Returns(kI32).Params(kI64);
LiftoffRegister c_call_dst = kNeedI64RegPair ? dst.low() : dst;
GenerateCCall(&c_call_dst, &sig, kVoid, &src,
ExternalReference::wasm_word64_popcnt());
// Now zero-extend the result to i64.
__ emit_type_conversion(kExprI64UConvertI32, dst, c_call_dst,
nullptr);
});
case kExprRefIsNull: {
if (!FLAG_experimental_liftoff_extern_ref) {
unsupported(decoder, kRefTypes, "ref_is_null");
return;
}
LiftoffRegList pinned;
LiftoffRegister ref = pinned.set(__ PopToRegister());
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned);
// Prefer to overwrite one of the input registers with the result
// of the comparison.
LiftoffRegister dst = __ GetUnusedRegister(kGpReg, {ref, null}, {});
__ emit_ptrsize_set_cond(kEqual, dst.gp(), ref, null);
__ PushRegister(kI32, dst);
return;
}
default:
UNREACHABLE();
}
#undef CASE_I32_UNOP
#undef CASE_I64_UNOP
#undef CASE_FLOAT_UNOP
#undef CASE_FLOAT_UNOP_WITH_CFALLBACK
#undef CASE_TYPE_CONVERSION
}
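// Like {EmitBinOp}, but uses the immediate variant {fnImm} if the value on
// top of the stack is a compile-time constant.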
template <ValueKind src_kind, ValueKind result_kind, typename EmitFn,
typename EmitFnImm>
void EmitBinOpImm(EmitFn fn, EmitFnImm fnImm) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffAssembler::VarState rhs_slot = __ cache_state()->stack_state.back();
// Check if the RHS is an immediate.
if (rhs_slot.is_const()) {
__ cache_state()->stack_state.pop_back();
int32_t imm = rhs_slot.i32_const();
LiftoffRegister lhs = __ PopToRegister();
// Either reuse {lhs} for {dst}, or choose a register (pair) which does
// not overlap, for easier code generation.
LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs);
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs}, pinned)
: __ GetUnusedRegister(result_rc, pinned);
CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(result_kind, dst);
} else {
// The RHS was not an immediate.
EmitBinOp<src_kind, result_kind>(fn);
}
}
template <ValueKind src_kind, ValueKind result_kind,
bool swap_lhs_rhs = false, typename EmitFn>
void EmitBinOp(EmitFn fn) {
static constexpr RegClass src_rc = reg_class_for(src_kind);
static constexpr RegClass result_rc = reg_class_for(result_kind);
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
: __ GetUnusedRegister(result_rc, {});
if (swap_lhs_rhs) std::swap(lhs, rhs);
CallEmitFn(fn, dst, lhs, rhs);
__ PushRegister(result_kind, dst);
}
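// Emits a C call for i64 division or remainder. The i32 return value of the
// C function signals division by zero (0) or an unrepresentable result (-1).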
void EmitDivOrRem64CCall(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, ExternalReference ext_ref,
Label* trap_by_zero,
Label* trap_unrepresentable = nullptr) {
// Cannot emit native instructions, build C call.
LiftoffRegister ret =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst));
LiftoffRegister tmp =
__ GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(dst, ret));
LiftoffRegister arg_regs[] = {lhs, rhs};
LiftoffRegister result_regs[] = {ret, dst};
auto sig = MakeSig::Returns(kI32).Params(kI64, kI64);
GenerateCCall(result_regs, &sig, kI64, arg_regs, ext_ref);
__ LoadConstant(tmp, WasmValue(int32_t{0}));
__ emit_cond_jump(kEqual, trap_by_zero, kI32, ret.gp(), tmp.gp());
if (trap_unrepresentable) {
__ LoadConstant(tmp, WasmValue(int32_t{-1}));
__ emit_cond_jump(kEqual, trap_unrepresentable, kI32, ret.gp(), tmp.gp());
}
}
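  // For an i32 comparison directly followed by a br_if (and not compiling for
  // debugging), do not materialize the i32 result: record the comparison in
  // {outstanding_op_} so the following conditional branch can use it directly.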
template <WasmOpcode opcode>
void EmitI32CmpOp(FullDecoder* decoder) {
DCHECK(decoder->lookahead(0, opcode));
if (decoder->lookahead(1, kExprBrIf) && !for_debugging_) {
DCHECK(!has_outstanding_op());
outstanding_op_ = opcode;
return;
}
return EmitBinOp<kI32, kI32>(BindFirst(&LiftoffAssembler::emit_i32_set_cond,
GetCompareCondition(opcode)));
}
void BinOp(FullDecoder* decoder, WasmOpcode opcode, const Value& lhs,
const Value& rhs, Value* result) {
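// On 32-bit targets an i64 shift amount is a register pair; only the low half
// is used as the shift count.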
#define CASE_I64_SHIFTOP(opcode, fn) \
case kExpr##opcode: \
return EmitBinOpImm<kI64, kI64>( \
[=](LiftoffRegister dst, LiftoffRegister src, \
LiftoffRegister amount) { \
__ emit_##fn(dst, src, \
amount.is_gp_pair() ? amount.low_gp() : amount.gp()); \
}, \
&LiftoffAssembler::emit_##fn##i);
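// Binops without a native instruction are implemented via a C call. For i64
// the result is passed back through a stack (out) argument, for i32 it is the
// C function's return value.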
#define CASE_CCALL_BINOP(opcode, kind, ext_ref_fn) \
case kExpr##opcode: \
return EmitBinOp<k##kind, k##kind>( \
[=](LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
LiftoffRegister args[] = {lhs, rhs}; \
auto ext_ref = ExternalReference::ext_ref_fn(); \
ValueKind sig_kinds[] = {k##kind, k##kind, k##kind}; \
const bool out_via_stack = k##kind == kI64; \
ValueKindSig sig(out_via_stack ? 0 : 1, 2, sig_kinds); \
ValueKind out_arg_kind = out_via_stack ? kI64 : kVoid; \
GenerateCCall(&dst, &sig, out_arg_kind, args, ext_ref); \
});
switch (opcode) {
case kExprI32Add:
return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_add,
&LiftoffAssembler::emit_i32_addi);
case kExprI32Sub:
return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_sub);
case kExprI32Mul:
return EmitBinOp<kI32, kI32>(&LiftoffAssembler::emit_i32_mul);
case kExprI32And:
return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_and,
&LiftoffAssembler::emit_i32_andi);
case kExprI32Ior:
return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_or,
&LiftoffAssembler::emit_i32_ori);
case kExprI32Xor:
return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_xor,
&LiftoffAssembler::emit_i32_xori);
case kExprI32Eq:
return EmitI32CmpOp<kExprI32Eq>(decoder);
case kExprI32Ne:
return EmitI32CmpOp<kExprI32Ne>(decoder);
case kExprI32LtS:
return EmitI32CmpOp<kExprI32LtS>(decoder);
case kExprI32LtU:
return EmitI32CmpOp<kExprI32LtU>(decoder);
case kExprI32GtS:
return EmitI32CmpOp<kExprI32GtS>(decoder);
case kExprI32GtU:
return EmitI32CmpOp<kExprI32GtU>(decoder);
case kExprI32LeS:
return EmitI32CmpOp<kExprI32LeS>(decoder);
case kExprI32LeU:
return EmitI32CmpOp<kExprI32LeU>(decoder);
case kExprI32GeS:
return EmitI32CmpOp<kExprI32GeS>(decoder);
case kExprI32GeU:
return EmitI32CmpOp<kExprI32GeU>(decoder);
case kExprI64Add:
return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_add,
&LiftoffAssembler::emit_i64_addi);
case kExprI64Sub:
return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_sub);
case kExprI64Mul:
return EmitBinOp<kI64, kI64>(&LiftoffAssembler::emit_i64_mul);
case kExprI64And:
return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_and,
&LiftoffAssembler::emit_i64_andi);
case kExprI64Ior:
return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_or,
&LiftoffAssembler::emit_i64_ori);
case kExprI64Xor:
return EmitBinOpImm<kI64, kI64>(&LiftoffAssembler::emit_i64_xor,
&LiftoffAssembler::emit_i64_xori);
case kExprI64Eq:
return EmitBinOp<kI64, kI32>(
BindFirst(&LiftoffAssembler::emit_i64_set_cond, kEqual));
case kExprI64Ne:
return EmitBinOp<kI64, kI32>(
BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnequal));
case kExprI64LtS:
return EmitBinOp<kI64, kI32>(
BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessThan));
case kExprI64LtU:
return EmitBinOp<kI64, kI32>(
BindFirst(&LiftoffAssembler::emit_i64_set_cond, kUnsignedLessThan));
case kExprI64GtS:
return EmitBinOp<kI64, kI32>(BindFirst(
&LiftoffAssembler::emit_i64_set_cond, kSignedGreaterThan));
case kExprI64GtU:
return EmitBinOp<kI64, kI32>(BindFirst(
&LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterThan));
case kExprI64LeS:
return EmitBinOp<kI64, kI32>(
BindFirst(&LiftoffAssembler::emit_i64_set_cond, kSignedLessEqual));
case kExprI64LeU:
return EmitBinOp<kI64, kI32>(BindFirst(
&LiftoffAssembler::emit_i64_set_cond, kUnsignedLessEqual));
case kExprI64GeS:
return EmitBinOp<kI64, kI32>(BindFirst(
&LiftoffAssembler::emit_i64_set_cond, kSignedGreaterEqual));
case kExprI64GeU:
return EmitBinOp<kI64, kI32>(BindFirst(
&LiftoffAssembler::emit_i64_set_cond, kUnsignedGreaterEqual));
case kExprF32Eq:
return EmitBinOp<kF32, kI32>(
BindFirst(&LiftoffAssembler::emit_f32_set_cond, kEqual));
case kExprF32Ne:
return EmitBinOp<kF32, kI32>(
BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnequal));
case kExprF32Lt:
return EmitBinOp<kF32, kI32>(
BindFirst(&LiftoffAssembler::emit_f32_set_cond, kUnsignedLessThan));
case kExprF32Gt:
return EmitBinOp<kF32, kI32>(BindFirst(
&LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterThan));
case kExprF32Le:
return EmitBinOp<kF32, kI32>(BindFirst(
&LiftoffAssembler::emit_f32_set_cond, kUnsignedLessEqual));
case kExprF32Ge:
return EmitBinOp<kF32, kI32>(BindFirst(
&LiftoffAssembler::emit_f32_set_cond, kUnsignedGreaterEqual));
case kExprF64Eq:
return EmitBinOp<kF64, kI32>(
BindFirst(&LiftoffAssembler::emit_f64_set_cond, kEqual));
case kExprF64Ne:
return EmitBinOp<kF64, kI32>(
BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnequal));
case kExprF64Lt:
return EmitBinOp<kF64, kI32>(
BindFirst(&LiftoffAssembler::emit_f64_set_cond, kUnsignedLessThan));
case kExprF64Gt:
return EmitBinOp<kF64, kI32>(BindFirst(
&LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterThan));
case kExprF64Le:
return EmitBinOp<kF64, kI32>(BindFirst(
&LiftoffAssembler::emit_f64_set_cond, kUnsignedLessEqual));
case kExprF64Ge:
return EmitBinOp<kF64, kI32>(BindFirst(
&LiftoffAssembler::emit_f64_set_cond, kUnsignedGreaterEqual));
case kExprI32Shl:
return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shl,
&LiftoffAssembler::emit_i32_shli);
case kExprI32ShrS:
return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_sar,
&LiftoffAssembler::emit_i32_sari);
case kExprI32ShrU:
return EmitBinOpImm<kI32, kI32>(&LiftoffAssembler::emit_i32_shr,
&LiftoffAssembler::emit_i32_shri);
CASE_CCALL_BINOP(I32Rol, I32, wasm_word32_rol)
CASE_CCALL_BINOP(I32Ror, I32, wasm_word32_ror)
CASE_I64_SHIFTOP(I64Shl, i64_shl)
CASE_I64_SHIFTOP(I64ShrS, i64_sar)
CASE_I64_SHIFTOP(I64ShrU, i64_shr)
CASE_CCALL_BINOP(I64Rol, I64, wasm_word64_rol)
CASE_CCALL_BINOP(I64Ror, I64, wasm_word64_ror)
case kExprF32Add:
return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_add);
case kExprF32Sub:
return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_sub);
case kExprF32Mul:
return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_mul);
case kExprF32Div:
return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_div);
case kExprF32Min:
return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_min);
case kExprF32Max:
return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_max);
case kExprF32CopySign:
return EmitBinOp<kF32, kF32>(&LiftoffAssembler::emit_f32_copysign);
case kExprF64Add:
return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_add);
case kExprF64Sub:
return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_sub);
case kExprF64Mul:
return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_mul);
case kExprF64Div:
return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_div);
case kExprF64Min:
return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_min);
case kExprF64Max:
return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_max);
case kExprF64CopySign:
return EmitBinOp<kF64, kF64>(&LiftoffAssembler::emit_f64_copysign);
case kExprI32DivS:
return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
        // Adding the second trap might invalidate the pointer returned for
        // the first one, so fetch both labels only after adding both traps.
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivUnrepresentable);
Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
__ emit_i32_divs(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero,
div_unrepresentable);
});
case kExprI32DivU:
return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* div_by_zero =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
__ emit_i32_divu(dst.gp(), lhs.gp(), rhs.gp(), div_by_zero);
});
case kExprI32RemS:
return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
__ emit_i32_rems(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
});
case kExprI32RemU:
return EmitBinOp<kI32, kI32>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
__ emit_i32_remu(dst.gp(), lhs.gp(), rhs.gp(), rem_by_zero);
});
case kExprI64DivS:
return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
        // Adding the second trap might invalidate the pointer returned for
        // the first one, so fetch both labels only after adding both traps.
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivUnrepresentable);
Label* div_by_zero = out_of_line_code_.end()[-2].label.get();
Label* div_unrepresentable = out_of_line_code_.end()[-1].label.get();
if (!__ emit_i64_divs(dst, lhs, rhs, div_by_zero,
div_unrepresentable)) {
ExternalReference ext_ref = ExternalReference::wasm_int64_div();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero,
div_unrepresentable);
}
});
case kExprI64DivU:
return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* div_by_zero =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapDivByZero);
if (!__ emit_i64_divu(dst, lhs, rhs, div_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_uint64_div();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, div_by_zero);
}
});
case kExprI64RemS:
return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
if (!__ emit_i64_rems(dst, lhs, rhs, rem_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_int64_mod();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
}
});
case kExprI64RemU:
return EmitBinOp<kI64, kI64>([this, decoder](LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
Label* rem_by_zero =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapRemByZero);
if (!__ emit_i64_remu(dst, lhs, rhs, rem_by_zero)) {
ExternalReference ext_ref = ExternalReference::wasm_uint64_mod();
EmitDivOrRem64CCall(dst, lhs, rhs, ext_ref, rem_by_zero);
}
});
case kExprRefEq: {
return EmitBinOp<kOptRef, kI32>(
BindFirst(&LiftoffAssembler::emit_ptrsize_set_cond, kEqual));
}
default:
UNREACHABLE();
}
#undef CASE_I64_SHIFTOP
#undef CASE_CCALL_BINOP
}
void I32Const(FullDecoder* decoder, Value* result, int32_t value) {
__ PushConstant(kI32, value);
}
void I64Const(FullDecoder* decoder, Value* result, int64_t value) {
    // The {VarState} stores constant values as int32_t, thus we only store
    // 64-bit constants in this field if they fit in an int32_t. Larger values
    // cannot be used as immediates anyway, so we just put them in a register
    // right away.
int32_t value_i32 = static_cast<int32_t>(value);
if (value_i32 == value) {
__ PushConstant(kI64, value_i32);
} else {
LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kI64), {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kI64, reg);
}
}
void F32Const(FullDecoder* decoder, Value* result, float value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kF32, reg);
}
void F64Const(FullDecoder* decoder, Value* result, double value) {
LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kF64, reg);
}
void RefNull(FullDecoder* decoder, ValueType type, Value*) {
if (!FLAG_experimental_liftoff_extern_ref) {
unsupported(decoder, kRefTypes, "ref_null");
return;
}
LiftoffRegister null = __ GetUnusedRegister(kGpReg, {});
LoadNullValue(null.gp(), {});
__ PushRegister(type.kind(), null);
}
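  // ref.func goes through the WasmRefFunc runtime stub, which returns the
  // function reference for {function_index}; push the returned reference.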
void RefFunc(FullDecoder* decoder, uint32_t function_index, Value* result) {
LiftoffRegister func_index_reg = __ GetUnusedRegister(kGpReg, {});
__ LoadConstant(func_index_reg, WasmValue(function_index));
LiftoffAssembler::VarState func_index_var(kI32, func_index_reg, 0);
CallRuntimeStub(WasmCode::kWasmRefFunc, MakeSig::Returns(kRef).Params(kI32),
{func_index_var}, decoder->position());
__ PushRegister(kRef, LiftoffRegister(kReturnRegister0));
}
void RefAsNonNull(FullDecoder* decoder, const Value& arg, Value* result) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
MaybeEmitNullCheck(decoder, obj.gp(), pinned, arg.type);
__ PushRegister(kRef, obj);
}
void Drop(FullDecoder* decoder) { __ DropValues(1); }
void TraceFunctionExit(FullDecoder* decoder) {
DEBUG_CODE_COMMENT("trace function exit");
// Before making the runtime call, spill all cache registers.
__ SpillAllRegisters();
LiftoffRegList pinned;
// Get a register to hold the stack slot for the return value.
LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ AllocateStackSlot(info.gp(), sizeof(int64_t));
// Store the return value if there is exactly one. Multiple return values
// are not handled yet.
size_t num_returns = decoder->sig_->return_count();
if (num_returns == 1) {
ValueKind return_kind = decoder->sig_->GetReturn(0).kind();
LiftoffRegister return_reg =
__ LoadToRegister(__ cache_state()->stack_state.back(), pinned);
__ Store(info.gp(), no_reg, 0, return_reg,
StoreType::ForValueKind(return_kind), pinned);
}
// Put the parameter in its place.
WasmTraceExitDescriptor descriptor;
DCHECK_EQ(0, descriptor.GetStackParameterCount());
DCHECK_EQ(1, descriptor.GetRegisterParameterCount());
Register param_reg = descriptor.GetRegisterParameter(0);
if (info.gp() != param_reg) {
__ Move(param_reg, info.gp(), kPointerKind);
}
source_position_table_builder_.AddPosition(
__ pc_offset(), SourcePosition(decoder->position()), false);
__ CallRuntimeStub(WasmCode::kWasmTraceExit);
DefineSafepoint();
__ DeallocateStackSlot(sizeof(int64_t));
}
void DoReturn(FullDecoder* decoder, uint32_t /* drop_values */) {
if (FLAG_trace_wasm) TraceFunctionExit(decoder);
size_t num_returns = decoder->sig_->return_count();
if (num_returns > 0) __ MoveToReturnLocations(decoder->sig_, descriptor_);
DEBUG_CODE_COMMENT("leave frame");
__ LeaveFrame(StackFrame::WASM);
__ DropStackSlotsAndRet(
static_cast<uint32_t>(descriptor_->ParameterSlotCount()));
}
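  // local.get duplicates the local's slot on top of the value stack. Register
  // and constant slots are copied directly; stack slots are first loaded into
  // a fresh register.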
void LocalGet(FullDecoder* decoder, Value* result,
const LocalIndexImmediate<validate>& imm) {
auto local_slot = __ cache_state()->stack_state[imm.index];
__ cache_state()->stack_state.emplace_back(
local_slot.kind(), __ NextSpillOffset(local_slot.kind()));
auto* slot = &__ cache_state()->stack_state.back();
if (local_slot.is_reg()) {
__ cache_state()->inc_used(local_slot.reg());
slot->MakeRegister(local_slot.reg());
} else if (local_slot.is_const()) {
slot->MakeConstant(local_slot.i32_const());
} else {
DCHECK(local_slot.is_stack());
auto rc = reg_class_for(local_slot.kind());
LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ cache_state()->inc_used(reg);
slot->MakeRegister(reg);
__ Fill(reg, local_slot.offset(), local_slot.kind());
}
}
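  // Store the stack-slot value on top of the value stack into the local at
  // {local_index}. If the local's register is used only by this slot, it is
  // refilled in place; otherwise a new register is allocated.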
void LocalSetFromStackSlot(LiftoffAssembler::VarState* dst_slot,
uint32_t local_index) {
auto& state = *__ cache_state();
auto& src_slot = state.stack_state.back();
ValueKind kind = dst_slot->kind();
if (dst_slot->is_reg()) {
LiftoffRegister slot_reg = dst_slot->reg();
if (state.get_use_count(slot_reg) == 1) {
__ Fill(dst_slot->reg(), src_slot.offset(), kind);
return;
}
state.dec_used(slot_reg);
dst_slot->MakeStack();
}
DCHECK_EQ(kind, __ local_kind(local_index));
RegClass rc = reg_class_for(kind);
LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), kind);
*dst_slot = LiftoffAssembler::VarState(kind, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
}
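  // Common implementation of local.set and local.tee: copy the top-of-stack
  // slot into the local's slot. For a plain set the source is popped
  // afterwards; for a tee it stays on the stack.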
void LocalSet(uint32_t local_index, bool is_tee) {
auto& state = *__ cache_state();
auto& source_slot = state.stack_state.back();
auto& target_slot = state.stack_state[local_index];
switch (source_slot.loc()) {
case kRegister:
if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot.Copy(source_slot);
if (is_tee) state.inc_used(target_slot.reg());
break;
case kIntConst:
if (target_slot.is_reg()) state.dec_used(target_slot.reg());
target_slot.Copy(source_slot);
break;
case kStack:
LocalSetFromStackSlot(&target_slot, local_index);
break;
}
if (!is_tee) __ cache_state()->stack_state.pop_back();
}
void LocalSet(FullDecoder* decoder, const Value& value,
const LocalIndexImmediate<validate>& imm) {
LocalSet(imm.index, false);
}
void LocalTee(FullDecoder* decoder, const Value& value, Value* result,
const LocalIndexImmediate<validate>& imm) {
LocalSet(imm.index, true);
}
void AllocateLocals(FullDecoder* decoder, Vector<Value> local_values) {
// TODO(7748): Introduce typed functions bailout reason
unsupported(decoder, kGC, "let");
}
void DeallocateLocals(FullDecoder* decoder, uint32_t count) {
// TODO(7748): Introduce typed functions bailout reason
unsupported(decoder, kGC, "let");
}
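  // Compute base address and offset of a global in untagged (non-reference)
  // storage. Imported mutable globals are reached indirectly via the
  // ImportedMutableGlobals array; all other globals live at a fixed offset
  // from GlobalsStart.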
Register GetGlobalBaseAndOffset(const WasmGlobal* global,
LiftoffRegList* pinned, uint32_t* offset) {
Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize,
*pinned);
__ Load(LiftoffRegister(addr), addr, no_reg,
global->index * sizeof(Address), kPointerLoadType, *pinned);
*offset = 0;
} else {
LOAD_INSTANCE_FIELD(addr, GlobalsStart, kSystemPointerSize, *pinned);
*offset = global->offset;
}
return addr;
}
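  // For an imported mutable reference global, the value lives in a FixedArray
  // taken from the ImportedMutableGlobalsBuffers list. Return that array as
  // {base}, and compute {offset} from the element index stored in the
  // ImportedMutableGlobals array.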
void GetBaseAndOffsetForImportedMutableExternRefGlobal(
const WasmGlobal* global, LiftoffRegList* pinned, Register* base,
Register* offset) {
Register globals_buffer =
pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer,
ImportedMutableGlobalsBuffers, *pinned);
*base = globals_buffer;
__ LoadTaggedPointer(
*base, globals_buffer, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(global->offset),
*pinned);
    // For the offset we need the global's index in the buffer; load that index
    // from the ImportedMutableGlobals array of the instance, then compute the
    // actual byte offset from it.
Register imported_mutable_globals =
pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
LOAD_INSTANCE_FIELD(imported_mutable_globals, ImportedMutableGlobals,
kSystemPointerSize, *pinned);
*offset = imported_mutable_globals;
__ Load(LiftoffRegister(*offset), imported_mutable_globals, no_reg,
global->index * sizeof(Address),
kSystemPointerSize == 4 ? LoadType::kI32Load : LoadType::kI64Load,
*pinned);
__ emit_i32_shli(*offset, *offset, kTaggedSizeLog2);
__ emit_i32_addi(*offset, *offset,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(0));
}
void GlobalGet(FullDecoder* decoder, Value* result,
const GlobalIndexImmediate<validate>& imm) {
const auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
if (!CheckSupportedType(decoder, kind, "global")) {
return;
}
if (is_reference(kind)) {
if (global->mutability && global->imported) {
LiftoffRegList pinned;
Register base = no_reg;
Register offset = no_reg;
GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
&base, &offset);
__ LoadTaggedPointer(base, base, offset, 0, pinned);
__ PushRegister(kind, LiftoffRegister(base));
return;
}
LiftoffRegList pinned;
Register globals_buffer =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
pinned);
Register value = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
__ LoadTaggedPointer(value, globals_buffer, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
imm.global->offset),
pinned);
__ PushRegister(kind, LiftoffRegister(value));
return;
}
LiftoffRegList pinned;
uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister value =
pinned.set(__ GetUnusedRegister(reg_class_for(kind), pinned));
LoadType type = LoadType::ForValueKind(kind);
__ Load(value, addr, no_reg, offset, type, pinned, nullptr, true);
__ PushRegister(kind, value);
}
void GlobalSet(FullDecoder* decoder, const Value& value,
const GlobalIndexImmediate<validate>& imm) {
auto* global = &env_->module->globals[imm.index];
ValueKind kind = global->type.kind();
if (!CheckSupportedType(decoder, kind, "global")) {
return;
}
if (is_reference(kind)) {
if (global->mutability && global->imported) {
LiftoffRegList pinned;
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
Register base = no_reg;
Register offset = no_reg;
GetBaseAndOffsetForImportedMutableExternRefGlobal(global, &pinned,
&base, &offset);
__ StoreTaggedPointer(base, offset, 0, value, pinned);
return;
}
LiftoffRegList pinned;
Register globals_buffer =
pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_TAGGED_PTR_INSTANCE_FIELD(globals_buffer, TaggedGlobalsBuffer,
pinned);
LiftoffRegister value = pinned.set(__ PopToRegister(pinned));
__ StoreTaggedPointer(globals_buffer, no_reg,
wasm::ObjectAccess::ElementOffsetInTaggedFixedArray(
imm.global->offset),
value, pinned);
return;
}
LiftoffRegList pinned;
uint32_t offset = 0;
Register addr = GetGlobalBaseAndOffset(global, &pinned, &offset);
LiftoffRegister reg = pinned.set(__ PopToRegister(pinned));
StoreType type = StoreType::ForValueKind(kind);
__ Store(addr, no_reg, offset, reg, type, {}, nullptr, true);
}
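  // table.get is not inlined: call the WasmTableGet runtime stub with the
  // table index and the element index, then push the returned element.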
void TableGet(FullDecoder* decoder, const Value&, Value*,
const TableIndexImmediate<validate>& imm) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(table_index_reg, WasmValue(imm.index));
LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
LiftoffAssembler::VarState index = __ cache_state()->stack_state.back();
ValueKind result_kind = env_->module->tables[imm.index].type.kind();
CallRuntimeStub(WasmCode::kWasmTableGet,
MakeSig::Returns(result_kind).Params(kI32, kI32),
{table_index, index}, decoder->position());
// Pop parameters from the value stack.
__ cache_state()->stack_state.pop_back(1);
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
__ PushRegister(result_kind, LiftoffRegister(kReturnRegister0));
}
void TableSet(FullDecoder* decoder, const Value&, const Value&,
const TableIndexImmediate<validate>& imm) {
LiftoffRegList pinned;
LiftoffRegister table_index_reg =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(table_index_reg, WasmValue(imm.index));
LiftoffAssembler::VarState table_index(kPointerKind, table_index_reg, 0);
LiftoffAssembler::VarState value = __ cache_state()->stack_state.end()[-1];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-2];
ValueKind table_kind = env_->module->tables[imm.index].type.kind();
CallRuntimeStub(WasmCode::kWasmTableSet,
MakeSig::Params(kI32, kI32, table_kind),
{table_index, index, value}, decoder->position());
// Pop parameters from the value stack.
__ cache_state()->stack_state.pop_back(2);
RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill);
}
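  // Map a wasm trap reason to the runtime stub that throws the corresponding
  // trap.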
WasmCode::RuntimeStubId GetRuntimeStubIdForTrapReason(TrapReason reason) {
switch (reason) {
#define RUNTIME_STUB_FOR_TRAP(trap_reason) \
case k##trap_reason: \
return WasmCode::kThrowWasm##trap_reason;
FOREACH_WASM_TRAPREASON(RUNTIME_STUB_FOR_TRAP)
#undef RUNTIME_STUB_FOR_TRAP
default:
UNREACHABLE();
}
}
void Trap(FullDecoder* decoder, TrapReason reason) {
Label* trap_label =
AddOutOfLineTrap(decoder, GetRuntimeStubIdForTrapReason(reason));
__ emit_jump(trap_label);
__ AssertUnreachable(AbortReason::kUnexpectedReturnFromWasmTrap);
}
void AssertNull(FullDecoder* decoder, const Value& arg, Value* result) {
LiftoffRegList pinned;
LiftoffRegister obj = pinned.set(__ PopToRegister(pinned));
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapNullDereference);
LiftoffRegister null = __ GetUnusedRegister(kGpReg, pinned);
LoadNullValue(null.gp(), pinned);
__ emit_cond_jump(kUnequal, trap_label, kOptRef, obj.gp(), null.gp());
__ PushRegister(kOptRef, obj);
}
void NopForTestingUnsupportedInLiftoff(FullDecoder* decoder) {
unsupported(decoder, kOtherReason, "testing opcode");
}
void Select(FullDecoder* decoder, const Value& cond, const Value& fval,
const Value& tval, Value* result) {
LiftoffRegList pinned;
Register condition = pinned.set(__ PopToRegister()).gp();
ValueKind kind = __ cache_state()->stack_state.end()[-1].kind();
DCHECK_EQ(kind, __ cache_state()->stack_state.end()[-2].kind());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
{true_value, false_value}, {});
if (!__ emit_select(dst, condition, true_value, false_value, kind)) {
// Emit generic code (using branches) instead.
Label cont;
Label case_false;
__ emit_cond_jump(kEqual, &case_false, kI32, condition);
if (dst != true_value) __ Move(dst, true_value, kind);
__ emit_jump(&cont);
__ bind(&case_false);
if (dst != false_value) __ Move(dst, false_value, kind);
__ bind(&cont);
}
__ PushRegister(kind, dst);
}
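  // Unconditional branch to {target}: initialize the target's merge state on
  // first use, merge the current cache state into it, and jump.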
void BrImpl(Control* target) {
if (!target->br_merge()->reached) {
target->label_state.InitMerge(
*__ cache_state(), __ num_locals(), target->br_merge()->arity,
target->stack_depth + target->num_exceptions);
}
__ MergeStackWith(target->label_state, target->br_merge()->arity,
target->is_loop() ? LiftoffAssembler::kBackwardJump
: LiftoffAssembler::kForwardJump);
__ jmp(target->label.get());
}
void BrOrRet(FullDecoder* decoder, uint32_t depth,
uint32_t /* drop_values */) {
if (depth == decoder->control_depth()