// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/maglev/maglev-graph-builder.h"
#include <algorithm>
#include <limits>
#include "src/base/bounds.h"
#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/v8-fallthrough.h"
#include "src/base/vector.h"
#include "src/builtins/builtins-constructor.h"
#include "src/builtins/builtins.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/interface-descriptors-inl.h"
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
#include "src/compiler/access-info.h"
#include "src/compiler/bytecode-liveness-map.h"
#include "src/compiler/compilation-dependencies.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/heap-refs.h"
#include "src/compiler/js-heap-broker-inl.h"
#include "src/compiler/processed-feedback.h"
#include "src/deoptimizer/deoptimize-reason.h"
#include "src/flags/flags.h"
#include "src/handles/maybe-handles-inl.h"
#include "src/ic/handler-configuration-inl.h"
#include "src/interpreter/bytecode-array-iterator.h"
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register.h"
#include "src/interpreter/bytecodes.h"
#include "src/maglev/maglev-compilation-info.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-interpreter-frame-state.h"
#include "src/maglev/maglev-ir.h"
#include "src/numbers/conversions.h"
#include "src/objects/elements-kind.h"
#include "src/objects/feedback-vector.h"
#include "src/objects/fixed-array.h"
#include "src/objects/heap-number-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/name-inl.h"
#include "src/objects/property-cell.h"
#include "src/objects/property-details.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/slots-inl.h"
#include "src/objects/type-hints.h"
#include "src/roots/roots.h"
#include "src/utils/utils.h"
#ifdef V8_INTL_SUPPORT
#include "src/objects/intl-objects.h"
#endif
namespace v8::internal::maglev {
namespace {
enum class CpuOperation {
kFloat64Round,
};
// TODO(leszeks): Add a generic mechanism for marking nodes as optionally
// supported.
bool IsSupported(CpuOperation op) {
switch (op) {
case CpuOperation::kFloat64Round:
#if defined(V8_TARGET_ARCH_X64)
return CpuFeatures::IsSupported(SSE4_1) || CpuFeatures::IsSupported(AVX);
#elif defined(V8_TARGET_ARCH_ARM)
return CpuFeatures::IsSupported(ARMv8);
#elif defined(V8_TARGET_ARCH_ARM64)
return true;
#else
#error "Maglev does not support this architecture."
#endif
}
}
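// Returns the parent context of `node` if `node` is known to create a new
// context (a CreateFunctionContext node, or a CallRuntime that pushes or
// creates a context), and nullptr otherwise.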
ValueNode* TryGetParentContext(ValueNode* node) {
if (CreateFunctionContext* n = node->TryCast<CreateFunctionContext>()) {
return n->context().node();
}
if (CallRuntime* n = node->TryCast<CallRuntime>()) {
switch (n->function_id()) {
case Runtime::kPushBlockContext:
case Runtime::kPushCatchContext:
case Runtime::kNewFunctionContext:
return n->context().node();
default:
break;
}
}
return nullptr;
}
// Attempts to walk up the context chain through the graph in order to reduce
// depth and thus the number of runtime loads.
void MinimizeContextChainDepth(ValueNode** context, size_t* depth) {
while (*depth > 0) {
ValueNode* parent_context = TryGetParentContext(*context);
if (parent_context == nullptr) return;
*context = parent_context;
(*depth)--;
}
}
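// Helper for resolving a context to a heap constant when the compilation is
// specialized to a concrete function context: walks up to `depth` levels from
// a constant context, decrementing `depth` accordingly.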
class FunctionContextSpecialization final : public AllStatic {
public:
static compiler::OptionalContextRef TryToRef(
const MaglevCompilationUnit* unit, ValueNode* context, size_t* depth) {
DCHECK(unit->info()->specialize_to_function_context());
if (Constant* n = context->TryCast<Constant>()) {
return n->ref().AsContext().previous(unit->broker(), depth);
}
return {};
}
};
} // namespace
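// Wrapper around the arguments of a call site. Depending on the receiver
// mode, the receiver is either implicitly null/undefined or stored as the
// first element of the argument vector; the accessors below hide that
// distinction.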
class CallArguments {
public:
enum Mode {
kDefault,
kWithSpread,
kWithArrayLike,
};
CallArguments(ConvertReceiverMode receiver_mode,
interpreter::RegisterList reglist,
const InterpreterFrameState& frame, Mode mode = kDefault)
: receiver_mode_(receiver_mode),
args_(reglist.register_count()),
mode_(mode) {
for (int i = 0; i < reglist.register_count(); i++) {
args_[i] = frame.get(reglist[i]);
}
DCHECK_IMPLIES(args_.size() == 0,
receiver_mode == ConvertReceiverMode::kNullOrUndefined);
DCHECK_IMPLIES(mode != kDefault,
receiver_mode == ConvertReceiverMode::kAny);
DCHECK_IMPLIES(mode == kWithArrayLike, args_.size() == 2);
}
explicit CallArguments(ConvertReceiverMode receiver_mode)
: receiver_mode_(receiver_mode), args_(), mode_(kDefault) {
DCHECK_EQ(receiver_mode, ConvertReceiverMode::kNullOrUndefined);
}
CallArguments(ConvertReceiverMode receiver_mode,
std::initializer_list<ValueNode*> args, Mode mode = kDefault)
: receiver_mode_(receiver_mode), args_(args), mode_(mode) {
DCHECK_IMPLIES(mode != kDefault,
receiver_mode == ConvertReceiverMode::kAny);
DCHECK_IMPLIES(mode == kWithArrayLike, args_.size() == 2);
}
CallArguments(ConvertReceiverMode receiver_mode,
base::SmallVector<ValueNode*, 8>&& args, Mode mode = kDefault)
: receiver_mode_(receiver_mode), args_(std::move(args)), mode_(mode) {
DCHECK_IMPLIES(mode != kDefault,
receiver_mode == ConvertReceiverMode::kAny);
DCHECK_IMPLIES(mode == kWithArrayLike, args_.size() == 2);
}
ValueNode* receiver() const {
if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) {
return nullptr;
}
return args_[0];
}
void set_receiver(ValueNode* receiver) {
if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) {
args_.insert(args_.data(), receiver);
receiver_mode_ = ConvertReceiverMode::kAny;
} else {
args_[0] = receiver;
}
}
size_t count() const {
if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) {
return args_.size();
}
return args_.size() - 1;
}
size_t count_with_receiver() const { return count() + 1; }
ValueNode* operator[](size_t i) const {
if (receiver_mode_ != ConvertReceiverMode::kNullOrUndefined) {
i++;
}
if (i >= args_.size()) return nullptr;
return args_[i];
}
void set_arg(size_t i, ValueNode* node) {
if (receiver_mode_ != ConvertReceiverMode::kNullOrUndefined) {
i++;
}
DCHECK_LT(i, args_.size());
args_[i] = node;
}
Mode mode() const { return mode_; }
ConvertReceiverMode receiver_mode() const { return receiver_mode_; }
void Truncate(size_t new_args_count) {
if (new_args_count >= count()) return;
size_t args_to_pop = count() - new_args_count;
for (size_t i = 0; i < args_to_pop; i++) {
args_.pop_back();
}
}
void PopReceiver(ConvertReceiverMode new_receiver_mode) {
DCHECK_NE(receiver_mode_, ConvertReceiverMode::kNullOrUndefined);
DCHECK_NE(new_receiver_mode, ConvertReceiverMode::kNullOrUndefined);
DCHECK_GT(args_.size(), 0); // We have at least a receiver to pop!
// TODO(victorgomes): Do this better!
for (size_t i = 0; i < args_.size() - 1; i++) {
args_[i] = args_[i + 1];
}
args_.pop_back();
// If there is no non-receiver argument to become the new receiver,
// consider the new receiver to be known undefined.
receiver_mode_ = args_.size() == 0 ? ConvertReceiverMode::kNullOrUndefined
: new_receiver_mode;
}
private:
ConvertReceiverMode receiver_mode_;
base::SmallVector<ValueNode*, 8> args_;
Mode mode_;
};
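// RAII scope that installs `feedback_source` as the builder's current call
// speculation feedback for the duration of the scope, and clears it again on
// destruction.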
class V8_NODISCARD MaglevGraphBuilder::CallSpeculationScope {
public:
CallSpeculationScope(MaglevGraphBuilder* builder,
compiler::FeedbackSource feedback_source)
: builder_(builder) {
DCHECK(!builder_->current_speculation_feedback_.IsValid());
if (feedback_source.IsValid()) {
DCHECK_EQ(
FeedbackNexus(feedback_source.vector, feedback_source.slot).kind(),
FeedbackSlotKind::kCall);
}
builder_->current_speculation_feedback_ = feedback_source;
}
~CallSpeculationScope() {
builder_->current_speculation_feedback_ = compiler::FeedbackSource();
}
private:
MaglevGraphBuilder* builder_;
};
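// RAII scope that saves the builder's current speculation feedback and clears
// it for the duration of the scope, restoring the saved value on destruction.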
class V8_NODISCARD MaglevGraphBuilder::SaveCallSpeculationScope {
public:
explicit SaveCallSpeculationScope(MaglevGraphBuilder* builder)
: builder_(builder) {
saved_ = builder_->current_speculation_feedback_;
builder_->current_speculation_feedback_ = compiler::FeedbackSource();
}
~SaveCallSpeculationScope() {
builder_->current_speculation_feedback_ = saved_;
}
const compiler::FeedbackSource& value() { return saved_; }
private:
compiler::FeedbackSource saved_;
MaglevGraphBuilder* builder_;
};
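// RAII scope that pushes an extra synthetic frame (a builtin continuation
// frame or a construct-stub invoke frame) onto the chain of frames used when
// building deopt frame states, for as long as the scope is active.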
class V8_NODISCARD MaglevGraphBuilder::DeoptFrameScope {
public:
DeoptFrameScope(MaglevGraphBuilder* builder, Builtin continuation,
compiler::OptionalJSFunctionRef maybe_js_target = {})
: builder_(builder),
parent_(builder->current_deopt_scope_),
data_(DeoptFrame::BuiltinContinuationFrameData{
continuation, {}, builder->GetContext(), maybe_js_target}) {
builder_->current_deopt_scope_ = this;
data_.get<DeoptFrame::BuiltinContinuationFrameData>().context->add_use();
DCHECK(data_.get<DeoptFrame::BuiltinContinuationFrameData>()
.parameters.empty());
}
DeoptFrameScope(MaglevGraphBuilder* builder, Builtin continuation,
compiler::OptionalJSFunctionRef maybe_js_target,
base::Vector<ValueNode* const> parameters)
: builder_(builder),
parent_(builder->current_deopt_scope_),
data_(DeoptFrame::BuiltinContinuationFrameData{
continuation, builder->zone()->CloneVector(parameters),
builder->GetContext(), maybe_js_target}) {
builder_->current_deopt_scope_ = this;
data_.get<DeoptFrame::BuiltinContinuationFrameData>().context->add_use();
for (ValueNode* node :
data_.get<DeoptFrame::BuiltinContinuationFrameData>().parameters) {
node->add_use();
}
}
DeoptFrameScope(MaglevGraphBuilder* builder, ValueNode* receiver)
: builder_(builder),
parent_(builder->current_deopt_scope_),
data_(DeoptFrame::ConstructInvokeStubFrameData{
*builder->compilation_unit(), builder->current_source_position_,
receiver, builder->GetContext()}) {
builder_->current_deopt_scope_ = this;
data_.get<DeoptFrame::ConstructInvokeStubFrameData>().receiver->add_use();
data_.get<DeoptFrame::ConstructInvokeStubFrameData>().context->add_use();
}
~DeoptFrameScope() {
builder_->current_deopt_scope_ = parent_;
// We might have cached a checkpointed frame which includes this scope;
// reset it just in case.
builder_->latest_checkpointed_frame_.reset();
}
DeoptFrameScope* parent() const { return parent_; }
DeoptFrame::FrameData& data() { return data_; }
const DeoptFrame::FrameData& data() const { return data_; }
private:
MaglevGraphBuilder* builder_;
DeoptFrameScope* parent_;
DeoptFrame::FrameData data_;
};
class MaglevGraphBuilder::MaglevSubGraphBuilder::Variable {
public:
explicit Variable(int index) : pseudo_register_(index) {}
private:
friend class MaglevSubGraphBuilder;
// Variables pretend to be interpreter registers as far as the dummy
// compilation unit and merge states are concerned.
interpreter::Register pseudo_register_;
};
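// A forward jump target within the subgraph. Tracks the merge state, the
// expected predecessor count, and which variables are live at the label.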
class MaglevGraphBuilder::MaglevSubGraphBuilder::Label {
public:
Label(MaglevSubGraphBuilder* sub_builder, int predecessor_count)
: predecessor_count_(predecessor_count),
liveness_(
sub_builder->builder_->zone()->New<compiler::BytecodeLivenessState>(
sub_builder->compilation_unit_->register_count(),
sub_builder->builder_->zone())) {}
Label(MaglevSubGraphBuilder* sub_builder, int predecessor_count,
std::initializer_list<Variable*> vars)
: Label(sub_builder, predecessor_count) {
for (Variable* var : vars) {
liveness_->MarkRegisterLive(var->pseudo_register_.index());
}
}
private:
explicit Label(MergePointInterpreterFrameState* merge_state,
BasicBlock* basic_block)
: merge_state_(merge_state), ref_(basic_block) {}
friend class MaglevSubGraphBuilder;
MergePointInterpreterFrameState* merge_state_ = nullptr;
int predecessor_count_ = -1;
compiler::BytecodeLivenessState* liveness_ = nullptr;
BasicBlockRef ref_;
};
class MaglevGraphBuilder::MaglevSubGraphBuilder::LoopLabel {
public:
private:
explicit LoopLabel(MergePointInterpreterFrameState* merge_state,
BasicBlock* loop_header)
: merge_state_(merge_state), loop_header_(loop_header) {}
friend class MaglevSubGraphBuilder;
MergePointInterpreterFrameState* merge_state_ = nullptr;
BasicBlock* loop_header_;
};
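// RAII helper that temporarily moves the parent builder's known node aspects
// into the pseudo frame around a merge (see the comment above
// TakeKnownNodeAspectsFromParent below).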
class MaglevGraphBuilder::MaglevSubGraphBuilder::BorrowParentKnownNodeAspects {
public:
explicit BorrowParentKnownNodeAspects(MaglevSubGraphBuilder* sub_builder)
: sub_builder_(sub_builder) {
sub_builder_->TakeKnownNodeAspectsFromParent();
}
~BorrowParentKnownNodeAspects() {
sub_builder_->MoveKnownNodeAspectsToParent();
}
private:
MaglevSubGraphBuilder* sub_builder_;
};
MaglevGraphBuilder::MaglevSubGraphBuilder::MaglevSubGraphBuilder(
MaglevGraphBuilder* builder, int variable_count)
: builder_(builder),
compilation_unit_(MaglevCompilationUnit::NewDummy(
builder->zone(), builder->compilation_unit(), variable_count, 0)),
pseudo_frame_(*compilation_unit_, nullptr) {
// The frame state unconditionally requires a context, so set it to the
// real context.
pseudo_frame_.set(interpreter::Register::current_context(),
builder_->current_interpreter_frame().get(
interpreter::Register::current_context()));
DCHECK_NULL(pseudo_frame_.known_node_aspects());
}
MaglevGraphBuilder::MaglevSubGraphBuilder::LoopLabel
MaglevGraphBuilder::MaglevSubGraphBuilder::BeginLoop(
std::initializer_list<Variable*> loop_vars) {
// Create fake liveness and loop info for the loop, with all given loop vars
// set to be live and assigned inside the loop.
compiler::BytecodeLivenessState* loop_header_liveness =
builder_->zone()->New<compiler::BytecodeLivenessState>(
compilation_unit_->register_count(), builder_->zone());
compiler::LoopInfo* loop_info = builder_->zone()->New<compiler::LoopInfo>(
-1, 0, kMaxInt, compilation_unit_->parameter_count(),
compilation_unit_->register_count(), builder_->zone());
for (Variable* var : loop_vars) {
loop_header_liveness->MarkRegisterLive(var->pseudo_register_.index());
loop_info->assignments().Add(var->pseudo_register_);
}
// Finish the current block, jumping (as a fallthrough) to the loop header.
BasicBlockRef loop_header_ref;
BasicBlock* loop_predecessor =
builder_->FinishBlock<Jump>({}, &loop_header_ref);
// Create a state for the loop header, with two predecessors (the above jump
// and the back edge), and initialise with the current state.
MergePointInterpreterFrameState* loop_state =
MergePointInterpreterFrameState::NewForLoop(
pseudo_frame_, *compilation_unit_, 0, 2, loop_header_liveness,
loop_info);
{
BorrowParentKnownNodeAspects borrow(this);
loop_state->Merge(builder_, *compilation_unit_, pseudo_frame_,
loop_predecessor);
}
// Start a new basic block for the loop.
DCHECK_NULL(pseudo_frame_.known_node_aspects());
pseudo_frame_.CopyFrom(*compilation_unit_, *loop_state);
MoveKnownNodeAspectsToParent();
builder_->ProcessMergePointPredecessors(*loop_state, loop_header_ref);
builder_->StartNewBlock(nullptr, loop_state, loop_header_ref);
return LoopLabel{loop_state, loop_header_ref.block_ptr()};
}
template <typename ControlNodeT, typename... Args>
void MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfTrue(
Label* true_target, std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
static_assert(IsConditionalControlNode(Node::opcode_of<ControlNodeT>));
BasicBlockRef fallthrough_ref;
// Pass through to FinishBlock, converting Labels to BasicBlockRefs and the
// fallthrough label to the fallthrough ref.
BasicBlock* block = builder_->FinishBlock<ControlNodeT>(
control_inputs, std::forward<Args>(args)..., &true_target->ref_,
&fallthrough_ref);
MergeIntoLabel(true_target, block);
builder_->StartNewBlock(block, nullptr, fallthrough_ref);
}
template <typename ControlNodeT, typename... Args>
void MaglevGraphBuilder::MaglevSubGraphBuilder::GotoIfFalse(
Label* false_target, std::initializer_list<ValueNode*> control_inputs,
Args&&... args) {
static_assert(IsConditionalControlNode(Node::opcode_of<ControlNodeT>));
BasicBlockRef fallthrough_ref;
// Pass through to FinishBlock, converting Labels to BasicBlockRefs and the
// fallthrough label to the fallthrough ref.
BasicBlock* block = builder_->FinishBlock<ControlNodeT>(
control_inputs, std::forward<Args>(args)..., &fallthrough_ref,
&false_target->ref_);
MergeIntoLabel(false_target, block);
builder_->StartNewBlock(block, nullptr, fallthrough_ref);
}
void MaglevGraphBuilder::MaglevSubGraphBuilder::GotoOrTrim(Label* label) {
if (builder_->current_block_ == nullptr) {
ReducePredecessorCount(label);
return;
}
Goto(label);
}
void MaglevGraphBuilder::MaglevSubGraphBuilder::Goto(Label* label) {
CHECK_NOT_NULL(builder_->current_block_);
BasicBlock* block = builder_->FinishBlock<Jump>({}, &label->ref_);
MergeIntoLabel(label, block);
}
void MaglevGraphBuilder::MaglevSubGraphBuilder::ReducePredecessorCount(
Label* label, unsigned num) {
DCHECK_GE(label->predecessor_count_, num);
if (num == 0) {
return;
}
label->predecessor_count_ -= num;
if (label->merge_state_ != nullptr) {
label->merge_state_->MergeDead(*compilation_unit_, num);
}
}
void MaglevGraphBuilder::MaglevSubGraphBuilder::EndLoop(LoopLabel* loop_label) {
if (builder_->current_block_ == nullptr) {
loop_label->merge_state_->MergeDeadLoop(*compilation_unit_);
return;
}
BasicBlock* block =
builder_->FinishBlock<JumpLoop>({}, loop_label->loop_header_);
{
BorrowParentKnownNodeAspects borrow(this);
loop_label->merge_state_->MergeLoop(builder_, *compilation_unit_,
pseudo_frame_, block);
}
block->set_predecessor_id(loop_label->merge_state_->predecessor_count() - 1);
}
ReduceResult MaglevGraphBuilder::MaglevSubGraphBuilder::TrimPredecessorsAndBind(
Label* label) {
DCHECK_LE(label->merge_state_->predecessors_so_far(),
label->predecessor_count_);
builder_->current_block_ = nullptr;
ReducePredecessorCount(label, label->predecessor_count_ -
label->merge_state_->predecessors_so_far());
if (label->merge_state_->predecessors_so_far() == 0) {
return ReduceResult::DoneWithAbort();
}
Bind(label);
return ReduceResult::Done();
}
void MaglevGraphBuilder::MaglevSubGraphBuilder::Bind(Label* label) {
DCHECK_NULL(builder_->current_block_);
DCHECK_NULL(pseudo_frame_.known_node_aspects());
pseudo_frame_.CopyFrom(*compilation_unit_, *label->merge_state_);
MoveKnownNodeAspectsToParent();
builder_->ProcessMergePointPredecessors(*label->merge_state_, label->ref_);
builder_->StartNewBlock(nullptr, label->merge_state_, label->ref_);
}
void MaglevGraphBuilder::MaglevSubGraphBuilder::set(Variable& var,
ValueNode* value) {
pseudo_frame_.set(var.pseudo_register_, value);
}
ValueNode* MaglevGraphBuilder::MaglevSubGraphBuilder::get(
const Variable& var) const {
return pseudo_frame_.get(var.pseudo_register_);
}
// Known node aspects for the pseudo frame are null aside from when merging --
// before each merge, we should borrow the node aspects from the parent
// builder, and after each merge point, we should copy the node aspects back
// to the parent. This is so that the parent graph builder can update its own
// known node aspects without having to worry about this pseudo frame.
void MaglevGraphBuilder::MaglevSubGraphBuilder::
TakeKnownNodeAspectsFromParent() {
DCHECK_NULL(pseudo_frame_.known_node_aspects());
pseudo_frame_.set_known_node_aspects(
builder_->current_interpreter_frame_.known_node_aspects());
}
void MaglevGraphBuilder::MaglevSubGraphBuilder::MoveKnownNodeAspectsToParent() {
DCHECK_NOT_NULL(pseudo_frame_.known_node_aspects());
builder_->current_interpreter_frame_.set_known_node_aspects(
pseudo_frame_.known_node_aspects());
pseudo_frame_.clear_known_node_aspects();
}
void MaglevGraphBuilder::MaglevSubGraphBuilder::MergeIntoLabel(
Label* label, BasicBlock* predecessor) {
BorrowParentKnownNodeAspects borrow(this);
if (label->merge_state_ == nullptr) {
// If there's no merge state, allocate a new one.
label->merge_state_ = MergePointInterpreterFrameState::New(
*compilation_unit_, pseudo_frame_, 0, label->predecessor_count_,
predecessor, label->liveness_);
} else {
// If there already is a frame state, merge.
label->merge_state_->Merge(builder_, *compilation_unit_, pseudo_frame_,
predecessor);
}
}
MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate,
MaglevCompilationUnit* compilation_unit,
Graph* graph, float call_frequency,
BytecodeOffset caller_bytecode_offset,
int inlining_id,
MaglevGraphBuilder* parent)
: local_isolate_(local_isolate),
compilation_unit_(compilation_unit),
parent_(parent),
graph_(graph),
bytecode_analysis_(bytecode().object(), zone(),
compilation_unit->osr_offset(), true),
iterator_(bytecode().object()),
source_position_iterator_(bytecode().SourcePositionTable(broker())),
allow_loop_peeling_(
// For OSR we favor compilation speed over everything.
!compilation_unit->is_osr() &&
(is_inline() ? parent_->allow_loop_peeling_
: v8_flags.maglev_loop_peeling)),
decremented_predecessor_offsets_(zone()),
loop_headers_to_peel_(bytecode().length(), zone()),
call_frequency_(call_frequency),
// Add an extra jump_target slot for the inline exit if needed.
jump_targets_(zone()->AllocateArray<BasicBlockRef>(
bytecode().length() + (is_inline() ? 1 : 0))),
// Overallocate merge_states_ by one to allow always looking up the
// next offset. This overallocated slot can also be used for the inline
// exit when needed.
merge_states_(zone()->AllocateArray<MergePointInterpreterFrameState*>(
bytecode().length() + 1)),
current_interpreter_frame_(
*compilation_unit_,
is_inline() ? parent->current_interpreter_frame_.known_node_aspects()
: compilation_unit_->zone()->New<KnownNodeAspects>(
compilation_unit_->zone())),
caller_bytecode_offset_(caller_bytecode_offset),
entrypoint_(compilation_unit->is_osr()
? bytecode_analysis_.osr_entry_point()
: 0),
inlining_id_(inlining_id),
catch_block_stack_(zone()) {
memset(merge_states_, 0,
(bytecode().length() + 1) * sizeof(InterpreterFrameState*));
// Default construct basic block refs.
// TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_.
for (int i = 0; i < bytecode().length(); ++i) {
new (&jump_targets_[i]) BasicBlockRef();
}
if (is_inline()) {
DCHECK_NOT_NULL(parent_);
DCHECK_GT(compilation_unit->inlining_depth(), 0);
// The allocation/initialisation logic here relies on inline_exit_offset
// being the offset one past the end of the bytecode.
DCHECK_EQ(inline_exit_offset(), bytecode().length());
merge_states_[inline_exit_offset()] = nullptr;
new (&jump_targets_[inline_exit_offset()]) BasicBlockRef();
}
CHECK_IMPLIES(compilation_unit_->is_osr(), graph_->is_osr());
CHECK_EQ(compilation_unit_->info()->toplevel_osr_offset() !=
BytecodeOffset::None(),
graph_->is_osr());
if (compilation_unit_->is_osr()) {
CHECK(!is_inline());
#ifdef DEBUG
// OSR'ing into the middle of a loop is currently not supported. There
// should not be any issue with OSR'ing outside of loops; we just don't
// currently do it...
iterator_.SetOffset(compilation_unit_->osr_offset().ToInt());
DCHECK_EQ(iterator_.current_bytecode(), interpreter::Bytecode::kJumpLoop);
DCHECK_EQ(entrypoint_, iterator_.GetJumpTargetOffset());
iterator_.SetOffset(entrypoint_);
#endif
if (v8_flags.trace_maglev_graph_building) {
std::cerr << "- Non-standard entrypoint @" << entrypoint_
<< " by OSR from @" << compilation_unit_->osr_offset().ToInt()
<< std::endl;
}
}
CHECK_IMPLIES(!compilation_unit_->is_osr(), entrypoint_ == 0);
CalculatePredecessorCounts();
}
void MaglevGraphBuilder::StartPrologue() {
current_block_ = zone()->New<BasicBlock>(nullptr, zone());
}
BasicBlock* MaglevGraphBuilder::EndPrologue() {
BasicBlock* first_block;
if (!is_inline() &&
(v8_flags.maglev_hoist_osr_value_phi_untagging && graph_->is_osr())) {
first_block =
FinishBlock<CheckpointedJump>({}, &jump_targets_[entrypoint_]);
} else {
first_block = FinishBlock<Jump>({}, &jump_targets_[entrypoint_]);
}
MergeIntoFrameState(first_block, entrypoint_);
return first_block;
}
void MaglevGraphBuilder::SetArgument(int i, ValueNode* value) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
current_interpreter_frame_.set(reg, value);
}
ValueNode* MaglevGraphBuilder::GetTaggedArgument(int i) {
interpreter::Register reg = interpreter::Register::FromParameterIndex(i);
return GetTaggedValue(reg);
}
void MaglevGraphBuilder::InitializeRegister(interpreter::Register reg,
ValueNode* value) {
current_interpreter_frame_.set(
reg, value ? value : AddNewNode<InitialValue>({}, reg));
}
void MaglevGraphBuilder::BuildRegisterFrameInitialization(
ValueNode* context, ValueNode* closure, ValueNode* new_target) {
if (closure == nullptr &&
compilation_unit_->info()->specialize_to_function_context()) {
compiler::JSFunctionRef function = compiler::MakeRefAssumeMemoryFence(
broker(), broker()->CanonicalPersistentHandle(
compilation_unit_->info()->toplevel_function()));
closure = GetConstant(function);
context = GetConstant(function.context(broker()));
}
InitializeRegister(interpreter::Register::current_context(), context);
InitializeRegister(interpreter::Register::function_closure(), closure);
interpreter::Register new_target_or_generator_register =
bytecode().incoming_new_target_or_generator_register();
int register_index = 0;
if (compilation_unit_->is_osr()) {
for (; register_index < register_count(); register_index++) {
auto val =
AddNewNode<InitialValue>({}, interpreter::Register(register_index));
InitializeRegister(interpreter::Register(register_index), val);
graph_->osr_values().push_back(val);
}
return;
}
// TODO(leszeks): Don't emit if not needed.
ValueNode* undefined_value = GetRootConstant(RootIndex::kUndefinedValue);
if (new_target_or_generator_register.is_valid()) {
int new_target_index = new_target_or_generator_register.index();
for (; register_index < new_target_index; register_index++) {
current_interpreter_frame_.set(interpreter::Register(register_index),
undefined_value);
}
current_interpreter_frame_.set(
new_target_or_generator_register,
new_target ? new_target
: GetRegisterInput(kJavaScriptCallNewTargetRegister));
register_index++;
}
for (; register_index < register_count(); register_index++) {
InitializeRegister(interpreter::Register(register_index), undefined_value);
}
}
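// Pre-creates merge point states for all loop headers reachable from the
// entrypoint (except peeled ones, which are handled when their JumpLoop is
// reached) and for all exception handler entries.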
void MaglevGraphBuilder::BuildMergeStates() {
auto offset_and_info = bytecode_analysis().GetLoopInfos().begin();
auto end = bytecode_analysis().GetLoopInfos().end();
while (offset_and_info != end && offset_and_info->first < entrypoint_) {
++offset_and_info;
}
for (; offset_and_info != end; ++offset_and_info) {
int offset = offset_and_info->first;
const compiler::LoopInfo& loop_info = offset_and_info->second;
if (loop_headers_to_peel_.Contains(offset)) {
// Peeled loops are treated like normal merges at first. We will construct
// the proper loop header merge state when reaching the `JumpLoop` of the
// peeled iteration.
continue;
}
const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset);
DCHECK_NULL(merge_states_[offset]);
if (v8_flags.trace_maglev_graph_building) {
std::cout << "- Creating loop merge state at @" << offset << std::endl;
}
merge_states_[offset] = MergePointInterpreterFrameState::NewForLoop(
current_interpreter_frame_, *compilation_unit_, offset,
NumPredecessors(offset), liveness, &loop_info);
}
if (bytecode().handler_table_size() > 0) {
HandlerTable table(*bytecode().object());
for (int i = 0; i < table.NumberOfRangeEntries(); i++) {
const int offset = table.GetRangeHandler(i);
const interpreter::Register context_reg(table.GetRangeData(i));
const compiler::BytecodeLivenessState* liveness =
GetInLivenessFor(offset);
DCHECK_EQ(NumPredecessors(offset), 0);
DCHECK_NULL(merge_states_[offset]);
if (v8_flags.trace_maglev_graph_building) {
std::cout << "- Creating exception merge state at @" << offset
<< ", context register r" << context_reg.index() << std::endl;
}
merge_states_[offset] = MergePointInterpreterFrameState::NewForCatchBlock(
*compilation_unit_, liveness, offset, context_reg, graph_);
}
}
}
namespace {
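// Helper templates that scan a bytecode's operand type list for its output
// register operand (kRegOut/kRegOutPair/kRegOutTriple/kRegOutList) and return
// the corresponding register and register count, for lazy deopt bookkeeping.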
template <int index, interpreter::OperandType... operands>
struct GetResultLocationAndSizeHelper;
// Terminal cases
template <int index>
struct GetResultLocationAndSizeHelper<index> {
static std::pair<interpreter::Register, int> GetResultLocationAndSize(
const interpreter::BytecodeArrayIterator& iterator) {
// TODO(leszeks): This should probably actually be "UNREACHABLE", but we
// emit lazy deopt info for interrupt budget updates at returns, which is
// used only for stack iteration, not for actual lazy deopts.
return {interpreter::Register::invalid_value(), 0};
}
static bool HasOutputRegisterOperand() { return false; }
};
template <int index, interpreter::OperandType... operands>
struct GetResultLocationAndSizeHelper<index, interpreter::OperandType::kRegOut,
operands...> {
static std::pair<interpreter::Register, int> GetResultLocationAndSize(
const interpreter::BytecodeArrayIterator& iterator) {
// We shouldn't have any other output operands than this one.
return {iterator.GetRegisterOperand(index), 1};
}
static bool HasOutputRegisterOperand() { return true; }
};
template <int index, interpreter::OperandType... operands>
struct GetResultLocationAndSizeHelper<
index, interpreter::OperandType::kRegOutPair, operands...> {
static std::pair<interpreter::Register, int> GetResultLocationAndSize(
const interpreter::BytecodeArrayIterator& iterator) {
// We shouldn't have any other output operands than this one.
return {iterator.GetRegisterOperand(index), 2};
}
static bool HasOutputRegisterOperand() { return true; }
};
template <int index, interpreter::OperandType... operands>
struct GetResultLocationAndSizeHelper<
index, interpreter::OperandType::kRegOutTriple, operands...> {
static std::pair<interpreter::Register, int> GetResultLocationAndSize(
const interpreter::BytecodeArrayIterator& iterator) {
// We shouldn't have any other output operands than this one.
DCHECK(!(GetResultLocationAndSizeHelper<
index + 1, operands...>::HasOutputRegisterOperand()));
return {iterator.GetRegisterOperand(index), 3};
}
static bool HasOutputRegisterOperand() { return true; }
};
// We don't support RegOutList for lazy deopts.
template <int index, interpreter::OperandType... operands>
struct GetResultLocationAndSizeHelper<
index, interpreter::OperandType::kRegOutList, operands...> {
static std::pair<interpreter::Register, int> GetResultLocationAndSize(
const interpreter::BytecodeArrayIterator& iterator) {
interpreter::RegisterList list = iterator.GetRegisterListOperand(index);
return {list.first_register(), list.register_count()};
}
static bool HasOutputRegisterOperand() { return true; }
};
// Induction case.
template <int index, interpreter::OperandType operand,
interpreter::OperandType... operands>
struct GetResultLocationAndSizeHelper<index, operand, operands...> {
static std::pair<interpreter::Register, int> GetResultLocationAndSize(
const interpreter::BytecodeArrayIterator& iterator) {
return GetResultLocationAndSizeHelper<
index + 1, operands...>::GetResultLocationAndSize(iterator);
}
static bool HasOutputRegisterOperand() {
return GetResultLocationAndSizeHelper<
index + 1, operands...>::HasOutputRegisterOperand();
}
};
template <interpreter::Bytecode bytecode,
interpreter::ImplicitRegisterUse implicit_use,
interpreter::OperandType... operands>
std::pair<interpreter::Register, int> GetResultLocationAndSizeForBytecode(
const interpreter::BytecodeArrayIterator& iterator) {
// We don't support bytecodes that implicitly write a register here.
DCHECK(!interpreter::BytecodeOperands::WritesImplicitRegister(implicit_use));
if (interpreter::BytecodeOperands::WritesAccumulator(implicit_use)) {
// If we write the accumulator, we shouldn't also write an output register.
DCHECK(!(GetResultLocationAndSizeHelper<
0, operands...>::HasOutputRegisterOperand()));
return {interpreter::Register::virtual_accumulator(), 1};
}
// Use template magic to emit the appropriate GetRegisterOperand call and
// result size for this bytecode.
return GetResultLocationAndSizeHelper<
0, operands...>::GetResultLocationAndSize(iterator);
}
} // namespace
std::pair<interpreter::Register, int>
MaglevGraphBuilder::GetResultLocationAndSize() const {
using Bytecode = interpreter::Bytecode;
using OperandType = interpreter::OperandType;
using ImplicitRegisterUse = interpreter::ImplicitRegisterUse;
Bytecode bytecode = iterator_.current_bytecode();
// TODO(leszeks): Only emit these cases for bytecodes we know can lazy deopt.
switch (bytecode) {
#define CASE(Name, ...) \
case Bytecode::k##Name: \
return GetResultLocationAndSizeForBytecode<Bytecode::k##Name, \
__VA_ARGS__>(iterator_);
BYTECODE_LIST(CASE)
#undef CASE
}
UNREACHABLE();
}
#ifdef DEBUG
bool MaglevGraphBuilder::HasOutputRegister(interpreter::Register reg) const {
interpreter::Bytecode bytecode = iterator_.current_bytecode();
if (reg == interpreter::Register::virtual_accumulator()) {
return interpreter::Bytecodes::WritesAccumulator(bytecode);
}
for (int i = 0; i < interpreter::Bytecodes::NumberOfOperands(bytecode); ++i) {
if (interpreter::Bytecodes::IsRegisterOutputOperandType(
interpreter::Bytecodes::GetOperandType(bytecode, i))) {
interpreter::Register operand_reg = iterator_.GetRegisterOperand(i);
int operand_range = iterator_.GetRegisterOperandRange(i);
if (base::IsInRange(reg.index(), operand_reg.index(),
operand_reg.index() + operand_range)) {
return true;
}
}
}
return false;
}
#endif
DeoptFrame* MaglevGraphBuilder::GetParentDeoptFrame() {
if (parent_ == nullptr) return nullptr;
if (parent_deopt_frame_ == nullptr) {
// The parent resumes after the call, which is roughly equivalent to a lazy
// deopt. Use the helper function directly so that we can mark the
// accumulator as dead (since it'll be overwritten by this function's
// return value anyway).
// TODO(leszeks): This is true for our current set of
// inlinings/continuations, but there might be cases in the future where it
// isn't. We may need to store the relevant overwritten register in
// LazyDeoptFrameScope.
DCHECK(interpreter::Bytecodes::WritesAccumulator(
parent_->iterator_.current_bytecode()));
parent_deopt_frame_ =
zone()->New<DeoptFrame>(parent_->GetDeoptFrameForLazyDeoptHelper(
interpreter::Register::invalid_value(), 0,
parent_->current_deopt_scope_, true));
if (inlined_arguments_) {
parent_deopt_frame_ = zone()->New<InlinedArgumentsDeoptFrame>(
*compilation_unit_, caller_bytecode_offset_, GetClosure(),
*inlined_arguments_, parent_deopt_frame_);
GetClosure()->add_use();
for (ValueNode* arg : *inlined_arguments_) {
arg->add_use();
}
}
}
return parent_deopt_frame_;
}
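// Builds (and caches) the deopt frame describing the state at the start of the
// current bytecode, wrapping it in the active DeoptFrameScope's continuation
// frame if there is one. This is the frame used for eager deopts.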
DeoptFrame MaglevGraphBuilder::GetLatestCheckpointedFrame() {
if (in_prologue_) {
return GetDeoptFrameForEntryStackCheck();
}
if (!latest_checkpointed_frame_) {
latest_checkpointed_frame_.emplace(InterpretedDeoptFrame(
*compilation_unit_,
zone()->New<CompactInterpreterFrameState>(
*compilation_unit_, GetInLiveness(), current_interpreter_frame_),
GetClosure(), BytecodeOffset(iterator_.current_offset()),
current_source_position_, GetParentDeoptFrame()));
latest_checkpointed_frame_->as_interpreted().frame_state()->ForEachValue(
*compilation_unit_,
[](ValueNode* node, interpreter::Register) { node->add_use(); });
latest_checkpointed_frame_->as_interpreted().closure()->add_use();
if (current_deopt_scope_ != nullptr) {
// Support exactly one eager deopt builtin continuation. This can be
// expanded in the future if necessary.
DCHECK_NULL(current_deopt_scope_->parent());
DCHECK_EQ(current_deopt_scope_->data().tag(),
DeoptFrame::FrameType::kBuiltinContinuationFrame);
#ifdef DEBUG
if (current_deopt_scope_->data().tag() ==
DeoptFrame::FrameType::kBuiltinContinuationFrame) {
const DeoptFrame::BuiltinContinuationFrameData& frame =
current_deopt_scope_->data()
.get<DeoptFrame::BuiltinContinuationFrameData>();
if (frame.maybe_js_target) {
int stack_parameter_count =
Builtins::GetStackParameterCount(frame.builtin_id);
DCHECK_EQ(stack_parameter_count, frame.parameters.length());
} else {
CallInterfaceDescriptor descriptor =
Builtins::CallInterfaceDescriptorFor(frame.builtin_id);
DCHECK_EQ(descriptor.GetParameterCount(), frame.parameters.length());
}
}
#endif
// Wrap the above frame in the scope frame.
latest_checkpointed_frame_.emplace(
current_deopt_scope_->data(),
zone()->New<DeoptFrame>(*latest_checkpointed_frame_));
}
}
return *latest_checkpointed_frame_;
}
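// Builds the deopt frame used when lazily deopting after the current bytecode.
// Values that fall inside the result location are not marked as used, since
// the deoptimizer will overwrite them with the result anyway.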
DeoptFrame MaglevGraphBuilder::GetDeoptFrameForLazyDeopt(
interpreter::Register result_location, int result_size) {
return GetDeoptFrameForLazyDeoptHelper(result_location, result_size,
current_deopt_scope_, false);
}
DeoptFrame MaglevGraphBuilder::GetDeoptFrameForLazyDeoptHelper(
interpreter::Register result_location, int result_size,
DeoptFrameScope* scope, bool mark_accumulator_dead) {
if (scope == nullptr) {
// Potentially copy the out liveness if we want to explicitly drop the
// accumulator.
const compiler::BytecodeLivenessState* liveness = GetOutLiveness();
if (mark_accumulator_dead && liveness->AccumulatorIsLive()) {
compiler::BytecodeLivenessState* liveness_copy =
zone()->New<compiler::BytecodeLivenessState>(*liveness, zone());
liveness_copy->MarkAccumulatorDead();
liveness = liveness_copy;
}
InterpretedDeoptFrame ret(
*compilation_unit_,
zone()->New<CompactInterpreterFrameState>(*compilation_unit_, liveness,
current_interpreter_frame_),
GetClosure(), BytecodeOffset(iterator_.current_offset()),
current_source_position_, GetParentDeoptFrame());
ret.frame_state()->ForEachValue(
*compilation_unit_, [result_location, result_size](
ValueNode* node, interpreter::Register reg) {
if (result_size == 0 ||
!base::IsInRange(reg.index(), result_location.index(),
result_location.index() + result_size - 1)) {
node->add_use();
}
});
ret.closure()->add_use();
return ret;
}
// We currently only support builtin continuations for bytecodes that write
// to the accumulator.
DCHECK(
interpreter::Bytecodes::WritesAccumulator(iterator_.current_bytecode()));
#ifdef DEBUG
if (scope->data().tag() == DeoptFrame::FrameType::kBuiltinContinuationFrame) {
const DeoptFrame::BuiltinContinuationFrameData& frame =
current_deopt_scope_->data()
.get<DeoptFrame::BuiltinContinuationFrameData>();
if (frame.maybe_js_target) {
int stack_parameter_count =
Builtins::GetStackParameterCount(frame.builtin_id);
// The deopt input value is passed by the deoptimizer, so shouldn't be a
// parameter here.
DCHECK_EQ(stack_parameter_count, frame.parameters.length() + 1);
} else {
CallInterfaceDescriptor descriptor =
Builtins::CallInterfaceDescriptorFor(frame.builtin_id);
// The deopt input value is passed by the deoptimizer, so shouldn't be a
// parameter here.
DCHECK_EQ(descriptor.GetParameterCount(), frame.parameters.length() + 1);
// The deopt input value is passed on the stack.
DCHECK_GT(descriptor.GetStackParameterCount(), 0);
}
}
#endif
// Mark the accumulator dead in parent frames since we know that the
// continuation will write it.
return DeoptFrame(scope->data(),
zone()->New<DeoptFrame>(GetDeoptFrameForLazyDeoptHelper(
result_location, result_size, scope->parent(),
scope->data().tag() ==
DeoptFrame::FrameType::kBuiltinContinuationFrame)));
}
InterpretedDeoptFrame MaglevGraphBuilder::GetDeoptFrameForEntryStackCheck() {
if (entry_stack_check_frame_) return *entry_stack_check_frame_;
DCHECK_EQ(iterator_.current_offset(), entrypoint_);
DCHECK_NULL(parent_);
entry_stack_check_frame_.emplace(
*compilation_unit_,
zone()->New<CompactInterpreterFrameState>(
*compilation_unit_,
GetInLivenessFor(graph_->is_osr() ? bailout_for_entrypoint() : 0),
current_interpreter_frame_),
GetClosure(), BytecodeOffset(bailout_for_entrypoint()),
current_source_position_, nullptr);
(*entry_stack_check_frame_)
.frame_state()
->ForEachValue(
*compilation_unit_,
[](ValueNode* node, interpreter::Register) { node->add_use(); });
(*entry_stack_check_frame_).closure()->add_use();
return *entry_stack_check_frame_;
}
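// Returns a tagged version of `value`, inserting a conversion from its current
// value representation if necessary and caching the result as the node's
// tagged alternative.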
ValueNode* MaglevGraphBuilder::GetTaggedValue(
ValueNode* value, UseReprHintRecording record_use_repr_hint) {
if (V8_LIKELY(record_use_repr_hint == UseReprHintRecording::kRecord)) {
RecordUseReprHintIfPhi(value, UseRepresentation::kTagged);
}
ValueRepresentation representation =
value->properties().value_representation();
if (representation == ValueRepresentation::kTagged) return value;
NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
auto& alternative = node_info->alternative();
if (ValueNode* alt = alternative.tagged()) {
return alt;
}
switch (representation) {
case ValueRepresentation::kInt32: {
if (NodeTypeIsSmi(node_info->type())) {
return alternative.set_tagged(AddNewNode<UnsafeSmiTag>({value}));
}
return alternative.set_tagged(AddNewNode<Int32ToNumber>({value}));
}
case ValueRepresentation::kUint32: {
if (NodeTypeIsSmi(node_info->type())) {
return alternative.set_tagged(AddNewNode<UnsafeSmiTag>({value}));
}
return alternative.set_tagged(AddNewNode<Uint32ToNumber>({value}));
}
case ValueRepresentation::kFloat64: {
return alternative.set_tagged(AddNewNode<Float64ToTagged>({value}));
}
case ValueRepresentation::kHoleyFloat64: {
return alternative.set_tagged(AddNewNode<HoleyFloat64ToTagged>({value}));
}
case ValueRepresentation::kTagged:
case ValueRepresentation::kIntPtr:
UNREACHABLE();
}
UNREACHABLE();
}
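// Returns `value` as a tagged Smi, inserting a Smi check or a checked Smi-tag
// conversion as needed, and caching the result as the tagged alternative.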
ReduceResult MaglevGraphBuilder::GetSmiValue(
ValueNode* value, UseReprHintRecording record_use_repr_hint) {
if (V8_LIKELY(record_use_repr_hint == UseReprHintRecording::kRecord)) {
RecordUseReprHintIfPhi(value, UseRepresentation::kTagged);
}
NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
ValueRepresentation representation =
value->properties().value_representation();
if (representation == ValueRepresentation::kTagged) {
return BuildCheckSmi(value, !value->Is<Phi>());
}
auto& alternative = node_info->alternative();
if (ValueNode* alt = alternative.tagged()) {
return BuildCheckSmi(alt, !value->Is<Phi>());
}
switch (representation) {
case ValueRepresentation::kInt32: {
if (NodeTypeIsSmi(node_info->type())) {
return alternative.set_tagged(AddNewNode<UnsafeSmiTag>({value}));
}
return alternative.set_tagged(AddNewNode<CheckedSmiTagInt32>({value}));
}
case ValueRepresentation::kUint32: {
if (NodeTypeIsSmi(node_info->type())) {
return alternative.set_tagged(AddNewNode<UnsafeSmiTag>({value}));
}
return alternative.set_tagged(AddNewNode<CheckedSmiTagUint32>({value}));
}
case ValueRepresentation::kFloat64: {
return alternative.set_tagged(AddNewNode<CheckedSmiTagFloat64>({value}));
}
case ValueRepresentation::kHoleyFloat64: {
return alternative.set_tagged(AddNewNode<CheckedSmiTagFloat64>({value}));
}
case ValueRepresentation::kTagged:
case ValueRepresentation::kIntPtr:
UNREACHABLE();
}
UNREACHABLE();
}
namespace {
CheckType GetCheckType(NodeType type) {
return NodeTypeIs(type, NodeType::kAnyHeapObject)
? CheckType::kOmitHeapObjectCheck
: CheckType::kCheckHeapObject;
}
} // namespace
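// Returns the value of `reg` as an internalized string, inserting a
// CheckedInternalizedString conversion if it is not already known to be one.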
ValueNode* MaglevGraphBuilder::GetInternalizedString(
interpreter::Register reg) {
ValueNode* node = GetTaggedValue(reg);
NodeType old_type;
if (CheckType(node, NodeType::kInternalizedString, &old_type)) return node;
if (!NodeTypeIs(old_type, NodeType::kString)) {
NodeInfo* known_info = known_node_aspects().GetOrCreateInfoFor(node);
known_info->CombineType(NodeType::kString);
}
node = AddNewNode<CheckedInternalizedString>({node}, GetCheckType(old_type));
current_interpreter_frame_.set(reg, node);
return node;
}
namespace {
NodeType ToNumberHintToNodeType(ToNumberHint conversion_type) {
switch (conversion_type) {
case ToNumberHint::kAssumeSmi:
return NodeType::kSmi;
case ToNumberHint::kDisallowToNumber:
case ToNumberHint::kAssumeNumber:
return NodeType::kNumber;
case ToNumberHint::kAssumeNumberOrOddball:
return NodeType::kNumberOrOddball;
}
}
TaggedToFloat64ConversionType ToNumberHintToConversionType(
ToNumberHint conversion_type) {
switch (conversion_type) {
case ToNumberHint::kAssumeSmi:
UNREACHABLE();
case ToNumberHint::kDisallowToNumber:
case ToNumberHint::kAssumeNumber:
return TaggedToFloat64ConversionType::kOnlyNumber;
case ToNumberHint::kAssumeNumberOrOddball:
return TaggedToFloat64ConversionType::kNumberOrOddball;
}
}
} // namespace
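// Returns an int32 node holding the ToInt32 truncation of `value`'s numeric
// value (e.g. -1.5 truncates to -1; NaN and the hole truncate to 0),
// constant-folding where possible and caching the result as an int32 or
// truncated-int32 alternative.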
ValueNode* MaglevGraphBuilder::GetTruncatedInt32ForToNumber(ValueNode* value,
ToNumberHint hint) {
RecordUseReprHintIfPhi(value, UseRepresentation::kTruncatedInt32);
ValueRepresentation representation =
value->properties().value_representation();
if (representation == ValueRepresentation::kInt32) return value;
if (representation == ValueRepresentation::kUint32) {
// This node is cheap (no code gen, just a bitcast), so don't cache it.
return AddNewNode<TruncateUint32ToInt32>({value});
}
// Process constants first to avoid allocating NodeInfo for them.
switch (value->opcode()) {
case Opcode::kConstant: {
compiler::ObjectRef object = value->Cast<Constant>()->object();
if (!object.IsHeapNumber()) break;
int32_t truncated_value = DoubleToInt32(object.AsHeapNumber().value());
if (!Smi::IsValid(truncated_value)) break;
return GetInt32Constant(truncated_value);
}
case Opcode::kSmiConstant:
return GetInt32Constant(value->Cast<SmiConstant>()->value().value());
case Opcode::kRootConstant: {
Tagged<Object> root_object =
local_isolate_->root(value->Cast<RootConstant>()->index());
if (!IsOddball(root_object, local_isolate_)) break;
int32_t truncated_value =
DoubleToInt32(Oddball::cast(root_object)->to_number_raw());
// All oddball ToNumber truncations are valid Smis.
DCHECK(Smi::IsValid(truncated_value));
return GetInt32Constant(truncated_value);
}
case Opcode::kFloat64Constant: {
int32_t truncated_value =
DoubleToInt32(value->Cast<Float64Constant>()->value().get_scalar());
if (!Smi::IsValid(truncated_value)) break;
return GetInt32Constant(truncated_value);
}
// We could emit unconditional eager deopts for other kinds of constants,
// but it's not necessary: the appropriate checking conversion nodes will
// deopt.
default:
break;
}
NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
auto& alternative = node_info->alternative();
// If there is an int32_alternative, then that works as a truncated value
// too.
if (ValueNode* alt = alternative.int32()) {
return alt;
}
if (ValueNode* alt = alternative.truncated_int32_to_number()) {
return alt;
}
switch (representation) {
case ValueRepresentation::kTagged: {
NodeType old_type;
NodeType desired_type = ToNumberHintToNodeType(hint);
EnsureType(value, desired_type, &old_type);
if (NodeTypeIsSmi(old_type)) {
// Smi untagging can be cached as an int32 alternative, not just a
// truncated alternative.
return alternative.set_int32(BuildSmiUntag(value));
}
if (desired_type == NodeType::kSmi) {
return alternative.set_int32(AddNewNode<CheckedSmiUntag>({value}));
}
TaggedToFloat64ConversionType conversion_type =
ToNumberHintToConversionType(hint);
if (NodeTypeIs(old_type, desired_type)) {
return alternative.set_truncated_int32_to_number(
AddNewNode<TruncateNumberOrOddballToInt32>({value},
conversion_type));
}
return alternative.set_truncated_int32_to_number(
AddNewNode<CheckedTruncateNumberOrOddballToInt32>({value},
conversion_type));
}
case ValueRepresentation::kFloat64:
// Ignore the hint and treat HoleyFloat64 like Float64: ToNumber of
// undefined is a NaN anyway, so truncation simply discards the NaN-ness of
// the hole, and no extra oddball checks are needed (though we'll miss
// updating the feedback).
case ValueRepresentation::kHoleyFloat64: {
return alternative.set_truncated_int32_to_number(
AddNewNode<TruncateFloat64ToInt32>({value}));
}
case ValueRepresentation::kInt32:
case ValueRepresentation::kUint32:
case ValueRepresentation::kIntPtr:
UNREACHABLE();
}
UNREACHABLE();
}
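// Returns `value` converted to an int32, deopting if it cannot be represented
// exactly as an int32, and caching the result as the node's int32 alternative.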
ValueNode* MaglevGraphBuilder::GetInt32(ValueNode* value) {
RecordUseReprHintIfPhi(value, UseRepresentation::kInt32);
ValueRepresentation representation =
value->properties().value_representation();
if (representation == ValueRepresentation::kInt32) return value;
// Process constants first to avoid allocating NodeInfo for them.
switch (value->opcode()) {
case Opcode::kSmiConstant:
return GetInt32Constant(value->Cast<SmiConstant>()->value().value());
case Opcode::kFloat64Constant: {
double double_value =
value->Cast<Float64Constant>()->value().get_scalar();
if (!IsSmiDouble(double_value)) break;
return GetInt32Constant(
FastD2I(value->Cast<Float64Constant>()->value().get_scalar()));
}
// We could emit unconditional eager deopts for other kinds of constants,
// but it's not necessary: the appropriate checking conversion nodes will
// deopt.
default:
break;
}
NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
auto& alternative = node_info->alternative();
if (ValueNode* alt = alternative.int32()) {
return alt;
}
switch (representation) {
case ValueRepresentation::kTagged: {
// TODO(leszeks): Widen this path to allow HeapNumbers with Int32 values.
return alternative.set_int32(BuildSmiUntag(value));
}
case ValueRepresentation::kUint32: {
if (node_info->is_smi()) {
return alternative.set_int32(
AddNewNode<TruncateUint32ToInt32>({value}));
}
return alternative.set_int32(AddNewNode<CheckedUint32ToInt32>({value}));
}
case ValueRepresentation::kFloat64:
// The check here will also work for the hole NaN, so we can treat
// HoleyFloat64 as Float64.
case ValueRepresentation::kHoleyFloat64: {
return alternative.set_int32(
AddNewNode<CheckedTruncateFloat64ToInt32>({value}));
}
case ValueRepresentation::kInt32:
case ValueRepresentation::kIntPtr:
UNREACHABLE();
}
UNREACHABLE();
}
ValueNode* MaglevGraphBuilder::GetFloat64(ValueNode* value) {
RecordUseReprHintIfPhi(value, UseRepresentation::kFloat64);
return GetFloat64ForToNumber(value, ToNumberHint::kDisallowToNumber);
}
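// Returns `value` converted to a float64 under the given ToNumber hint,
// constant-folding where possible. Exact Number->Float64 conversions are
// cached as the node's float64 alternative; oddball conversions are not, since
// they lose the oddball-ness of the input.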
ValueNode* MaglevGraphBuilder::GetFloat64ForToNumber(ValueNode* value,
ToNumberHint hint) {
ValueRepresentation representation =
value->properties().value_representation();
if (representation == ValueRepresentation::kFloat64) return value;
// Process constants first to avoid allocating NodeInfo for them.
switch (value->opcode()) {
case Opcode::kConstant: {
compiler::ObjectRef object = value->Cast<Constant>()->object();
if (object.IsHeapNumber()) {
return GetFloat64Constant(object.AsHeapNumber().value());
}
// Oddballs should be RootConstants.
DCHECK(!IsOddball(*object.object()));
break;
}
case Opcode::kSmiConstant:
return GetFloat64Constant(value->Cast<SmiConstant>()->value().value());
case Opcode::kInt32Constant:
return GetFloat64Constant(value->Cast<Int32Constant>()->value());
case Opcode::kRootConstant: {
Tagged<Object> root_object =
local_isolate_->root(value->Cast<RootConstant>()->index());
if (hint != ToNumberHint::kDisallowToNumber && IsOddball(root_object)) {
return GetFloat64Constant(Oddball::cast(root_object)->to_number_raw());
}
if (IsHeapNumber(root_object)) {
return GetFloat64Constant(HeapNumber::cast(root_object)->value());
}
break;
}
// We could emit unconditional eager deopts for other kinds of constants,
// but it's not necessary: the appropriate checking conversion nodes will
// deopt.
default:
break;
}
NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value);
auto& alternative = node_info->alternative();
if (ValueNode* alt = alternative.float64()) {
return alt;
}
switch (representation) {
case ValueRepresentation::kTagged: {
switch (hint) {
case ToNumberHint::kAssumeSmi:
// Get the float64 value of a Smi via its int32 representation.
return GetFloat64(GetInt32(value));
case ToNumberHint::kDisallowToNumber:
case ToNumberHint::kAssumeNumber:
// Number->Float64 conversions are exact alternatives, so they can
// also become the canonical float64_alternative.
return alternative.set_float64(BuildNumberOrOddballToFloat64(
value, TaggedToFloat64ConversionType::kOnlyNumber));
case ToNumberHint::kAssumeNumberOrOddball: {
// NumberOrOddball->Float64 conversions are not exact alternatives,
// since they lose the information that this is an oddball, so they
// can only become the canonical float64_alternative if they are a
// known number (and therefore not oddball).
ValueNode* float64_node = BuildNumberOrOddballToFloat64(
value, TaggedToFloat64ConversionType::kNumberOrOddball);
if (NodeTypeIsNumber(node_info->type())) {
alternative.set_float64(float64_node);
}
return float64_node;
}
}
}
case ValueRepresentation::kInt32:
return alternative.set_float64(AddNewNode<ChangeInt32ToFloat64>({value}));
case ValueRepresentation::kUint32:
return alternative.set_float64(
AddNewNode<ChangeUint32ToFloat64>({value}));
case ValueRepresentation::kHoleyFloat64: {
switch (hint) {
case ToNumberHint::kAssumeSmi:
case ToNumberHint::kDisallowToNumber:
case ToNumberHint::kAssumeNumber:
// Number->Float64 conversions are exact alternatives, so they can
// also become the canonical float64_alternative.
return alternative.set_float64(
AddNewNode<CheckedHoleyFloat64ToFloat64>({value}));
case ToNumberHint::kAssumeNumberOrOddball:
// NumberOrOddball->Float64 conversions are not exact alternatives,
// since they lose the information that this is an oddball, so they
// cannot become the canonical float64_alternative.
return AddNewNode<HoleyFloat64ToMaybeNanFloat64>({value});
}
}
case ValueRepresentation::kFloat64:
case ValueRepresentation::kIntPtr:
UNREACHABLE();
}
UNREACHABLE();
}
ValueNode* MaglevGraphBuilder::GetHoleyFloat64ForToNumber(ValueNode* value,
ToNumberHint hint) {
RecordUseReprHintIfPhi(value, UseRepresentation::kHoleyFloat64);
ValueRepresentation representation =
value->properties().value_representation();
// Ignore the hint for values that are already HoleyFloat64.
if (representation == ValueRepresentation::kHoleyFloat64) return value;
return GetFloat64ForToNumber(value, hint);
}
namespace {
int32_t ClampToUint8(int32_t value) {
if (value < 0) return 0;
if (value > 255) return 255;
return value;
}
} // namespace
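// Returns `value` converted to an int32 clamped to the uint8 range [0, 255]
// (e.g. -3 becomes 0 and 300 becomes 255), constant-folding Smi and Int32
// constants.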
ValueNode* MaglevGraphBuilder::GetUint8ClampedForToNumber(ValueNode* value,
ToNumberHint hint) {
switch (value->properties().value_representation()) {
case ValueRepresentation::kIntPtr:
UNREACHABLE();
case ValueRepresentation::kTagged: {
if (SmiConstant* constant = value->TryCast<SmiConstant>()) {
return GetInt32Constant(ClampToUint8(constant->value().value()));
}
NodeInfo* info = known_node_aspects().TryGetInfoFor(value);
if (info && info->alternative().int32()) {
return AddNewNode<Int32ToUint8Clamped>({info->alternative().int32()});
}
return AddNewNode<CheckedNumberToUint8Clamped>({value});
}
// Ignore the hint and treat HoleyFloat64 like Float64: ToNumber of
// undefined is a NaN anyway, so truncation simply discards the NaN-ness of
// the hole, and no extra oddball checks are needed (though we'll miss
// updating the feedback).
case ValueRepresentation::kFloat64:
case ValueRepresentation::kHoleyFloat64:
// TODO(leszeks): Handle Float64Constant, which requires the correct
// rounding for clamping.
return AddNewNode<Float64ToUint8Clamped>({value});
case ValueRepresentation::kInt32:
if (Int32Constant* constant = value->TryCast<Int32Constant>()) {
return GetInt32Constant(ClampToUint8(constant->value()));
}
return AddNewNode<Int32ToUint8Clamped>({value});
case ValueRepresentation::kUint32:
return AddNewNode<Uint32ToUint8Clamped>({value});
}
UNREACHABLE();
}
namespace {
template <Operation kOperation>
struct NodeForOperationHelper;
#define NODE_FOR_OPERATION_HELPER(Name) \
template <> \
struct NodeForOperationHelper<Operation::k##Name> { \
using generic_type = Generic##Name; \
};
OPERATION_LIST(NODE_FOR_OPERATION_HELPER)
#undef NODE_FOR_OPERATION_HELPER
template <Operation kOperation>
using GenericNodeForOperation =
typename NodeForOperationHelper<kOperation>::generic_type;
// Bitwise operations reinterpret the numeric input as Int32 bits, which
// means we want to do slightly different conversions.
template <Operation kOperation>
constexpr bool BinaryOperationIsBitwiseInt32() {
switch (kOperation) {
case Operation::kBitwiseNot:
case Operation::kBitwiseAnd:
case Operation::kBitwiseOr:
case Operation::kBitwiseXor:
case Operation::kShiftLeft:
case Operation::kShiftRight:
case Operation::kShiftRightLogical:
return true;
default:
return false;
}
}
} // namespace
// MAP_OPERATION_TO_NODES entries are tuples with the following format:
// - Operation name,
// - Int32 operation node,
// - Identity of the int32 operation (e.g., 0 for add/sub and 1 for mul/div),
//   if it exists, or otherwise {}.
#define MAP_BINARY_OPERATION_TO_INT32_NODE(V) \
V(Add, Int32AddWithOverflow, 0) \
V(Subtract, Int32SubtractWithOverflow, 0) \
V(Multiply, Int32MultiplyWithOverflow, 1) \
V(Divide, Int32DivideWithOverflow, 1) \
V(Modulus, Int32ModulusWithOverflow, {}) \
V(BitwiseAnd, Int32BitwiseAnd, ~0) \
V(BitwiseOr, Int32BitwiseOr, 0) \
V(BitwiseXor, Int32BitwiseXor, 0) \
V(ShiftLeft, Int32ShiftLeft, 0) \
V(ShiftRight, Int32ShiftRight, 0) \
V(ShiftRightLogical, Int32ShiftRightLogical, {})
#define MAP_UNARY_OPERATION_TO_INT32_NODE(V) \
V(BitwiseNot, Int32BitwiseNot) \
V(Increment, Int32IncrementWithOverflow) \
V(Decrement, Int32DecrementWithOverflow) \
V(Negate, Int32NegateWithOverflow)
// MAP_OPERATION_TO_FLOAT64_NODE entries are tuples with the following
// format: (Operation name, Float64 operation node).
#define MAP_OPERATION_TO_FLOAT64_NODE(V) \
V(Add, Float64Add) \
V(Subtract, Float64Subtract) \
V(Multiply, Float64Multiply) \
V(Divide, Float64Divide) \
V(Modulus, Float64Modulus) \
V(Negate, Float64Negate) \
V(Exponentiate, Float64Exponentiate)
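// Returns the identity of the int32 binary operation from the table above
// (e.g. Int32Identity<Operation::kAdd>() is 0), if it has one.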
template <Operation kOperation>
static constexpr base::Optional<int> Int32Identity() {
switch (kOperation) {
#define CASE(op, _, identity) \
case Operation::k##op: \
return identity;
MAP_BINARY_OPERATION_TO_INT32_NODE(CASE)
#undef CASE
default:
UNREACHABLE();
}
}
namespace {
template <Operation kOperation>
struct Int32NodeForHelper;
#define SPECIALIZATION(op, OpNode, ...) \
template <> \
struct Int32NodeForHelper<Operation::k##op> { \
using type = OpNode; \
};
MAP_UNARY_OPERATION_TO_INT32_NODE(SPECIALIZATION)
MAP_BINARY_OPERATION_TO_INT32_NODE(SPECIALIZATION)
#undef SPECIALIZATION
template <Operation kOperation>
using Int32NodeFor = typename Int32NodeForHelper<kOperation>::type;
template <Operation kOperation>
struct Float64NodeForHelper;
#define SPECIALIZATION(op, OpNode) \
template <> \
struct Float64NodeForHelper<Operation::k##op> { \
using type = OpNode; \
};
MAP_OPERATION_TO_FLOAT64_NODE(SPECIALIZATION)
#undef SPECIALIZATION
template <Operation kOperation>
using Float64NodeFor = typename Float64NodeForHelper<kOperation>::type;
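// For example, Int32NodeFor<Operation::kAdd> is Int32AddWithOverflow and
// Float64NodeFor<Operation::kAdd> is Float64Add.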
} // namespace
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericUnaryOperationNode() {
FeedbackSlot slot_index = GetSlotOperand(0);
ValueNode* value = GetAccumulatorTagged();
SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
{value}, compiler::FeedbackSource{feedback(), slot_index}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericBinaryOperationNode() {
ValueNode* left = LoadRegisterTagged(0);
ValueNode* right = GetAccumulatorTagged();
FeedbackSlot slot_index = GetSlotOperand(1);
SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
{left, right}, compiler::FeedbackSource{feedback(), slot_index}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildGenericBinarySmiOperationNode() {
ValueNode* left = GetAccumulatorTagged();
int constant = iterator_.GetImmediateOperand(0);
ValueNode* right = GetSmiConstant(constant);
FeedbackSlot slot_index = GetSlotOperand(1);
SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>(
{left, right}, compiler::FeedbackSource{feedback(), slot_index}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildInt32UnaryOperationNode() {
// Use BuildTruncatingInt32BitwiseNotForToNumber with Smi input hint
// for truncating operations.
static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
// TODO(v8:7700): Do constant folding.
ValueNode* value = GetAccumulatorInt32();
using OpNodeT = Int32NodeFor<kOperation>;
SetAccumulator(AddNewNode<OpNodeT>({value}));
}
void MaglevGraphBuilder::BuildTruncatingInt32BitwiseNotForToNumber(
ToNumberHint hint) {
// TODO(v8:7700): Do constant folding.
ValueNode* value = GetTruncatedInt32ForToNumber(
current_interpreter_frame_.accumulator(), hint);
SetAccumulator(AddNewNode<Int32BitwiseNot>({value}));
}
template <Operation kOperation>
ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left,
ValueNode* right) {
switch (kOperation) {
case Operation::kModulus:
// Note the `x % x = 0` fold is invalid since for negative x values the
// result is -0.0.
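      // For example, in JS (-5) % (-5) is -0, whereas the folded int32 result
      // would be +0.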
// TODO(v8:7700): Consider re-enabling this fold if the result is used
// only in contexts where -0.0 is semantically equivalent to 0.0, or if x
// is known to be non-negative.
default:
// TODO(victorgomes): Implement more folds.
break;
}
return nullptr;
}
template <Operation kOperation>
ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left,
int right) {
switch (kOperation) {
case Operation::kModulus:
// Note the `x % 1 = 0` and `x % -1 = 0` folds are invalid since for
// negative x values the result is -0.0.
// TODO(v8:7700): Consider re-enabling this fold if the result is used
// only in contexts where -0.0 is semantically equivalent to 0.0, or if x
// is known to be non-negative.
      // TODO(victorgomes): We can emit a faster mod operation if {right} is a
      // power of 2; unfortunately we need to know whether {left} is negative.
      // Maybe emit an Int32ModulusRightIsPowerOf2?
default:
// TODO(victorgomes): Implement more folds.
break;
}
return nullptr;
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildInt32BinaryOperationNode() {
// Use BuildTruncatingInt32BinaryOperationNodeForToNumber with Smi input hint
// for truncating operations.
static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
// TODO(v8:7700): Do constant folding.
ValueNode* left = LoadRegisterInt32(0);
ValueNode* right = GetAccumulatorInt32();
if (ValueNode* result =
TryFoldInt32BinaryOperation<kOperation>(left, right)) {
SetAccumulator(result);
return;
}
using OpNodeT = Int32NodeFor<kOperation>;
SetAccumulator(AddNewNode<OpNodeT>({left, right}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildTruncatingInt32BinaryOperationNodeForToNumber(
ToNumberHint hint) {
static_assert(BinaryOperationIsBitwiseInt32<kOperation>());
// TODO(v8:7700): Do constant folding.
ValueNode* left;
ValueNode* right;
if (IsRegisterEqualToAccumulator(0)) {
left = right = GetTruncatedInt32ForToNumber(
current_interpreter_frame_.get(iterator_.GetRegisterOperand(0)), hint);
} else {
left = GetTruncatedInt32ForToNumber(
current_interpreter_frame_.get(iterator_.GetRegisterOperand(0)), hint);
right = GetTruncatedInt32ForToNumber(
current_interpreter_frame_.accumulator(), hint);
}
if (ValueNode* result =
TryFoldInt32BinaryOperation<kOperation>(left, right)) {
SetAccumulator(result);
return;
}
SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildInt32BinarySmiOperationNode() {
  // Use BuildTruncatingInt32BinarySmiOperationNodeForToNumber with Smi input
  // hint for truncating operations.
static_assert(!BinaryOperationIsBitwiseInt32<kOperation>());
// TODO(v8:7700): Do constant folding.
ValueNode* left = GetAccumulatorInt32();
int32_t constant = iterator_.GetImmediateOperand(0);
if (base::Optional<int>(constant) == Int32Identity<kOperation>()) {
// If the constant is the unit of the operation, it already has the right
// value, so just return.
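    // (For example, `x + 0` or `x * 1` evaluates to x.)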
return;
}
if (ValueNode* result =
TryFoldInt32BinaryOperation<kOperation>(left, constant)) {
SetAccumulator(result);
return;
}
ValueNode* right = GetInt32Constant(constant);
using OpNodeT = Int32NodeFor<kOperation>;
SetAccumulator(AddNewNode<OpNodeT>({left, right}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildTruncatingInt32BinarySmiOperationNodeForToNumber(
ToNumberHint hint) {
static_assert(BinaryOperationIsBitwiseInt32<kOperation>());
// TODO(v8:7700): Do constant folding.
ValueNode* left = GetTruncatedInt32ForToNumber(
current_interpreter_frame_.accumulator(), hint);
int32_t constant = iterator_.GetImmediateOperand(0);
if (base::Optional<int>(constant) == Int32Identity<kOperation>()) {
// If the constant is the unit of the operation, it already has the right
// value, so use the truncated value (if not just a conversion) and return.
if (!left->properties().is_conversion()) {
current_interpreter_frame_.set_accumulator(left);
}
return;
}
if (ValueNode* result =
TryFoldInt32BinaryOperation<kOperation>(left, constant)) {
SetAccumulator(result);
return;
}
ValueNode* right = GetInt32Constant(constant);
SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildFloat64BinarySmiOperationNodeForToNumber(
ToNumberHint hint) {
// TODO(v8:7700): Do constant folding. Make sure to normalize HoleyFloat64
// nodes if constant folded.
ValueNode* left = GetAccumulatorHoleyFloat64ForToNumber(hint);
double constant = static_cast<double>(iterator_.GetImmediateOperand(0));
ValueNode* right = GetFloat64Constant(constant);
SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildFloat64UnaryOperationNodeForToNumber(
ToNumberHint hint) {
// TODO(v8:7700): Do constant folding. Make sure to normalize HoleyFloat64
// nodes if constant folded.
ValueNode* value = GetAccumulatorHoleyFloat64ForToNumber(hint);
switch (kOperation) {
case Operation::kNegate:
SetAccumulator(AddNewNode<Float64Negate>({value}));
break;
case Operation::kIncrement:
SetAccumulator(AddNewNode<Float64Add>({value, GetFloat64Constant(1)}));
break;
case Operation::kDecrement:
SetAccumulator(
AddNewNode<Float64Subtract>({value, GetFloat64Constant(1)}));
break;
default:
UNREACHABLE();
}
}
template <Operation kOperation>
void MaglevGraphBuilder::BuildFloat64BinaryOperationNodeForToNumber(
ToNumberHint hint) {
// TODO(v8:7700): Do constant folding. Make sure to normalize HoleyFloat64
// nodes if constant folded.
ValueNode* left = LoadRegisterHoleyFloat64ForToNumber(0, hint);
ValueNode* right = GetAccumulatorHoleyFloat64ForToNumber(hint);
SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right}));
}
namespace {
ToNumberHint BinopHintToToNumberHint(BinaryOperationHint hint) {
switch (hint) {
case BinaryOperationHint::kSignedSmall:
return ToNumberHint::kAssumeSmi;
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
return ToNumberHint::kAssumeNumber;
case BinaryOperationHint::kNumberOrOddball:
return ToNumberHint::kAssumeNumberOrOddball;
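    // The callers only convert Smi/Number/Oddball hints; every other hint is
    // handled (deopt for kNone, generic fallback otherwise) before this point.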
case BinaryOperationHint::kNone:
case BinaryOperationHint::kString:
case BinaryOperationHint::kBigInt:
case BinaryOperationHint::kBigInt64:
case BinaryOperationHint::kAny:
UNREACHABLE();
}
}
} // namespace
template <Operation kOperation>
void MaglevGraphBuilder::VisitUnaryOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(0);
BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
switch (feedback_hint) {
case BinaryOperationHint::kNone:
RETURN_VOID_ON_ABORT(EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation));
case BinaryOperationHint::kSignedSmall:
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
case BinaryOperationHint::kNumberOrOddball: {
ToNumberHint hint = BinopHintToToNumberHint(feedback_hint);
if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
static_assert(kOperation == Operation::kBitwiseNot);
return BuildTruncatingInt32BitwiseNotForToNumber(hint);
} else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
return BuildInt32UnaryOperationNode<kOperation>();
}
return BuildFloat64UnaryOperationNodeForToNumber<kOperation>(hint);
break;
}
case BinaryOperationHint::kString:
case BinaryOperationHint::kBigInt:
case BinaryOperationHint::kBigInt64:
case BinaryOperationHint::kAny:
// Fallback to generic node.
break;
}
BuildGenericUnaryOperationNode<kOperation>();
}
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinaryOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
switch (feedback_hint) {
case BinaryOperationHint::kNone:
RETURN_VOID_ON_ABORT(EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation));
case BinaryOperationHint::kSignedSmall:
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
case BinaryOperationHint::kNumberOrOddball: {
ToNumberHint hint = BinopHintToToNumberHint(feedback_hint);
if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
return BuildTruncatingInt32BinaryOperationNodeForToNumber<kOperation>(
hint);
} else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
if constexpr (kOperation == Operation::kExponentiate) {
// Exponentiate never updates the feedback to be a Smi.
UNREACHABLE();
} else {
return BuildInt32BinaryOperationNode<kOperation>();
}
} else {
return BuildFloat64BinaryOperationNodeForToNumber<kOperation>(hint);
}
break;
}
case BinaryOperationHint::kString:
if constexpr (kOperation == Operation::kAdd) {
ValueNode* left = LoadRegisterTagged(0);
ValueNode* right = GetAccumulatorTagged();
BuildCheckString(left);
BuildCheckString(right);
SetAccumulator(AddNewNode<StringConcat>({left, right}));
return;
}
break;
case BinaryOperationHint::kBigInt:
case BinaryOperationHint::kBigInt64:
case BinaryOperationHint::kAny:
// Fallback to generic node.
break;
}
BuildGenericBinaryOperationNode<kOperation>();
}
template <Operation kOperation>
void MaglevGraphBuilder::VisitBinarySmiOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback();
switch (feedback_hint) {
case BinaryOperationHint::kNone:
RETURN_VOID_ON_ABORT(EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation));
case BinaryOperationHint::kSignedSmall:
case BinaryOperationHint::kSignedSmallInputs:
case BinaryOperationHint::kNumber:
case BinaryOperationHint::kNumberOrOddball: {
ToNumberHint hint = BinopHintToToNumberHint(feedback_hint);
if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) {
return BuildTruncatingInt32BinarySmiOperationNodeForToNumber<
kOperation>(hint);
} else if (feedback_hint == BinaryOperationHint::kSignedSmall) {
if constexpr (kOperation == Operation::kExponentiate) {
// Exponentiate never updates the feedback to be a Smi.
UNREACHABLE();
} else {
return BuildInt32BinarySmiOperationNode<kOperation>();
}
} else {
return BuildFloat64BinarySmiOperationNodeForToNumber<kOperation>(hint);
}
break;
}
case BinaryOperationHint::kString:
case BinaryOperationHint::kBigInt:
case BinaryOperationHint::kBigInt64:
case BinaryOperationHint::kAny:
// Fallback to generic node.
break;
}
BuildGenericBinarySmiOperationNode<kOperation>();
}
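// Evaluates the comparison {kOperation} over two constant operands of the
// same type at graph building time.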
template <Operation kOperation, typename type>
bool OperationValue(type left, type right) {
switch (kOperation) {
case Operation::kEqual:
case Operation::kStrictEqual:
return left == right;
case Operation::kLessThan:
return left < right;
case Operation::kLessThanOrEqual:
return left <= right;
case Operation::kGreaterThan:
return left > right;
case Operation::kGreaterThanOrEqual:
return left >= right;
}
}
// static
compiler::OptionalHeapObjectRef MaglevGraphBuilder::TryGetConstant(
compiler::JSHeapBroker* broker, LocalIsolate* isolate, ValueNode* node) {
if (Constant* c = node->TryCast<Constant>()) {
return c->object();
}
if (RootConstant* c = node->TryCast<RootConstant>()) {
return MakeRef(broker, isolate->root_handle(c->index())).AsHeapObject();
}
return {};
}
compiler::OptionalHeapObjectRef MaglevGraphBuilder::TryGetConstant(
ValueNode* node, ValueNode** constant_node) {
if (auto result = TryGetConstant(broker(), local_isolate(), node)) {
if (constant_node) *constant_node = node;
return result;
}
const NodeInfo* info = known_node_aspects().TryGetInfoFor(node);
if (info) {
if (auto c = info->alternative().constant()) {
if (constant_node) *constant_node = c;
return TryGetConstant(c);
}
}
return {};
}
template <Operation kOperation>
bool MaglevGraphBuilder::TryReduceCompareEqualAgainstConstant() {
// First handle strict equal comparison with constant.
if (kOperation != Operation::kStrictEqual) return false;
ValueNode* left = LoadRegisterRaw(0);
ValueNode* right = GetRawAccumulator();
compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(left);
if (!maybe_constant) maybe_constant = TryGetConstant(right);
if (!maybe_constant) return false;
InstanceType type = maybe_constant.value().map(broker()).instance_type();
if (!InstanceTypeChecker::IsReferenceComparable(type)) return false;
if (left->properties().value_representation() !=
ValueRepresentation::kTagged ||
right->properties().value_representation() !=
ValueRepresentation::kTagged) {
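    // An untagged input can never be reference-equal to the constant, so the
    // strict equality is known to be false.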
SetAccumulator(GetBooleanConstant(false));
} else if (left == right) {
SetAccumulator(GetBooleanConstant(true));
} else {
SetAccumulator(AddNewNode<TaggedEqual>({left, right}));
}
return true;
}
template <Operation kOperation>
void MaglevGraphBuilder::VisitCompareOperation() {
FeedbackNexus nexus = FeedbackNexusForOperand(1);
switch (nexus.GetCompareOperationFeedback()) {
case CompareOperationHint::kNone:
RETURN_VOID_ON_ABORT(EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation));
case CompareOperationHint::kSignedSmall: {
ValueNode* left = LoadRegisterInt32(0);
ValueNode* right = GetAccumulatorInt32();
if (left == right) {
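        // The value is compared against itself and int32 has no NaN, so only
        // the operations that include equality hold.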
SetAccumulator(
GetBooleanConstant(kOperation == Operation::kEqual ||
kOperation == Operation::kStrictEqual ||
kOperation == Operation::kLessThanOrEqual ||
kOperation == Operation::kGreaterThanOrEqual));
return;
}
if (left->Is<Int32Constant>() && right->Is<Int32Constant>()) {
int left_value = left->Cast<Int32Constant>()->value();
int right_value = right->Cast<Int32Constant>()->value();
SetAccumulator(GetBooleanConstant(
OperationValue<kOperation>(left_value, right_value)));
return;
}
SetAccumulator(AddNewNode<Int32Compare>({left, right}, kOperation));
return;
}
case CompareOperationHint::kNumber: {
// TODO(leszeks): we could support kNumberOrOddball with
// BranchIfFloat64Compare, but we'd need to special case comparing
// oddballs with NaN value (e.g. undefined) against themselves.
ValueNode* left = LoadRegisterFloat64(0);
ValueNode* right = GetAccumulatorFloat64();
if (left->Is<Float64Constant>() && right->Is<Float64Constant>()) {
double left_value = left->Cast<Float64Constant>()->value().get_scalar();
double right_value =
right->Cast<Float64Constant>()->value().get_scalar();
SetAccumulator(GetBooleanConstant(
OperationValue<kOperation>(left_value, right_value)));
return;
}
SetAccumulator(AddNewNode<Float64Compare>({left, right}, kOperation));
return;
}
case CompareOperationHint::kInternalizedString: {
DCHECK(kOperation == Operation::kEqual ||
kOperation == Operation::kStrictEqual);
ValueNode *left, *right;
if (IsRegisterEqualToAccumulator(0)) {
left = right = GetInternalizedString(iterator_.GetRegisterOperand(0));
SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
return;
}
left = GetInternalizedString(iterator_.GetRegisterOperand(0));
right =
GetInternalizedString(interpreter::Register::virtual_accumulator());
if (left == right) {
SetAccumulator(GetBooleanConstant(true));
return;
}
SetAccumulator(AddNewNode<TaggedEqual>({left, right}));
return;
}
case CompareOperationHint::kSymbol: {
DCHECK(kOperation == Operation::kEqual ||
kOperation == Operation::kStrictEqual);
ValueNode* left = LoadRegisterTagged(0);
ValueNode* right = GetAccumulatorTagged();
BuildCheckSymbol(left);
BuildCheckSymbol(right);
if (left == right) {
SetAccumulator(GetBooleanConstant(true));
return;
}
SetAccumulator(AddNewNode<TaggedEqual>({left, right}));
return;
}
case CompareOperationHint::kString: {
if (TryReduceCompareEqualAgainstConstant<kOperation>()) return;
ValueNode* left = LoadRegisterTagged(0);
ValueNode* right = GetAccumulatorTagged();
BuildCheckString(left);
BuildCheckString(right);
ValueNode* result;
if (left == right) {
SetAccumulator(
GetBooleanConstant(kOperation == Operation::kEqual ||
kOperation == Operation::kStrictEqual ||
kOperation == Operation::kLessThanOrEqual ||
kOperation == Operation::kGreaterThanOrEqual));
return;
}
switch (kOperation) {
case Operation::kEqual:
case Operation::kStrictEqual:
result = AddNewNode<StringEqual>({left, right});
break;
case Operation::kLessThan:
result = BuildCallBuiltin<Builtin::kStringLessThan>({left, right});
break;
case Operation::kLessThanOrEqual:
result =
BuildCallBuiltin<Builtin::kStringLessThanOrEqual>({left, right});
break;
case Operation::kGreaterThan:
result = BuildCallBuiltin<Builtin::kStringGreaterThan>({left, right});
break;
case Operation::kGreaterThanOrEqual:
result = BuildCallBuiltin<Builtin::kStringGreaterThanOrEqual>(
{left, right});
break;
}
SetAccumulator(result);
return;
}
case CompareOperationHint::kAny:
case CompareOperationHint::kBigInt64:
case CompareOperationHint::kBigInt:
case CompareOperationHint::kNumberOrBoolean:
case CompareOperationHint::kNumberOrOddball:
case CompareOperationHint::kReceiverOrNullOrUndefined:
if (TryReduceCompareEqualAgainstConstant<kOperation>()) return;
break;
case CompareOperationHint::kReceiver: {
if (TryReduceCompareEqualAgainstConstant<kOperation>()) return;
DCHECK(kOperation == Operation::kEqual ||
kOperation == Operation::kStrictEqual);
ValueNode* left = LoadRegisterTagged(0);
ValueNode* right = GetAccumulatorTagged();
BuildCheckJSReceiver(left);
BuildCheckJSReceiver(right);
if (left == right) {
SetAccumulator(GetBooleanConstant(true));
return;
}
SetAccumulator(AddNewNode<TaggedEqual>({left, right}));
return;
}
}
BuildGenericBinaryOperationNode<kOperation>();
}
void MaglevGraphBuilder::VisitLdar() {
MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
interpreter::Register::virtual_accumulator());
}
void MaglevGraphBuilder::VisitLdaZero() { SetAccumulator(GetSmiConstant(0)); }
void MaglevGraphBuilder::VisitLdaSmi() {
int constant = iterator_.GetImmediateOperand(0);
SetAccumulator(GetSmiConstant(constant));
}
void MaglevGraphBuilder::VisitLdaUndefined() {
SetAccumulator(GetRootConstant(RootIndex::kUndefinedValue));
}
void MaglevGraphBuilder::VisitLdaNull() {
SetAccumulator(GetRootConstant(RootIndex::kNullValue));
}
void MaglevGraphBuilder::VisitLdaTheHole() {
SetAccumulator(GetRootConstant(RootIndex::kTheHoleValue));
}
void MaglevGraphBuilder::VisitLdaTrue() {
SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
}
void MaglevGraphBuilder::VisitLdaFalse() {
SetAccumulator(GetRootConstant(RootIndex::kFalseValue));
}
void MaglevGraphBuilder::VisitLdaConstant() {
SetAccumulator(GetConstant(GetRefOperand<HeapObject>(0)));
}
bool MaglevGraphBuilder::TrySpecializeLoadContextSlotToFunctionContext(
ValueNode** context, size_t* depth, int slot_index,
ContextSlotMutability slot_mutability) {
DCHECK(compilation_unit_->info()->specialize_to_function_context());
size_t new_depth = *depth;
compiler::OptionalContextRef maybe_context_ref =
FunctionContextSpecialization::TryToRef(compilation_unit_, *context,
&new_depth);
if (!maybe_context_ref.has_value()) return false;
compiler::ContextRef context_ref = maybe_context_ref.value();
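  // Note: even when the load cannot be folded below, rewriting {context} to
  // the constant context and {depth} to the remaining depth lets the caller
  // skip the already-resolved part of the context chain walk.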
if (slot_mutability == kMutable || new_depth != 0) {
*depth = new_depth;
*context = GetConstant(context_ref);
return false;
}
compiler::OptionalObjectRef maybe_slot_value =
context_ref.get(broker(), slot_index);
if (!maybe_slot_value.has_value()) {
*depth = new_depth;
*context = GetConstant(context_ref);
return false;
}
compiler::ObjectRef slot_value = maybe_slot_value.value();
if (slot_value.IsHeapObject()) {
// Even though the context slot is immutable, the context might have escaped
// before the function to which it belongs has initialized the slot. We
// must be conservative and check if the value in the slot is currently the
// hole or undefined. Only if it is neither of these, can we be sure that it
// won't change anymore.
//
// See also: JSContextSpecialization::ReduceJSLoadContext.
compiler::OddballType oddball_type =
slot_value.AsHeapObject().map(broker()).oddball_type(broker());
if (oddball_type == compiler::OddballType::kUndefined ||
slot_value.IsTheHole()) {
*depth = new_depth;
*context = GetConstant(context_ref);
return false;
}
}
// Fold the load of the immutable slot.
SetAccumulator(GetConstant(slot_value));
return true;
}
ValueNode* MaglevGraphBuilder::LoadAndCacheContextSlot(
ValueNode* context, int offset, ContextSlotMutability slot_mutability) {
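  // Reuse an earlier load of the same {context, offset} pair if one is
  // cached. Mutable and immutable slots are tracked in separate caches, so
  // stores (see StoreAndCacheContextSlot) only update the mutable one.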
ValueNode*& cached_value =
slot_mutability == ContextSlotMutability::kMutable
? known_node_aspects().loaded_context_slots[{context, offset}]
: known_node_aspects().loaded_context_constants[{context, offset}];
if (cached_value) {
if (v8_flags.trace_maglev_graph_building) {
std::cout << " * Reusing cached context slot "
<< PrintNodeLabel(graph_labeller(), context) << "[" << offset
<< "]: " << PrintNode(graph_labeller(), cached_value)
<< std::endl;
}
return cached_value;
}
return cached_value = AddNewNode<LoadTaggedField>({context}, offset);
}
void MaglevGraphBuilder::StoreAndCacheContextSlot(ValueNode* context,
int offset,
ValueNode* value) {
DCHECK_EQ(
known_node_aspects().loaded_context_constants.count({context, offset}),
0);
BuildStoreTaggedField(context, GetTaggedValue(value), offset);
if (v8_flags.trace_maglev_graph_building) {
std::cout << " * Recording context slot store "
<< PrintNodeLabel(graph_labeller(), context) << "[" << offset
<< "]: " << PrintNode(graph_labeller(), value) << std::endl;
}
known_node_aspects().loaded_context_slots[{context, offset}] = value;
}
void MaglevGraphBuilder::BuildLoadContextSlot(
ValueNode* context, size_t depth, int slot_index,
ContextSlotMutability slot_mutability) {
MinimizeContextChainDepth(&context, &depth);
if (compilation_unit_->info()->specialize_to_function_context() &&
TrySpecializeLoadContextSlotToFunctionContext(
&context, &depth, slot_index, slot_mutability)) {
return; // Our work here is done.
}
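  // Walk up the remaining context chain; each level is a (cached) load of
  // the previous context.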
for (size_t i = 0; i < depth; ++i) {
context = LoadAndCacheContextSlot(
context, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX),
kImmutable);
}
  // Always load the slot here as if it were mutable. Even an 'immutable' slot
  // is briefly mutable if the context escapes before the slot is initialized,
  // so we can't safely cache the load in case it occurs before initialization
  // (e.g. var a = a + 42).
current_interpreter_frame_.set_accumulator(LoadAndCacheContextSlot(
context, Context::OffsetOfElementAt(slot_index), kMutable));
}
void MaglevGraphBuilder::BuildStoreContextSlot(ValueNode* context, size_t depth,
int slot_index,
ValueNode* value) {
MinimizeContextChainDepth(&context, &depth);
if (compilation_unit_->info()->specialize_to_function_context()) {
compiler::OptionalContextRef maybe_ref =
FunctionContextSpecialization::TryToRef(compilation_unit_, context,
&depth);
if (maybe_ref.has_value()) {
context = GetConstant(maybe_ref.value());
}
}
for (size_t i = 0; i < depth; ++i) {
context = LoadAndCacheContextSlot(
context, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX),
kImmutable);
}
StoreAndCacheContextSlot(context, Context::OffsetOfElementAt(slot_index),
value);
}
void MaglevGraphBuilder::VisitLdaContextSlot() {
ValueNode* context = LoadRegisterTagged(0);
int slot_index = iterator_.GetIndexOperand(1);
size_t depth = iterator_.GetUnsignedImmediateOperand(2);
BuildLoadContextSlot(context, depth, slot_index, kMutable);
}
void MaglevGraphBuilder::VisitLdaImmutableContextSlot() {
ValueNode* context = LoadRegisterTagged(0);
int slot_index = iterator_.GetIndexOperand(1);
size_t depth = iterator_.GetUnsignedImmediateOperand(2);
BuildLoadContextSlot(context, depth, slot_index, kImmutable);
}
void MaglevGraphBuilder::VisitLdaCurrentContextSlot() {
ValueNode* context = GetContext();
int slot_index = iterator_.GetIndexOperand(0);
BuildLoadContextSlot(context, 0, slot_index, kMutable);
}
void MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
ValueNode* context = GetContext();
int slot_index = iterator_.GetIndexOperand(0);
BuildLoadContextSlot(context, 0, slot_index, kImmutable);
}
void MaglevGraphBuilder::VisitStaContextSlot() {
ValueNode* context = LoadRegisterTagged(0);
int slot_index = iterator_.GetIndexOperand(1);
size_t depth = iterator_.GetUnsignedImmediateOperand(2);
BuildStoreContextSlot(context, depth, slot_index, GetRawAccumulator());
}
void MaglevGraphBuilder::VisitStaCurrentContextSlot() {
ValueNode* context = GetContext();
int slot_index = iterator_.GetIndexOperand(0);
BuildStoreContextSlot(context, 0, slot_index, GetRawAccumulator());
}
void MaglevGraphBuilder::VisitStar() {
MoveNodeBetweenRegisters(interpreter::Register::virtual_accumulator(),
iterator_.GetRegisterOperand(0));
}
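// Each short Star bytecode implicitly encodes its destination register, so
// e.g. Star0 stores the accumulator into register r0.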
#define SHORT_STAR_VISITOR(Name, ...) \
void MaglevGraphBuilder::Visit##Name() { \
MoveNodeBetweenRegisters( \
interpreter::Register::virtual_accumulator(), \
interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name)); \
}
SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR)
#undef SHORT_STAR_VISITOR
void MaglevGraphBuilder::VisitMov() {
MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0),
iterator_.GetRegisterOperand(1));
}
void MaglevGraphBuilder::VisitPushContext() {
MoveNodeBetweenRegisters(interpreter::Register::current_context(),
iterator_.GetRegisterOperand(0));
SetContext(GetAccumulatorTagged());
}
void MaglevGraphBuilder::VisitPopContext() {
SetContext(LoadRegisterTagged(0));
}
void MaglevGraphBuilder::VisitTestReferenceEqual() {
ValueNode* lhs = LoadRegisterTagged(0);
ValueNode* rhs = GetAccumulatorTagged();
if (lhs == rhs) {
SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
return;
}
SetAccumulator(AddNewNode<TaggedEqual>({lhs, rhs}));
}
void MaglevGraphBuilder::VisitTestUndetectable() {
ValueNode* value = GetAccumulatorTagged();
if (compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(value)) {
if (maybe_constant.value().map(broker()).is_undetectable()) {
SetAccumulator(GetRootConstant(RootIndex::kTrueValue));
} else {
SetAccumulator(GetRootConstant(RootIndex::kFalseValue));
}
return;
}
NodeType old_type;
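  // Smis have no map and thus can never be undetectable.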
if (CheckType(value, NodeType::kSmi, &old_type)) {
SetAccumulator(GetRootConstant(RootIndex::kFalseValue));
return;
}
enum CheckType type = GetCheckType(old_type);
SetAccumulator(AddNewNode<TestUndetectable>({value}, type));
}
void MaglevGraphBuilder::VisitTestNull() {
ValueNode* value = GetAccumulatorTagged();
if (IsConstantNode(value->opcode())) {
SetAccumulator(GetBooleanConstant(IsNullValue(value)));
return;
}
ValueNode* null_constant = GetRootConstant(RootIndex::kNullValue);
SetAccumulator(AddNewNode<TaggedEqual>({value, null_constant}));
}
void MaglevGraphBuilder::VisitTestUndefined() {
ValueNode* value = GetAccumulatorTagged();
if (IsConstantNode(value->opcode())) {
SetAccumulator(GetBooleanConstant(IsUndefinedValue(value)));
return;
}
ValueNode* undefined_constant = GetRootConstant(RootIndex::kUndefinedValue);
SetAccumulator(AddNewNode<TaggedEqual>({value, undefined_constant}));
}
void MaglevGraphBuilder::VisitTestTypeOf() {
using LiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag;
// TODO(v8:7700): Add a branch version of TestTypeOf that does not need to
// materialise the boolean value.
LiteralFlag literal =
interpreter::TestTypeOfFlags::Decode(GetFlag8Operand(0));
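  // kOther means the literal string is not one of the possible typeof
  // results, so the test can never be true.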
if (literal == LiteralFlag::kOther) {
SetAccumulator(GetRootConstant(RootIndex::kFalseValue));
return;
}
ValueNode* value = GetAccumulatorTagged();
SetAccumulator(AddNewNode<TestTypeOf>({value}, literal));
}
ReduceResult MaglevGraphBuilder::TryBuildScriptContextStore(
const compiler::GlobalAccessFeedback& global_access_feedback) {
DCHECK(global_access_feedback.IsScriptContextSlot());
if (global_access_feedback.immutable()) {
return ReduceResult::Fail();
}
auto script_context = GetConstant(global_access_feedback.script_context());
int offset = Context::OffsetOfElementAt(global_access_feedback.slot_index());
StoreAndCacheContextSlot(script_context, offset, GetRawAccumulator());
return ReduceResult::Done();
}
ReduceResult MaglevGraphBuilder::TryBuildPropertyCellStore(
const compiler::GlobalAccessFeedback& global_access_feedback) {
DCHECK(global_access_feedback.IsPropertyCell());
compiler::PropertyCellRef property_cell =
global_access_feedback.property_cell();
if (!property_cell.Cache(broker())) return ReduceResult::Fail();
compiler::ObjectRef property_cell_value = property_cell.value(broker());
if (property_cell_value.IsPropertyCellHole()) {
// The property cell is no longer valid.
return EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
}
PropertyDetails property_details = property_cell.property_details();
DCHECK_EQ(PropertyKind::kData, property_details.kind());
if (property_details.IsReadOnly()) {
// Don't even bother trying to lower stores to read-only data
// properties.
// TODO(neis): We could generate code that checks if the new value
// equals the old one and then does nothing or deopts, respectively.
return ReduceResult::Fail();
}
switch (property_details.cell_type()) {
case PropertyCellType::kUndefined:
return ReduceResult::Fail();
case PropertyCellType::kConstant: {
// TODO(victorgomes): Support non-internalized string.
if (property_cell_value.IsString() &&
!property_cell_value.IsInternalizedString()) {
return ReduceResult::Fail();
}
// Record a code dependency on the cell, and just deoptimize if the new
// value doesn't match the previous value stored inside the cell.
broker()->dependencies()->DependOnGlobalProperty(property_cell);
ValueNode* value = GetAccumulatorTagged();
return BuildCheckValue(value, property_cell_value);
}
case PropertyCellType::kConstantType: {
// We rely on stability further below.
if (property_cell_value.IsHeapObject() &&
!property_cell_value.AsHeapObject().map(broker()).is_stable()) {
return ReduceResult::Fail();
}
// Record a code dependency on the cell, and just deoptimize if the new
// value's type doesn't match the type of the previous value in the cell.
broker()->dependencies()->DependOnGlobalProperty(property_cell);
ValueNode* value;
if (property_cell_value.IsHeapObject()) {
value = GetAccumulatorTagged();
compiler::MapRef property_cell_value_map =
property_cell_value.AsHeapObject().map(broker());
broker()->dependencies()->DependOnStableMap(property_cell_value_map);
BuildCheckHeapObject(value);
RETURN_IF_ABORT(
BuildCheckMaps(value, base::VectorOf({property_cell_value_map})));
} else {
GET_VALUE_OR_ABORT(value, GetAccumulatorSmi());
}
ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
BuildStoreTaggedField(property_cell_node, value,
PropertyCell::kValueOffset);
break;
}
case PropertyCellType::kMutable: {
// Record a code dependency on the cell, and just deoptimize if the
// property ever becomes read-only.
broker()->dependencies()->DependOnGlobalProperty(property_cell);
ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
ValueNode* value = GetAccumulatorTagged();
BuildStoreTaggedField(property_cell_node, value,
PropertyCell::kValueOffset);
break;
}
case PropertyCellType::kInTransition:
UNREACHABLE();
}
return ReduceResult::Done();
}
ReduceResult MaglevGraphBuilder::TryBuildScriptContextConstantLoad(
const compiler::GlobalAccessFeedback& global_access_feedback) {
DCHECK(global_access_feedback.IsScriptContextSlot());
if (!global_access_feedback.immutable()) return ReduceResult::Fail();
compiler::OptionalObjectRef maybe_slot_value =
global_access_feedback.script_context().get(
broker(), global_access_feedback.slot_index());
if (!maybe_slot_value) return ReduceResult::Fail();
return GetConstant(maybe_slot_value.value());
}
ReduceResult MaglevGraphBuilder::TryBuildScriptContextLoad(
const compiler::GlobalAccessFeedback& global_access_feedback) {
DCHECK(global_access_feedback.IsScriptContextSlot());
RETURN_IF_DONE(TryBuildScriptContextConstantLoad(global_access_feedback));
auto script_context = GetConstant(global_access_feedback.script_context());
int offset = Context::OffsetOfElementAt(global_access_feedback.slot_index());
return LoadAndCacheContextSlot(
script_context, offset,
global_access_feedback.immutable() ? kImmutable : kMutable);
}
ReduceResult MaglevGraphBuilder::TryBuildPropertyCellLoad(
const compiler::GlobalAccessFeedback& global_access_feedback) {
// TODO(leszeks): A bunch of this is copied from
// js-native-context-specialization.cc -- I wonder if we can unify it
// somehow.
DCHECK(global_access_feedback.IsPropertyCell());
compiler::PropertyCellRef property_cell =
global_access_feedback.property_cell();
if (!property_cell.Cache(broker())) return ReduceResult::Fail();
compiler::ObjectRef property_cell_value = property_cell.value(broker());
if (property_cell_value.IsPropertyCellHole()) {
// The property cell is no longer valid.
return EmitUnconditionalDeopt(
DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess);
}
PropertyDetails property_details = property_cell.property_details();
PropertyCellType property_cell_type = property_details.cell_type();
DCHECK_EQ(PropertyKind::kData, property_details.kind());
if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
return GetConstant(property_cell_value);
}
// Record a code dependency on the cell if we can benefit from the
// additional feedback, or the global property is configurable (i.e.
// can be deleted or reconfigured to an accessor property).
if (property_cell_type != PropertyCellType::kMutable ||
property_details.IsConfigurable()) {
broker()->dependencies()->DependOnGlobalProperty(property_cell);
}
// Load from constant/undefined global property can be constant-folded.
if (property_cell_type == PropertyCellType::kConstant ||
property_cell_type == PropertyCellType::kUndefined) {
return GetConstant(property_cell_value);
}
ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject());
return AddNewNode<LoadTaggedField>({property_cell_node},
PropertyCell::kValueOffset);
}
ReduceResult MaglevGraphBuilder::TryBuildGlobalStore(
const compiler::GlobalAccessFeedback& global_access_feedback) {
if (global_access_feedback.IsScriptContextSlot()) {
return TryBuildScriptContextStore(global_access_feedback);
} else if (global_access_feedback.IsPropertyCell()) {
return TryBuildPropertyCellStore(global_access_feedback);
} else {
DCHECK(global_access_feedback.IsMegamorphic());
return ReduceResult::Fail();
}
}
ReduceResult MaglevGraphBuilder::TryBuildGlobalLoad(
const compiler::GlobalAccessFeedback& global_access_feedback) {
if (global_access_feedback.IsScriptContextSlot()) {
return TryBuildScriptContextLoad(global_access_feedback);
} else if (global_access_feedback.IsPropertyCell()) {
return TryBuildPropertyCellLoad(global_access_feedback);
} else {
DCHECK(global_access_feedback.IsMegamorphic());
return ReduceResult::Fail();
}
}
void MaglevGraphBuilder::VisitLdaGlobal() {
// LdaGlobal <name_index> <slot>
static const int kNameOperandIndex = 0;
static const int kSlotOperandIndex = 1;
compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex);
FeedbackSlot slot = GetSlotOperand(kSlotOperandIndex);
compiler::FeedbackSource feedback_source{feedback(), slot};
BuildLoadGlobal(name, feedback_source, TypeofMode::kNotInside);
}
void MaglevGraphBuilder::VisitLdaGlobalInsideTypeof() {
// LdaG