| // Copyright 2022 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/maglev/maglev-graph-builder.h" |
| |
| #include <limits> |
| |
| #include "src/base/logging.h" |
| #include "src/base/optional.h" |
| #include "src/base/v8-fallthrough.h" |
| #include "src/base/vector.h" |
| #include "src/builtins/builtins-constructor.h" |
| #include "src/builtins/builtins.h" |
| #include "src/codegen/cpu-features.h" |
| #include "src/codegen/interface-descriptors-inl.h" |
| #include "src/common/assert-scope.h" |
| #include "src/common/globals.h" |
| #include "src/compiler/access-info.h" |
| #include "src/compiler/bytecode-liveness-map.h" |
| #include "src/compiler/compilation-dependencies.h" |
| #include "src/compiler/feedback-source.h" |
| #include "src/compiler/heap-refs.h" |
| #include "src/compiler/js-heap-broker.h" |
| #include "src/compiler/processed-feedback.h" |
| #include "src/deoptimizer/deoptimize-reason.h" |
| #include "src/flags/flags.h" |
| #include "src/handles/maybe-handles-inl.h" |
| #include "src/ic/handler-configuration-inl.h" |
| #include "src/interpreter/bytecode-array-iterator.h" |
| #include "src/interpreter/bytecode-flags.h" |
| #include "src/interpreter/bytecode-register.h" |
| #include "src/interpreter/bytecodes.h" |
| #include "src/maglev/maglev-compilation-info.h" |
| #include "src/maglev/maglev-compilation-unit.h" |
| #include "src/maglev/maglev-graph-printer.h" |
| #include "src/maglev/maglev-interpreter-frame-state.h" |
| #include "src/maglev/maglev-ir.h" |
| #include "src/objects/elements-kind.h" |
| #include "src/objects/feedback-vector.h" |
| #include "src/objects/fixed-array.h" |
| #include "src/objects/heap-number-inl.h" |
| #include "src/objects/literal-objects-inl.h" |
| #include "src/objects/name-inl.h" |
| #include "src/objects/property-cell.h" |
| #include "src/objects/property-details.h" |
| #include "src/objects/shared-function-info.h" |
| #include "src/objects/slots-inl.h" |
| #include "src/objects/type-hints.h" |
| #include "src/utils/utils.h" |
| |
| namespace v8::internal::maglev { |
| |
| namespace { |
| |
| enum class CpuOperation { |
| kFloat64Round, |
| }; |
| |
| // TODO(leszeks): Add a generic mechanism for marking nodes as optionally |
| // supported. |
| bool IsSupported(CpuOperation op) { |
| #ifdef V8_TARGET_ARCH_X64 |
| switch (op) { |
| case CpuOperation::kFloat64Round: |
| return CpuFeatures::IsSupported(SSE4_1) || CpuFeatures::IsSupported(AVX); |
| } |
| #elif V8_TARGET_ARCH_ARM64 |
| return true; |
| #else |
| #error "Maglev does not support this architecture." |
| #endif |
| } |
| |
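| // Returns the parent context of a context-creating node if it can be |
| // determined statically from the graph, or nullptr otherwise. |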
| ValueNode* TryGetParentContext(ValueNode* node) { |
| if (CreateFunctionContext* n = node->TryCast<CreateFunctionContext>()) { |
| return n->context().node(); |
| } |
| |
| if (CallRuntime* n = node->TryCast<CallRuntime>()) { |
| switch (n->function_id()) { |
| case Runtime::kPushBlockContext: |
| case Runtime::kPushCatchContext: |
| case Runtime::kNewFunctionContext: |
| return n->context().node(); |
| default: |
| break; |
| } |
| } |
| |
| return nullptr; |
| } |
| |
| // Attempts to walk up the context chain through the graph in order to reduce |
| // depth and thus the number of runtime loads. |
| void MinimizeContextChainDepth(ValueNode** context, size_t* depth) { |
| while (*depth > 0) { |
| ValueNode* parent_context = TryGetParentContext(*context); |
| if (parent_context == nullptr) return; |
| *context = parent_context; |
| (*depth)--; |
| } |
| } |
| |
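| // Helper for resolving context chain walks at compile time when the code is |
| // specialized to a particular function context. |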
| class FunctionContextSpecialization final : public AllStatic { |
| public: |
| static compiler::OptionalContextRef TryToRef( |
| const MaglevCompilationUnit* unit, ValueNode* context, size_t* depth) { |
| DCHECK(unit->info()->specialize_to_function_context()); |
| compiler::OptionalContextRef ref; |
| if (InitialValue* n = context->TryCast<InitialValue>()) { |
| if (n->source().is_current_context()) { |
| ref = compiler::MakeRefAssumeMemoryFence( |
| unit->broker(), unit->broker()->CanonicalPersistentHandle( |
| unit->info()->toplevel_function()->context())); |
| } |
| } else if (Constant* n = context->TryCast<Constant>()) { |
| ref = n->ref().AsContext(); |
| } |
| if (!ref.has_value()) return {}; |
| return ref->previous(unit->broker(), depth); |
| } |
| }; |
| |
| } // namespace |
| |
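| // Collects the arguments of a call site, abstracting over whether the |
| // receiver is passed explicitly or is implicitly undefined |
| // (ConvertReceiverMode::kNullOrUndefined). |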
| class CallArguments { |
| public: |
| enum Mode { |
| kDefault, |
| kWithSpread, |
| kWithArrayLike, |
| }; |
| |
| CallArguments(ConvertReceiverMode receiver_mode, |
| interpreter::RegisterList reglist, |
| const InterpreterFrameState& frame, Mode mode = kDefault) |
| : receiver_mode_(receiver_mode), |
| args_(reglist.register_count()), |
| mode_(mode) { |
| for (int i = 0; i < reglist.register_count(); i++) { |
| args_[i] = frame.get(reglist[i]); |
| } |
| DCHECK_IMPLIES(args_.size() == 0, |
| receiver_mode == ConvertReceiverMode::kNullOrUndefined); |
| DCHECK_IMPLIES(mode != kDefault, |
| receiver_mode == ConvertReceiverMode::kAny); |
| DCHECK_IMPLIES(mode == kWithArrayLike, args_.size() == 2); |
| } |
| |
| explicit CallArguments(ConvertReceiverMode receiver_mode) |
| : receiver_mode_(receiver_mode), args_(), mode_(kDefault) { |
| DCHECK_EQ(receiver_mode, ConvertReceiverMode::kNullOrUndefined); |
| } |
| |
| CallArguments(ConvertReceiverMode receiver_mode, |
| std::initializer_list<ValueNode*> args, Mode mode = kDefault) |
| : receiver_mode_(receiver_mode), args_(args), mode_(mode) { |
| DCHECK_IMPLIES(mode != kDefault, |
| receiver_mode == ConvertReceiverMode::kAny); |
| DCHECK_IMPLIES(mode == kWithArrayLike, args_.size() == 2); |
| } |
| |
| ValueNode* receiver() const { |
| if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) { |
| return nullptr; |
| } |
| return args_[0]; |
| } |
| |
| void set_receiver(ValueNode* receiver) { |
| if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) { |
| args_.insert(args_.data(), receiver); |
| receiver_mode_ = ConvertReceiverMode::kAny; |
| } else { |
| args_[0] = receiver; |
| } |
| } |
| |
| size_t count() const { |
| if (receiver_mode_ == ConvertReceiverMode::kNullOrUndefined) { |
| return args_.size(); |
| } |
| return args_.size() - 1; |
| } |
| |
| size_t count_with_receiver() const { return count() + 1; } |
| |
| ValueNode* operator[](size_t i) const { |
| if (receiver_mode_ != ConvertReceiverMode::kNullOrUndefined) { |
| i++; |
| } |
| if (i >= args_.size()) return nullptr; |
| return args_[i]; |
| } |
| |
| void set_arg(size_t i, ValueNode* node) { |
| if (receiver_mode_ != ConvertReceiverMode::kNullOrUndefined) { |
| i++; |
| } |
| DCHECK_LT(i, args_.size()); |
| args_[i] = node; |
| } |
| |
| Mode mode() const { return mode_; } |
| |
| ConvertReceiverMode receiver_mode() const { return receiver_mode_; } |
| |
| void Truncate(size_t new_args_count) { |
| if (new_args_count >= count()) return; |
| size_t args_to_pop = count() - new_args_count; |
| for (size_t i = 0; i < args_to_pop; i++) { |
| args_.pop_back(); |
| } |
| } |
| |
| void PopReceiver(ConvertReceiverMode new_receiver_mode) { |
| DCHECK_NE(receiver_mode_, ConvertReceiverMode::kNullOrUndefined); |
| DCHECK_NE(new_receiver_mode, ConvertReceiverMode::kNullOrUndefined); |
| DCHECK_GT(args_.size(), 0); // We have at least a receiver to pop! |
| // TODO(victorgomes): Do this better! |
| for (size_t i = 0; i < args_.size() - 1; i++) { |
| args_[i] = args_[i + 1]; |
| } |
| args_.pop_back(); |
| |
| // If there is no non-receiver argument to become the new receiver, |
| // consider the new receiver to be known undefined. |
| receiver_mode_ = args_.size() == 0 ? ConvertReceiverMode::kNullOrUndefined |
| : new_receiver_mode; |
| } |
| |
| private: |
| ConvertReceiverMode receiver_mode_; |
| base::SmallVector<ValueNode*, 8> args_; |
| Mode mode_; |
| }; |
| |
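| // RAII scope that installs the call's feedback source as the current |
| // speculation feedback while nodes for the call are being built. |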
| class V8_NODISCARD MaglevGraphBuilder::CallSpeculationScope { |
| public: |
| CallSpeculationScope(MaglevGraphBuilder* builder, |
| compiler::FeedbackSource feedback_source) |
| : builder_(builder) { |
| DCHECK(!builder_->current_speculation_feedback_.IsValid()); |
| if (feedback_source.IsValid()) { |
| DCHECK_EQ( |
| FeedbackNexus(feedback_source.vector, feedback_source.slot).kind(), |
| FeedbackSlotKind::kCall); |
| } |
| builder_->current_speculation_feedback_ = feedback_source; |
| } |
| ~CallSpeculationScope() { |
| builder_->current_speculation_feedback_ = compiler::FeedbackSource(); |
| } |
| |
| private: |
| MaglevGraphBuilder* builder_; |
| }; |
| |
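| // RAII scope that records a builtin continuation or construct stub frame on |
| // the builder, so that lazy deopt frames created while it is active include |
| // that frame on top of the interpreted frame. |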
| class V8_NODISCARD MaglevGraphBuilder::LazyDeoptFrameScope { |
| public: |
| LazyDeoptFrameScope(MaglevGraphBuilder* builder, Builtin continuation) |
| : builder_(builder), |
| parent_(builder->current_lazy_deopt_scope_), |
| data_(DeoptFrame::BuiltinContinuationFrameData{ |
| continuation, {}, builder->GetContext()}) { |
| builder_->current_lazy_deopt_scope_ = this; |
| } |
| |
| LazyDeoptFrameScope(MaglevGraphBuilder* builder, |
| BytecodeOffset bytecode_position, ValueNode* closure, |
| ValueNode* receiver, |
| const base::Vector<ValueNode*> arguments_without_receiver) |
| : builder_(builder), |
| parent_(builder->current_lazy_deopt_scope_), |
| data_(DeoptFrame::ConstructStubFrameData{ |
| *builder->compilation_unit(), bytecode_position, |
| builder->current_source_position_, closure, receiver, |
| arguments_without_receiver, builder->GetContext()}) { |
| builder_->current_lazy_deopt_scope_ = this; |
| } |
| |
| ~LazyDeoptFrameScope() { builder_->current_lazy_deopt_scope_ = parent_; } |
| |
| LazyDeoptFrameScope* parent() const { return parent_; } |
| |
| DeoptFrame::FrameData data() const { return data_; } |
| |
| private: |
| MaglevGraphBuilder* builder_; |
| LazyDeoptFrameScope* parent_; |
| DeoptFrame::FrameData data_; |
| }; |
| |
| MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate, |
| MaglevCompilationUnit* compilation_unit, |
| Graph* graph, float call_frequency, |
| BytecodeOffset bytecode_offset, |
| MaglevGraphBuilder* parent) |
| : local_isolate_(local_isolate), |
| compilation_unit_(compilation_unit), |
| parent_(parent), |
| graph_(graph), |
| bytecode_analysis_(bytecode().object(), zone(), BytecodeOffset::None(), |
| true), |
| iterator_(bytecode().object()), |
| source_position_iterator_(bytecode().SourcePositionTable(broker())), |
| allow_loop_peeling_(is_inline() ? parent_->allow_loop_peeling_ |
| : v8_flags.maglev_loop_peeling), |
| decremented_predecessor_offsets_(zone()), |
| loop_headers_to_peel_(bytecode().length(), zone()), |
| call_frequency_(call_frequency), |
| // Add an extra jump_target slot for the inline exit if needed. |
| jump_targets_(zone()->NewArray<BasicBlockRef>(bytecode().length() + |
| (is_inline() ? 1 : 0))), |
| // Overallocate merge_states_ by one to allow always looking up the |
| // next offset. This overallocated slot can also be used for the inline |
| // exit when needed. |
| merge_states_(zone()->NewArray<MergePointInterpreterFrameState*>( |
| bytecode().length() + 1)), |
| current_interpreter_frame_( |
| *compilation_unit_, |
| is_inline() ? parent->current_interpreter_frame_.known_node_aspects() |
| : compilation_unit_->zone()->New<KnownNodeAspects>( |
| compilation_unit_->zone())), |
| caller_bytecode_offset_(bytecode_offset), |
| catch_block_stack_(zone()) { |
| memset(merge_states_, 0, |
| (bytecode().length() + 1) * sizeof(MergePointInterpreterFrameState*)); |
| // Default construct basic block refs. |
| // TODO(leszeks): This could be a memset of nullptr to ..._jump_targets_. |
| for (int i = 0; i < bytecode().length(); ++i) { |
| new (&jump_targets_[i]) BasicBlockRef(); |
| } |
| |
| if (is_inline()) { |
| DCHECK_NOT_NULL(parent_); |
| DCHECK_GT(compilation_unit->inlining_depth(), 0); |
| // The allocation/initialisation logic here relies on inline_exit_offset |
| // being the offset one past the end of the bytecode. |
| DCHECK_EQ(inline_exit_offset(), bytecode().length()); |
| merge_states_[inline_exit_offset()] = nullptr; |
| new (&jump_targets_[inline_exit_offset()]) BasicBlockRef(); |
| } |
| |
| CalculatePredecessorCounts(); |
| } |
| |
| void MaglevGraphBuilder::StartPrologue() { |
| current_block_ = zone()->New<BasicBlock>(nullptr, zone()); |
| } |
| |
| BasicBlock* MaglevGraphBuilder::EndPrologue() { |
| BasicBlock* first_block = FinishBlock<Jump>({}, &jump_targets_[0]); |
| MergeIntoFrameState(first_block, 0); |
| return first_block; |
| } |
| |
| void MaglevGraphBuilder::SetArgument(int i, ValueNode* value) { |
| interpreter::Register reg = interpreter::Register::FromParameterIndex(i); |
| current_interpreter_frame_.set(reg, value); |
| } |
| |
| ValueNode* MaglevGraphBuilder::GetTaggedArgument(int i) { |
| interpreter::Register reg = interpreter::Register::FromParameterIndex(i); |
| return GetTaggedValue(reg); |
| } |
| |
| void MaglevGraphBuilder::InitializeRegister(interpreter::Register reg, |
| ValueNode* value) { |
| current_interpreter_frame_.set( |
| reg, value ? value : AddNewNode<InitialValue>({}, reg)); |
| } |
| |
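| // Initializes the interpreter frame registers: the context and closure |
| // registers, the new.target/generator register if present, and undefined for |
| // all remaining registers. |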
| void MaglevGraphBuilder::BuildRegisterFrameInitialization(ValueNode* context, |
| ValueNode* closure) { |
| InitializeRegister(interpreter::Register::current_context(), context); |
| InitializeRegister(interpreter::Register::function_closure(), closure); |
| |
| interpreter::Register new_target_or_generator_register = |
| bytecode().incoming_new_target_or_generator_register(); |
| |
| int register_index = 0; |
| // TODO(leszeks): Don't emit if not needed. |
| ValueNode* undefined_value = GetRootConstant(RootIndex::kUndefinedValue); |
| if (new_target_or_generator_register.is_valid()) { |
| int new_target_index = new_target_or_generator_register.index(); |
| for (; register_index < new_target_index; register_index++) { |
| current_interpreter_frame_.set(interpreter::Register(register_index), |
| undefined_value); |
| } |
| current_interpreter_frame_.set( |
| new_target_or_generator_register, |
| GetRegisterInput(kJavaScriptCallNewTargetRegister)); |
| register_index++; |
| } |
| for (; register_index < register_count(); register_index++) { |
| current_interpreter_frame_.set(interpreter::Register(register_index), |
| undefined_value); |
| } |
| } |
| |
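| // Pre-creates merge point frame states for loop headers (except peeled |
| // loops) and for exception handler entry points. |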
| void MaglevGraphBuilder::BuildMergeStates() { |
| for (auto& offset_and_info : bytecode_analysis().GetLoopInfos()) { |
| int offset = offset_and_info.first; |
| const compiler::LoopInfo& loop_info = offset_and_info.second; |
| if (loop_headers_to_peel_.Contains(offset)) { |
| // Peeled loops are treated like normal merges at first. We will construct |
| // the proper loop header merge state when reaching the `JumpLoop` of the |
| // peeled iteration. |
| continue; |
| } |
| const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset); |
| DCHECK_NULL(merge_states_[offset]); |
| if (v8_flags.trace_maglev_graph_building) { |
| std::cout << "- Creating loop merge state at @" << offset << std::endl; |
| } |
| merge_states_[offset] = MergePointInterpreterFrameState::NewForLoop( |
| current_interpreter_frame_, *compilation_unit_, offset, |
| NumPredecessors(offset), liveness, &loop_info); |
| } |
| |
| if (bytecode().handler_table_size() > 0) { |
| HandlerTable table(*bytecode().object()); |
| for (int i = 0; i < table.NumberOfRangeEntries(); i++) { |
| const int offset = table.GetRangeHandler(i); |
| const interpreter::Register context_reg(table.GetRangeData(i)); |
| const compiler::BytecodeLivenessState* liveness = |
| GetInLivenessFor(offset); |
| DCHECK_EQ(NumPredecessors(offset), 0); |
| DCHECK_NULL(merge_states_[offset]); |
| if (v8_flags.trace_maglev_graph_building) { |
| std::cout << "- Creating exception merge state at @" << offset |
| << ", context register r" << context_reg.index() << std::endl; |
| } |
| merge_states_[offset] = MergePointInterpreterFrameState::NewForCatchBlock( |
| *compilation_unit_, liveness, offset, context_reg, graph_); |
| } |
| } |
| } |
| |
| namespace { |
| |
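| // Helper templates that scan a bytecode's operand list at compile time to |
| // find its output register operand (if any) and the number of registers |
| // written. |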
| template <int index, interpreter::OperandType... operands> |
| struct GetResultLocationAndSizeHelper; |
| |
| // Terminal cases |
| template <int index> |
| struct GetResultLocationAndSizeHelper<index> { |
| static std::pair<interpreter::Register, int> GetResultLocationAndSize( |
| const interpreter::BytecodeArrayIterator& iterator) { |
| // TODO(leszeks): This should probably be "UNREACHABLE", but returns carry |
| // lazy deopt info for interrupt budget updates. That info is only used for |
| // stack iteration purposes, not for actual lazy deopts. |
| return {interpreter::Register::invalid_value(), 0}; |
| } |
| static bool HasOutputRegisterOperand() { return false; } |
| }; |
| |
| template <int index, interpreter::OperandType... operands> |
| struct GetResultLocationAndSizeHelper<index, interpreter::OperandType::kRegOut, |
| operands...> { |
| static std::pair<interpreter::Register, int> GetResultLocationAndSize( |
| const interpreter::BytecodeArrayIterator& iterator) { |
| // We shouldn't have any output operands other than this one. |
| DCHECK(!(GetResultLocationAndSizeHelper< |
| index + 1, operands...>::HasOutputRegisterOperand())); |
| return {iterator.GetRegisterOperand(index), 1}; |
| } |
| static bool HasOutputRegisterOperand() { return true; } |
| }; |
| |
| template <int index, interpreter::OperandType... operands> |
| struct GetResultLocationAndSizeHelper< |
| index, interpreter::OperandType::kRegOutPair, operands...> { |
| static std::pair<interpreter::Register, int> GetResultLocationAndSize( |
| const interpreter::BytecodeArrayIterator& iterator) { |
| // We shouldn't have any output operands other than this one. |
| DCHECK(!(GetResultLocationAndSizeHelper< |
| index + 1, operands...>::HasOutputRegisterOperand())); |
| return {iterator.GetRegisterOperand(index), 2}; |
| } |
| static bool HasOutputRegisterOperand() { return true; } |
| }; |
| |
| template <int index, interpreter::OperandType... operands> |
| struct GetResultLocationAndSizeHelper< |
| index, interpreter::OperandType::kRegOutTriple, operands...> { |
| static std::pair<interpreter::Register, int> GetResultLocationAndSize( |
| const interpreter::BytecodeArrayIterator& iterator) { |
| // We shouldn't have any output operands other than this one. |
| DCHECK(!(GetResultLocationAndSizeHelper< |
| index + 1, operands...>::HasOutputRegisterOperand())); |
| return {iterator.GetRegisterOperand(index), 3}; |
| } |
| static bool HasOutputRegisterOperand() { return true; } |
| }; |
| |
| // We don't support RegOutList for lazy deopts. |
| template <int index, interpreter::OperandType... operands> |
| struct GetResultLocationAndSizeHelper< |
| index, interpreter::OperandType::kRegOutList, operands...> { |
| static std::pair<interpreter::Register, int> GetResultLocationAndSize( |
| const interpreter::BytecodeArrayIterator& iterator) { |
| interpreter::RegisterList list = iterator.GetRegisterListOperand(index); |
| return {list.first_register(), list.register_count()}; |
| } |
| static bool HasOutputRegisterOperand() { return true; } |
| }; |
| |
| // Induction case. |
| template <int index, interpreter::OperandType operand, |
| interpreter::OperandType... operands> |
| struct GetResultLocationAndSizeHelper<index, operand, operands...> { |
| static std::pair<interpreter::Register, int> GetResultLocationAndSize( |
| const interpreter::BytecodeArrayIterator& iterator) { |
| return GetResultLocationAndSizeHelper< |
| index + 1, operands...>::GetResultLocationAndSize(iterator); |
| } |
| static bool HasOutputRegisterOperand() { |
| return GetResultLocationAndSizeHelper< |
| index + 1, operands...>::HasOutputRegisterOperand(); |
| } |
| }; |
| |
| template <interpreter::Bytecode bytecode, |
| interpreter::ImplicitRegisterUse implicit_use, |
| interpreter::OperandType... operands> |
| std::pair<interpreter::Register, int> GetResultLocationAndSizeForBytecode( |
| const interpreter::BytecodeArrayIterator& iterator) { |
| // We don't support output registers for implicit registers. |
| DCHECK(!interpreter::BytecodeOperands::WritesImplicitRegister(implicit_use)); |
| if (interpreter::BytecodeOperands::WritesAccumulator(implicit_use)) { |
| // If we write the accumulator, we shouldn't also write an output register. |
| DCHECK(!(GetResultLocationAndSizeHelper< |
| 0, operands...>::HasOutputRegisterOperand())); |
| return {interpreter::Register::virtual_accumulator(), 1}; |
| } |
| |
| // Use template magic to output the appropriate GetRegisterOperand call and |
| // size for this bytecode. |
| return GetResultLocationAndSizeHelper< |
| 0, operands...>::GetResultLocationAndSize(iterator); |
| } |
| |
| } // namespace |
| |
| std::pair<interpreter::Register, int> |
| MaglevGraphBuilder::GetResultLocationAndSize() const { |
| using Bytecode = interpreter::Bytecode; |
| using OperandType = interpreter::OperandType; |
| using ImplicitRegisterUse = interpreter::ImplicitRegisterUse; |
| Bytecode bytecode = iterator_.current_bytecode(); |
| // TODO(leszeks): Only emit these cases for bytecodes we know can lazy deopt. |
| switch (bytecode) { |
| #define CASE(Name, ...) \ |
| case Bytecode::k##Name: \ |
| return GetResultLocationAndSizeForBytecode<Bytecode::k##Name, \ |
| __VA_ARGS__>(iterator_); |
| BYTECODE_LIST(CASE) |
| #undef CASE |
| } |
| UNREACHABLE(); |
| } |
| |
| #ifdef DEBUG |
| bool MaglevGraphBuilder::HasOutputRegister(interpreter::Register reg) const { |
| interpreter::Bytecode bytecode = iterator_.current_bytecode(); |
| if (reg == interpreter::Register::virtual_accumulator()) { |
| return interpreter::Bytecodes::WritesAccumulator(bytecode); |
| } |
| for (int i = 0; i < interpreter::Bytecodes::NumberOfOperands(bytecode); ++i) { |
| if (interpreter::Bytecodes::IsRegisterOutputOperandType( |
| interpreter::Bytecodes::GetOperandType(bytecode, i))) { |
| interpreter::Register operand_reg = iterator_.GetRegisterOperand(i); |
| int operand_range = iterator_.GetRegisterOperandRange(i); |
| if (base::IsInRange(reg.index(), operand_reg.index(), |
| operand_reg.index() + operand_range)) { |
| return true; |
| } |
| } |
| } |
| return false; |
| } |
| #endif |
| |
| DeoptFrame* MaglevGraphBuilder::GetParentDeoptFrame() { |
| if (parent_ == nullptr) return nullptr; |
| if (parent_deopt_frame_ == nullptr) { |
| // The parent resumes after the call, which is roughly equivalent to a lazy |
| // deopt. Use the helper function directly so that we can mark the |
| // accumulator as dead (since it'll be overwritten by this function's |
| // return value anyway). |
| // TODO(leszeks): This is true for our current set of |
| // inlinings/continuations, but there might be cases in the future where it |
| // isn't. We may need to store the relevant overwritten register in |
| // LazyDeoptFrameScope. |
| DCHECK(interpreter::Bytecodes::WritesAccumulator( |
| parent_->iterator_.current_bytecode())); |
| |
| parent_deopt_frame_ = |
| zone()->New<DeoptFrame>(parent_->GetDeoptFrameForLazyDeoptHelper( |
| parent_->current_lazy_deopt_scope_, true)); |
| if (inlined_arguments_) { |
| parent_deopt_frame_ = zone()->New<InlinedArgumentsDeoptFrame>( |
| *compilation_unit_, caller_bytecode_offset_, GetClosure(), |
| *inlined_arguments_, parent_deopt_frame_); |
| } |
| } |
| return parent_deopt_frame_; |
| } |
| |
| DeoptFrame MaglevGraphBuilder::GetLatestCheckpointedFrame() { |
| if (!latest_checkpointed_frame_) { |
| // TODO(leszeks): Figure out a way of handling eager continuations. |
| DCHECK_NULL(current_lazy_deopt_scope_); |
| latest_checkpointed_frame_.emplace( |
| *compilation_unit_, |
| zone()->New<CompactInterpreterFrameState>( |
| *compilation_unit_, GetInLiveness(), current_interpreter_frame_), |
| GetClosure(), BytecodeOffset(iterator_.current_offset()), |
| current_source_position_, GetParentDeoptFrame()); |
| } |
| return *latest_checkpointed_frame_; |
| } |
| |
| DeoptFrame MaglevGraphBuilder::GetDeoptFrameForLazyDeopt() { |
| return GetDeoptFrameForLazyDeoptHelper(current_lazy_deopt_scope_, false); |
| } |
| |
| DeoptFrame MaglevGraphBuilder::GetDeoptFrameForLazyDeoptHelper( |
| LazyDeoptFrameScope* scope, bool mark_accumulator_dead) { |
| if (scope == nullptr) { |
| // Potentially copy the out liveness if we want to explicitly drop the |
| // accumulator. |
| const compiler::BytecodeLivenessState* liveness = GetOutLiveness(); |
| if (mark_accumulator_dead && liveness->AccumulatorIsLive()) { |
| compiler::BytecodeLivenessState* liveness_copy = |
| zone()->New<compiler::BytecodeLivenessState>(*liveness, zone()); |
| liveness_copy->MarkAccumulatorDead(); |
| liveness = liveness_copy; |
| } |
| return InterpretedDeoptFrame( |
| *compilation_unit_, |
| zone()->New<CompactInterpreterFrameState>(*compilation_unit_, liveness, |
| current_interpreter_frame_), |
| GetClosure(), BytecodeOffset(iterator_.current_offset()), |
| current_source_position_, GetParentDeoptFrame()); |
| } |
| |
| // We currently only support builtin continuations for bytecodes that write |
| // to the accumulator. |
| DCHECK( |
| interpreter::Bytecodes::WritesAccumulator(iterator_.current_bytecode())); |
| // Mark the accumulator dead in parent frames since we know that the |
| // continuation will write it. |
| return DeoptFrame(scope->data(), |
| zone()->New<DeoptFrame>(GetDeoptFrameForLazyDeoptHelper( |
| scope->parent(), |
| scope->data().tag() == |
| DeoptFrame::FrameType::kBuiltinContinuationFrame))); |
| } |
| |
| InterpretedDeoptFrame MaglevGraphBuilder::GetDeoptFrameForEntryStackCheck() { |
| DCHECK_EQ(iterator_.current_offset(), 0); |
| DCHECK_NULL(parent_); |
| return InterpretedDeoptFrame( |
| *compilation_unit_, |
| zone()->New<CompactInterpreterFrameState>( |
| *compilation_unit_, GetInLivenessFor(0), current_interpreter_frame_), |
| GetClosure(), BytecodeOffset(kFunctionEntryBytecodeOffset), |
| current_source_position_, nullptr); |
| } |
| |
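| // Returns a tagged version of |value|, reusing the cached tagged alternative |
| // if one exists and otherwise emitting the appropriate tagging conversion |
| // node. |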
| ValueNode* MaglevGraphBuilder::GetTaggedValue( |
| ValueNode* value, UseReprHintRecording record_use_repr_hint) { |
| if (V8_LIKELY(record_use_repr_hint == UseReprHintRecording::kRecord)) { |
| RecordUseReprHintIfPhi(value, UseRepresentation::kTagged); |
| } |
| |
| ValueRepresentation representation = |
| value->properties().value_representation(); |
| if (representation == ValueRepresentation::kTagged) return value; |
| |
| NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value); |
| if (node_info->tagged_alternative != nullptr) { |
| return node_info->tagged_alternative; |
| } |
| |
| switch (representation) { |
| case ValueRepresentation::kInt32: { |
| if (NodeTypeIsSmi(node_info->type)) { |
| return node_info->tagged_alternative = |
| AddNewNode<UnsafeSmiTag>({value}); |
| } |
| return node_info->tagged_alternative = AddNewNode<Int32ToNumber>({value}); |
| } |
| case ValueRepresentation::kUint32: { |
| if (NodeTypeIsSmi(node_info->type)) { |
| return node_info->tagged_alternative = |
| AddNewNode<UnsafeSmiTag>({value}); |
| } |
| return node_info->tagged_alternative = |
| AddNewNode<Uint32ToNumber>({value}); |
| } |
| case ValueRepresentation::kFloat64: { |
| return node_info->tagged_alternative = |
| AddNewNode<Float64ToTagged>({value}); |
| } |
| case ValueRepresentation::kHoleyFloat64: { |
| return node_info->tagged_alternative = |
| AddNewNode<HoleyFloat64ToTagged>({value}); |
| } |
| |
| case ValueRepresentation::kTagged: |
| case ValueRepresentation::kWord64: |
| UNREACHABLE(); |
| } |
| UNREACHABLE(); |
| } |
| |
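| // Like GetTaggedValue, but additionally checks that the result is a Smi, |
| // either statically or by emitting a checked conversion that deopts |
| // otherwise. |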
| ValueNode* MaglevGraphBuilder::GetSmiValue( |
| ValueNode* value, UseReprHintRecording record_use_repr_hint) { |
| if (V8_LIKELY(record_use_repr_hint == UseReprHintRecording::kRecord)) { |
| RecordUseReprHintIfPhi(value, UseRepresentation::kTagged); |
| } |
| |
| NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value); |
| |
| ValueRepresentation representation = |
| value->properties().value_representation(); |
| if (representation == ValueRepresentation::kTagged) { |
| BuildCheckSmi(value, !value->Is<Phi>()); |
| return value; |
| } |
| |
| if (node_info->tagged_alternative != nullptr) { |
| BuildCheckSmi(node_info->tagged_alternative, !value->Is<Phi>()); |
| return node_info->tagged_alternative; |
| } |
| |
| switch (representation) { |
| case ValueRepresentation::kInt32: { |
| if (NodeTypeIsSmi(node_info->type)) { |
| return node_info->tagged_alternative = |
| AddNewNode<UnsafeSmiTag>({value}); |
| } |
| return node_info->tagged_alternative = |
| AddNewNode<CheckedSmiTagInt32>({value}); |
| } |
| case ValueRepresentation::kUint32: { |
| if (NodeTypeIsSmi(node_info->type)) { |
| return node_info->tagged_alternative = |
| AddNewNode<UnsafeSmiTag>({value}); |
| } |
| return node_info->tagged_alternative = |
| AddNewNode<CheckedSmiTagUint32>({value}); |
| } |
| case ValueRepresentation::kFloat64: { |
| return node_info->tagged_alternative = |
| AddNewNode<CheckedSmiTagFloat64>({value}); |
| } |
| case ValueRepresentation::kHoleyFloat64: { |
| return node_info->tagged_alternative = |
| AddNewNode<CheckedSmiTagFloat64>({value}); |
| } |
| |
| case ValueRepresentation::kTagged: |
| case ValueRepresentation::kWord64: |
| UNREACHABLE(); |
| } |
| UNREACHABLE(); |
| } |
| |
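| // Returns the value of |reg| as an internalized string, emitting a |
| // CheckedInternalizedString conversion if its type is not already known. |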
| ValueNode* MaglevGraphBuilder::GetInternalizedString( |
| interpreter::Register reg) { |
| ValueNode* node = GetTaggedValue(reg); |
| if (CheckType(node, NodeType::kInternalizedString)) return node; |
| node = AddNewNode<CheckedInternalizedString>({node}); |
| current_interpreter_frame_.set(reg, node); |
| return node; |
| } |
| |
| namespace { |
| NodeType ToNumberHintToNodeType(ToNumberHint conversion_type) { |
| switch (conversion_type) { |
| case ToNumberHint::kAssumeSmi: |
| return NodeType::kSmi; |
| case ToNumberHint::kDisallowToNumber: |
| case ToNumberHint::kAssumeNumber: |
| return NodeType::kNumber; |
| case ToNumberHint::kAssumeNumberOrOddball: |
| return NodeType::kNumberOrOddball; |
| } |
| } |
| TaggedToFloat64ConversionType ToNumberHintToConversionType( |
| ToNumberHint conversion_type) { |
| switch (conversion_type) { |
| case ToNumberHint::kAssumeSmi: |
| UNREACHABLE(); |
| case ToNumberHint::kDisallowToNumber: |
| case ToNumberHint::kAssumeNumber: |
| return TaggedToFloat64ConversionType::kOnlyNumber; |
| case ToNumberHint::kAssumeNumberOrOddball: |
| return TaggedToFloat64ConversionType::kNumberOrOddball; |
| } |
| } |
| } // namespace |
| |
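| // Returns |value| truncated to an int32, as if by ToNumber followed by a |
| // truncating int32 conversion, folding constants where possible and caching |
| // the conversion on the value's NodeInfo. |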
| ValueNode* MaglevGraphBuilder::GetTruncatedInt32ForToNumber(ValueNode* value, |
| ToNumberHint hint) { |
| RecordUseReprHintIfPhi(value, UseRepresentation::kTruncatedInt32); |
| |
| ValueRepresentation representation = |
| value->properties().value_representation(); |
| if (representation == ValueRepresentation::kInt32) return value; |
| if (representation == ValueRepresentation::kUint32) { |
| // This node is cheap (no code gen, just a bitcast), so don't cache it. |
| return AddNewNode<TruncateUint32ToInt32>({value}); |
| } |
| |
| // Process constants first to avoid allocating NodeInfo for them. |
| switch (value->opcode()) { |
| case Opcode::kConstant: { |
| compiler::ObjectRef object = value->Cast<Constant>()->object(); |
| if (!object.IsHeapNumber()) break; |
| int32_t truncated_value = DoubleToInt32(object.AsHeapNumber().value()); |
| if (!Smi::IsValid(truncated_value)) break; |
| return GetInt32Constant(truncated_value); |
| } |
| case Opcode::kSmiConstant: |
| return GetInt32Constant(value->Cast<SmiConstant>()->value().value()); |
| case Opcode::kRootConstant: { |
| Object root_object = |
| local_isolate_->root(value->Cast<RootConstant>()->index()); |
| if (!root_object.IsOddball(local_isolate_)) break; |
| int32_t truncated_value = |
| DoubleToInt32(Oddball::cast(root_object).to_number_raw()); |
| // All oddball ToNumber truncations are valid Smis. |
| DCHECK(Smi::IsValid(truncated_value)); |
| return GetInt32Constant(truncated_value); |
| } |
| case Opcode::kFloat64Constant: { |
| int32_t truncated_value = |
| DoubleToInt32(value->Cast<Float64Constant>()->value().get_scalar()); |
| if (!Smi::IsValid(truncated_value)) break; |
| return GetInt32Constant(truncated_value); |
| } |
| |
| // We could emit unconditional eager deopts for other kinds of constants, but |
| // it's not necessary: the appropriate checking conversion nodes will deopt. |
| default: |
| break; |
| } |
| |
| NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value); |
| // If there is an int32_alternative, then that works as a truncated value |
| // too. |
| if (node_info->int32_alternative != nullptr) { |
| return node_info->int32_alternative; |
| } |
| if (node_info->truncated_int32_to_number != nullptr) { |
| return node_info->truncated_int32_to_number; |
| } |
| |
| switch (representation) { |
| case ValueRepresentation::kTagged: { |
| NodeType old_type; |
| NodeType desired_type = ToNumberHintToNodeType(hint); |
| EnsureType(value, desired_type, &old_type); |
| if (NodeTypeIsSmi(old_type)) { |
| // Smi untagging can be cached as an int32 alternative, not just a |
| // truncated alternative. |
| return node_info->int32_alternative = |
| AddNewNode<UnsafeSmiUntag>({value}); |
| } |
| if (desired_type == NodeType::kSmi) { |
| return node_info->int32_alternative = |
| AddNewNode<CheckedSmiUntag>({value}); |
| } |
| TaggedToFloat64ConversionType conversion_type = |
| ToNumberHintToConversionType(hint); |
| if (NodeTypeIs(old_type, desired_type)) { |
| return node_info->truncated_int32_to_number = |
| AddNewNode<TruncateNumberOrOddballToInt32>({value}, |
| conversion_type); |
| } |
| return node_info->truncated_int32_to_number = |
| AddNewNode<CheckedTruncateNumberOrOddballToInt32>( |
| {value}, conversion_type); |
| } |
| case ValueRepresentation::kFloat64: |
| // Ignore conversion_type for HoleyFloat64 and treat it like Float64. |
| // ToNumber of undefined is a NaN anyway, so we simply truncate away the |
| // NaN-ness of the hole and don't need extra oddball checks; we can ignore |
| // the hint (though we'll miss updating the feedback). |
| case ValueRepresentation::kHoleyFloat64: { |
| return node_info->truncated_int32_to_number = |
| AddNewNode<TruncateFloat64ToInt32>({value}); |
| } |
| |
| case ValueRepresentation::kInt32: |
| case ValueRepresentation::kUint32: |
| case ValueRepresentation::kWord64: |
| UNREACHABLE(); |
| } |
| UNREACHABLE(); |
| } |
| |
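| // Returns |value| as an int32, emitting checked conversions that deopt if |
| // the value is not representable as an int32. |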
| ValueNode* MaglevGraphBuilder::GetInt32(ValueNode* value) { |
| RecordUseReprHintIfPhi(value, UseRepresentation::kInt32); |
| |
| ValueRepresentation representation = |
| value->properties().value_representation(); |
| if (representation == ValueRepresentation::kInt32) return value; |
| |
| // Process constants first to avoid allocating NodeInfo for them. |
| switch (value->opcode()) { |
| case Opcode::kSmiConstant: |
| return GetInt32Constant(value->Cast<SmiConstant>()->value().value()); |
| case Opcode::kFloat64Constant: { |
| double double_value = |
| value->Cast<Float64Constant>()->value().get_scalar(); |
| if (!IsSmiDouble(double_value)) break; |
| return GetInt32Constant( |
| FastD2I(value->Cast<Float64Constant>()->value().get_scalar())); |
| } |
| |
| // We could emit unconditional eager deopts for other kinds of constants, but |
| // it's not necessary: the appropriate checking conversion nodes will deopt. |
| default: |
| break; |
| } |
| |
| NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value); |
| if (node_info->int32_alternative != nullptr) { |
| return node_info->int32_alternative; |
| } |
| |
| switch (representation) { |
| case ValueRepresentation::kTagged: { |
| // TODO(leszeks): Widen this path to allow HeapNumbers with Int32 values. |
| return node_info->int32_alternative = BuildSmiUntag(value); |
| } |
| case ValueRepresentation::kUint32: { |
| if (node_info->is_smi()) { |
| return node_info->int32_alternative = |
| AddNewNode<TruncateUint32ToInt32>({value}); |
| } |
| return node_info->int32_alternative = |
| AddNewNode<CheckedUint32ToInt32>({value}); |
| } |
| case ValueRepresentation::kFloat64: |
| // The check here will also work for the hole NaN, so we can treat |
| // HoleyFloat64 as Float64. |
| case ValueRepresentation::kHoleyFloat64: { |
| return node_info->int32_alternative = |
| AddNewNode<CheckedTruncateFloat64ToInt32>({value}); |
| } |
| |
| case ValueRepresentation::kInt32: |
| case ValueRepresentation::kWord64: |
| UNREACHABLE(); |
| } |
| UNREACHABLE(); |
| } |
| |
| ValueNode* MaglevGraphBuilder::GetFloat64(ValueNode* value) { |
| RecordUseReprHintIfPhi(value, UseRepresentation::kFloat64); |
| |
| return GetFloat64ForToNumber(value, ToNumberHint::kDisallowToNumber); |
| } |
| |
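| // Returns the float64 value of |value| under the given ToNumber hint, |
| // folding constants and caching exact conversions as the value's |
| // float64 alternative. |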
| ValueNode* MaglevGraphBuilder::GetFloat64ForToNumber(ValueNode* value, |
| ToNumberHint hint) { |
| ValueRepresentation representation = |
| value->properties().value_representation(); |
| if (representation == ValueRepresentation::kFloat64) return value; |
| |
| // Process constants first to avoid allocating NodeInfo for them. |
| switch (value->opcode()) { |
| case Opcode::kConstant: { |
| compiler::ObjectRef object = value->Cast<Constant>()->object(); |
| if (object.IsHeapNumber()) { |
| return GetFloat64Constant(object.AsHeapNumber().value()); |
| } |
| // Oddballs should be RootConstants. |
| DCHECK(!object.object()->IsOddball()); |
| break; |
| } |
| case Opcode::kSmiConstant: |
| return GetFloat64Constant(value->Cast<SmiConstant>()->value().value()); |
| case Opcode::kInt32Constant: |
| return GetFloat64Constant(value->Cast<Int32Constant>()->value()); |
| case Opcode::kRootConstant: { |
| Object root_object = |
| local_isolate_->root(value->Cast<RootConstant>()->index()); |
| if (hint != ToNumberHint::kDisallowToNumber && root_object.IsOddball()) { |
| return GetFloat64Constant(Oddball::cast(root_object).to_number_raw()); |
| } |
| if (root_object.IsHeapNumber()) { |
| return GetFloat64Constant(HeapNumber::cast(root_object).value()); |
| } |
| break; |
| } |
| |
| // We could emit unconditional eager deopts for other kinds of constants, but |
| // it's not necessary: the appropriate checking conversion nodes will deopt. |
| default: |
| break; |
| } |
| |
| NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value); |
| if (node_info->float64_alternative != nullptr) { |
| return node_info->float64_alternative; |
| } |
| |
| switch (representation) { |
| case ValueRepresentation::kTagged: { |
| switch (hint) { |
| case ToNumberHint::kAssumeSmi: |
| // Get the float64 value of a Smi value via its int32 representation. |
| return GetFloat64(GetInt32(value)); |
| case ToNumberHint::kDisallowToNumber: |
| case ToNumberHint::kAssumeNumber: |
| // Number->Float64 conversions are exact alternatives, so they can |
| // also become the canonical float64_alternative. |
| return node_info->float64_alternative = BuildNumberOrOddballToFloat64( |
| value, TaggedToFloat64ConversionType::kOnlyNumber); |
| case ToNumberHint::kAssumeNumberOrOddball: { |
| // NumberOrOddball->Float64 conversions are not exact alternatives, |
| // since they lose the information that this is an oddball, so they |
| // can only become the canonical float64_alternative if they are a |
| // known number (and therefore not oddball). |
| ValueNode* float64_node = BuildNumberOrOddballToFloat64( |
| value, TaggedToFloat64ConversionType::kNumberOrOddball); |
| if (NodeTypeIsNumber(node_info->type)) { |
| node_info->float64_alternative = float64_node; |
| } |
| return float64_node; |
| } |
| } |
| } |
| case ValueRepresentation::kInt32: |
| return node_info->float64_alternative = |
| AddNewNode<ChangeInt32ToFloat64>({value}); |
| case ValueRepresentation::kUint32: |
| return node_info->float64_alternative = |
| AddNewNode<ChangeUint32ToFloat64>({value}); |
| case ValueRepresentation::kHoleyFloat64: { |
| switch (hint) { |
| case ToNumberHint::kAssumeSmi: |
| case ToNumberHint::kDisallowToNumber: |
| case ToNumberHint::kAssumeNumber: |
| // Number->Float64 conversions are exact alternatives, so they can |
| // also become the canonical float64_alternative. |
| return node_info->float64_alternative = |
| AddNewNode<CheckedHoleyFloat64ToFloat64>({value}); |
| case ToNumberHint::kAssumeNumberOrOddball: |
| // NumberOrOddball->Float64 conversions are not exact alternatives, |
| // since they lose the information that this is an oddball, so they |
| // cannot become the canonical float64_alternative. |
| return AddNewNode<HoleyFloat64ToMaybeNanFloat64>({value}); |
| } |
| } |
| case ValueRepresentation::kFloat64: |
| case ValueRepresentation::kWord64: |
| UNREACHABLE(); |
| } |
| UNREACHABLE(); |
| } |
| |
| ValueNode* MaglevGraphBuilder::GetHoleyFloat64ForToNumber(ValueNode* value, |
| ToNumberHint hint) { |
| RecordUseReprHintIfPhi(value, UseRepresentation::kHoleyFloat64); |
| |
| ValueRepresentation representation = |
| value->properties().value_representation(); |
| // The hint is irrelevant if the value is already a HoleyFloat64. |
| if (representation == ValueRepresentation::kHoleyFloat64) return value; |
| return GetFloat64ForToNumber(value, hint); |
| } |
| |
| namespace { |
| int32_t ClampToUint8(int32_t value) { |
| if (value < 0) return 0; |
| if (value > 255) return 255; |
| return value; |
| } |
| } // namespace |
| |
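| // Returns |value| converted to an int32 clamped to the uint8 range (0..255), |
| // choosing the conversion based on its current representation. |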
| ValueNode* MaglevGraphBuilder::GetUint8ClampedForToNumber(ValueNode* value, |
| ToNumberHint hint) { |
| switch (value->properties().value_representation()) { |
| case ValueRepresentation::kWord64: |
| UNREACHABLE(); |
| case ValueRepresentation::kTagged: { |
| if (SmiConstant* constant = value->TryCast<SmiConstant>()) { |
| return GetInt32Constant(ClampToUint8(constant->value().value())); |
| } |
| NodeInfo* node_info = known_node_aspects().GetOrCreateInfoFor(value); |
| if (node_info->int32_alternative != nullptr) { |
| return AddNewNode<Int32ToUint8Clamped>({node_info->int32_alternative}); |
| } |
| return AddNewNode<CheckedNumberToUint8Clamped>({value}); |
| } |
| // Ignore conversion_type for HoleyFloat64 and treat it like Float64. |
| // ToNumber of undefined is a NaN anyway, so we simply truncate away the |
| // NaN-ness of the hole and don't need extra oddball checks; we can ignore |
| // the hint (though we'll miss updating the feedback). |
| case ValueRepresentation::kFloat64: |
| case ValueRepresentation::kHoleyFloat64: |
| // TODO(leszeks): Handle Float64Constant, which requires the correct |
| // rounding for clamping. |
| return AddNewNode<Float64ToUint8Clamped>({value}); |
| case ValueRepresentation::kInt32: |
| if (Int32Constant* constant = value->TryCast<Int32Constant>()) { |
| return GetInt32Constant(ClampToUint8(constant->value())); |
| } |
| return AddNewNode<Int32ToUint8Clamped>({value}); |
| case ValueRepresentation::kUint32: |
| return AddNewNode<Uint32ToUint8Clamped>({value}); |
| } |
| UNREACHABLE(); |
| } |
| |
| namespace { |
| template <Operation kOperation> |
| struct NodeForOperationHelper; |
| |
| #define NODE_FOR_OPERATION_HELPER(Name) \ |
| template <> \ |
| struct NodeForOperationHelper<Operation::k##Name> { \ |
| using generic_type = Generic##Name; \ |
| }; |
| OPERATION_LIST(NODE_FOR_OPERATION_HELPER) |
| #undef NODE_FOR_OPERATION_HELPER |
| |
| template <Operation kOperation> |
| using GenericNodeForOperation = |
| typename NodeForOperationHelper<kOperation>::generic_type; |
| |
| // Bitwise operations reinterpret their numeric input as Int32 bits, which |
| // means we want to do slightly different conversions. |
| template <Operation kOperation> |
| constexpr bool BinaryOperationIsBitwiseInt32() { |
| switch (kOperation) { |
| case Operation::kBitwiseNot: |
| case Operation::kBitwiseAnd: |
| case Operation::kBitwiseOr: |
| case Operation::kBitwiseXor: |
| case Operation::kShiftLeft: |
| case Operation::kShiftRight: |
| case Operation::kShiftRightLogical: |
| return true; |
| default: |
| return false; |
| } |
| } |
| } // namespace |
| |
| // MAP_BINARY_OPERATION_TO_INT32_NODE entries are tuples with the following |
| // format: |
| // - Operation name, |
| // - Int32 operation node, |
| // - Identity of the int32 operation (e.g., 0 for add/sub and 1 for mul/div), |
| // if it exists, or otherwise {}. |
| #define MAP_BINARY_OPERATION_TO_INT32_NODE(V) \ |
| V(Add, Int32AddWithOverflow, 0) \ |
| V(Subtract, Int32SubtractWithOverflow, 0) \ |
| V(Multiply, Int32MultiplyWithOverflow, 1) \ |
| V(Divide, Int32DivideWithOverflow, 1) \ |
| V(Modulus, Int32ModulusWithOverflow, {}) \ |
| V(BitwiseAnd, Int32BitwiseAnd, ~0) \ |
| V(BitwiseOr, Int32BitwiseOr, 0) \ |
| V(BitwiseXor, Int32BitwiseXor, 0) \ |
| V(ShiftLeft, Int32ShiftLeft, 0) \ |
| V(ShiftRight, Int32ShiftRight, 0) \ |
| V(ShiftRightLogical, Int32ShiftRightLogical, {}) |
| |
| #define MAP_UNARY_OPERATION_TO_INT32_NODE(V) \ |
| V(BitwiseNot, Int32BitwiseNot) \ |
| V(Increment, Int32IncrementWithOverflow) \ |
| V(Decrement, Int32DecrementWithOverflow) \ |
| V(Negate, Int32NegateWithOverflow) |
| |
| #define MAP_COMPARE_OPERATION_TO_INT32_NODE(V) \ |
| V(Equal, Int32Equal) \ |
| V(StrictEqual, Int32StrictEqual) \ |
| V(LessThan, Int32LessThan) \ |
| V(LessThanOrEqual, Int32LessThanOrEqual) \ |
| V(GreaterThan, Int32GreaterThan) \ |
| V(GreaterThanOrEqual, Int32GreaterThanOrEqual) |
| |
| // MAP_OPERATION_TO_FLOAT64_NODE entries are tuples with the following |
| // format: (Operation name, Float64 operation node). |
| #define MAP_OPERATION_TO_FLOAT64_NODE(V) \ |
| V(Add, Float64Add) \ |
| V(Subtract, Float64Subtract) \ |
| V(Multiply, Float64Multiply) \ |
| V(Divide, Float64Divide) \ |
| V(Modulus, Float64Modulus) \ |
| V(Negate, Float64Negate) \ |
| V(Exponentiate, Float64Exponentiate) |
| |
| #define MAP_COMPARE_OPERATION_TO_FLOAT64_NODE(V) \ |
| V(Equal, Float64Equal) \ |
| V(StrictEqual, Float64StrictEqual) \ |
| V(LessThan, Float64LessThan) \ |
| V(LessThanOrEqual, Float64LessThanOrEqual) \ |
| V(GreaterThan, Float64GreaterThan) \ |
| V(GreaterThanOrEqual, Float64GreaterThanOrEqual) |
| |
| template <Operation kOperation> |
| static constexpr base::Optional<int> Int32Identity() { |
| switch (kOperation) { |
| #define CASE(op, _, identity) \ |
| case Operation::k##op: \ |
| return identity; |
| MAP_BINARY_OPERATION_TO_INT32_NODE(CASE) |
| #undef CASE |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| namespace { |
| template <Operation kOperation> |
| struct Int32NodeForHelper; |
| #define SPECIALIZATION(op, OpNode, ...) \ |
| template <> \ |
| struct Int32NodeForHelper<Operation::k##op> { \ |
| using type = OpNode; \ |
| }; |
| MAP_UNARY_OPERATION_TO_INT32_NODE(SPECIALIZATION) |
| MAP_BINARY_OPERATION_TO_INT32_NODE(SPECIALIZATION) |
| MAP_COMPARE_OPERATION_TO_INT32_NODE(SPECIALIZATION) |
| #undef SPECIALIZATION |
| |
| template <Operation kOperation> |
| using Int32NodeFor = typename Int32NodeForHelper<kOperation>::type; |
| |
| template <Operation kOperation> |
| struct Float64NodeForHelper; |
| #define SPECIALIZATION(op, OpNode) \ |
| template <> \ |
| struct Float64NodeForHelper<Operation::k##op> { \ |
| using type = OpNode; \ |
| }; |
| MAP_OPERATION_TO_FLOAT64_NODE(SPECIALIZATION) |
| MAP_COMPARE_OPERATION_TO_FLOAT64_NODE(SPECIALIZATION) |
| #undef SPECIALIZATION |
| |
| template <Operation kOperation> |
| using Float64NodeFor = typename Float64NodeForHelper<kOperation>::type; |
| } // namespace |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildGenericUnaryOperationNode() { |
| FeedbackSlot slot_index = GetSlotOperand(0); |
| ValueNode* value = GetAccumulatorTagged(); |
| SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>( |
| {value}, compiler::FeedbackSource{feedback(), slot_index})); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildGenericBinaryOperationNode() { |
| ValueNode* left = LoadRegisterTagged(0); |
| ValueNode* right = GetAccumulatorTagged(); |
| FeedbackSlot slot_index = GetSlotOperand(1); |
| SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>( |
| {left, right}, compiler::FeedbackSource{feedback(), slot_index})); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildGenericBinarySmiOperationNode() { |
| ValueNode* left = GetAccumulatorTagged(); |
| int constant = iterator_.GetImmediateOperand(0); |
| ValueNode* right = GetSmiConstant(constant); |
| FeedbackSlot slot_index = GetSlotOperand(1); |
| SetAccumulator(AddNewNode<GenericNodeForOperation<kOperation>>( |
| {left, right}, compiler::FeedbackSource{feedback(), slot_index})); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildInt32UnaryOperationNode() { |
| // Use BuildTruncatingInt32BitwiseNotForToNumber with Smi input hint |
| // for truncating operations. |
| static_assert(!BinaryOperationIsBitwiseInt32<kOperation>()); |
| // TODO(v8:7700): Do constant folding. |
| ValueNode* value = GetAccumulatorInt32(); |
| using OpNodeT = Int32NodeFor<kOperation>; |
| SetAccumulator(AddNewNode<OpNodeT>({value})); |
| } |
| |
| void MaglevGraphBuilder::BuildTruncatingInt32BitwiseNotForToNumber( |
| ToNumberHint hint) { |
| // TODO(v8:7700): Do constant folding. |
| ValueNode* value = GetTruncatedInt32ForToNumber( |
| current_interpreter_frame_.accumulator(), hint); |
| SetAccumulator(AddNewNode<Int32BitwiseNot>({value})); |
| } |
| |
| template <Operation kOperation> |
| ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left, |
| ValueNode* right) { |
| switch (kOperation) { |
| case Operation::kModulus: |
| // Note the `x % x = 0` fold is invalid since for negative x values the |
| // result is -0.0. |
| // TODO(v8:7700): Consider re-enabling this fold if the result is used |
| // only in contexts where -0.0 is semantically equivalent to 0.0, or if x |
| // is known to be non-negative. |
| default: |
| // TODO(victorgomes): Implement more folds. |
| break; |
| } |
| return nullptr; |
| } |
| |
| template <Operation kOperation> |
| ValueNode* MaglevGraphBuilder::TryFoldInt32BinaryOperation(ValueNode* left, |
| int right) { |
| switch (kOperation) { |
| case Operation::kModulus: |
| // Note the `x % 1 = 0` and `x % -1 = 0` folds are invalid since for |
| // negative x values the result is -0.0. |
| // TODO(v8:7700): Consider re-enabling this fold if the result is used |
| // only in contexts where -0.0 is semantically equivalent to 0.0, or if x |
| // is known to be non-negative. |
| // TODO(victorgomes): We can emit a faster mod operation if {right} is a |
| // power of 2; unfortunately we need to know whether {left} is negative. |
| // Maybe emit an Int32ModulusRightIsPowerOf2? |
| default: |
| // TODO(victorgomes): Implement more folds. |
| break; |
| } |
| return nullptr; |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildInt32BinaryOperationNode() { |
| // Use BuildTruncatingInt32BinaryOperationNodeForToNumber with Smi input hint |
| // for truncating operations. |
| static_assert(!BinaryOperationIsBitwiseInt32<kOperation>()); |
| // TODO(v8:7700): Do constant folding. |
| ValueNode* left = LoadRegisterInt32(0); |
| ValueNode* right = GetAccumulatorInt32(); |
| |
| if (ValueNode* result = |
| TryFoldInt32BinaryOperation<kOperation>(left, right)) { |
| SetAccumulator(result); |
| return; |
| } |
| using OpNodeT = Int32NodeFor<kOperation>; |
| |
| SetAccumulator(AddNewNode<OpNodeT>({left, right})); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildTruncatingInt32BinaryOperationNodeForToNumber( |
| ToNumberHint hint) { |
| static_assert(BinaryOperationIsBitwiseInt32<kOperation>()); |
| // TODO(v8:7700): Do constant folding. |
| ValueNode* left; |
| ValueNode* right; |
| if (IsRegisterEqualToAccumulator(0)) { |
| left = right = GetTruncatedInt32ForToNumber( |
| current_interpreter_frame_.get(iterator_.GetRegisterOperand(0)), hint); |
| } else { |
| left = GetTruncatedInt32ForToNumber( |
| current_interpreter_frame_.get(iterator_.GetRegisterOperand(0)), hint); |
| right = GetTruncatedInt32ForToNumber( |
| current_interpreter_frame_.accumulator(), hint); |
| } |
| |
| if (ValueNode* result = |
| TryFoldInt32BinaryOperation<kOperation>(left, right)) { |
| SetAccumulator(result); |
| return; |
| } |
| SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right})); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildInt32BinarySmiOperationNode() { |
| // Use BuildTruncatingInt32BinarySmiOperationNodeForToNumber with Smi input |
| // hint for truncating operations. |
| static_assert(!BinaryOperationIsBitwiseInt32<kOperation>()); |
| // TODO(v8:7700): Do constant folding. |
| ValueNode* left = GetAccumulatorInt32(); |
| int32_t constant = iterator_.GetImmediateOperand(0); |
| if (base::Optional<int>(constant) == Int32Identity<kOperation>()) { |
| // If the constant is the unit of the operation, it already has the right |
| // value, so just return. |
| return; |
| } |
| if (ValueNode* result = |
| TryFoldInt32BinaryOperation<kOperation>(left, constant)) { |
| SetAccumulator(result); |
| return; |
| } |
| ValueNode* right = GetInt32Constant(constant); |
| |
| using OpNodeT = Int32NodeFor<kOperation>; |
| |
| SetAccumulator(AddNewNode<OpNodeT>({left, right})); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildTruncatingInt32BinarySmiOperationNodeForToNumber( |
| ToNumberHint hint) { |
| static_assert(BinaryOperationIsBitwiseInt32<kOperation>()); |
| // TODO(v8:7700): Do constant folding. |
| ValueNode* left = GetTruncatedInt32ForToNumber( |
| current_interpreter_frame_.accumulator(), hint); |
| int32_t constant = iterator_.GetImmediateOperand(0); |
| if (base::Optional<int>(constant) == Int32Identity<kOperation>()) { |
| // If the constant is the unit of the operation, it already has the right |
| // value, so use the truncated value (if not just a conversion) and return. |
| if (!left->properties().is_conversion()) { |
| current_interpreter_frame_.set_accumulator(left); |
| } |
| return; |
| } |
| if (ValueNode* result = |
| TryFoldInt32BinaryOperation<kOperation>(left, constant)) { |
| SetAccumulator(result); |
| return; |
| } |
| ValueNode* right = GetInt32Constant(constant); |
| SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right})); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildFloat64BinarySmiOperationNodeForToNumber( |
| ToNumberHint hint) { |
| // TODO(v8:7700): Do constant folding. Make sure to normalize HoleyFloat64 |
| // nodes if constant folded. |
| ValueNode* left = GetAccumulatorHoleyFloat64ForToNumber(hint); |
| double constant = static_cast<double>(iterator_.GetImmediateOperand(0)); |
| ValueNode* right = GetFloat64Constant(constant); |
| SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right})); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildFloat64UnaryOperationNodeForToNumber( |
| ToNumberHint hint) { |
| // TODO(v8:7700): Do constant folding. Make sure to normalize HoleyFloat64 |
| // nodes if constant folded. |
| ValueNode* value = GetAccumulatorHoleyFloat64ForToNumber(hint); |
| switch (kOperation) { |
| case Operation::kNegate: |
| SetAccumulator(AddNewNode<Float64Negate>({value})); |
| break; |
| case Operation::kIncrement: |
| SetAccumulator(AddNewNode<Float64Add>({value, GetFloat64Constant(1)})); |
| break; |
| case Operation::kDecrement: |
| SetAccumulator( |
| AddNewNode<Float64Subtract>({value, GetFloat64Constant(1)})); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::BuildFloat64BinaryOperationNodeForToNumber( |
| ToNumberHint hint) { |
| // TODO(v8:7700): Do constant folding. Make sure to normalize HoleyFloat64 |
| // nodes if constant folded. |
| ValueNode* left = LoadRegisterHoleyFloat64ForToNumber(0, hint); |
| ValueNode* right = GetAccumulatorHoleyFloat64ForToNumber(hint); |
| SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right})); |
| } |
| |
| namespace { |
| ToNumberHint BinopHintToToNumberHint(BinaryOperationHint hint) { |
| switch (hint) { |
| case BinaryOperationHint::kSignedSmall: |
| return ToNumberHint::kAssumeSmi; |
| case BinaryOperationHint::kSignedSmallInputs: |
| case BinaryOperationHint::kNumber: |
| return ToNumberHint::kAssumeNumber; |
| case BinaryOperationHint::kNumberOrOddball: |
| return ToNumberHint::kAssumeNumberOrOddball; |
| |
| case BinaryOperationHint::kNone: |
| case BinaryOperationHint::kString: |
| case BinaryOperationHint::kBigInt: |
| case BinaryOperationHint::kBigInt64: |
| case BinaryOperationHint::kAny: |
| UNREACHABLE(); |
| } |
| } |
| } // namespace |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::VisitUnaryOperation() { |
| FeedbackNexus nexus = FeedbackNexusForOperand(0); |
| BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback(); |
| switch (feedback_hint) { |
| case BinaryOperationHint::kNone: |
| return EmitUnconditionalDeopt( |
| DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation); |
| case BinaryOperationHint::kSignedSmall: |
| case BinaryOperationHint::kSignedSmallInputs: |
| case BinaryOperationHint::kNumber: |
| case BinaryOperationHint::kNumberOrOddball: { |
| ToNumberHint hint = BinopHintToToNumberHint(feedback_hint); |
| if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) { |
| static_assert(kOperation == Operation::kBitwiseNot); |
| return BuildTruncatingInt32BitwiseNotForToNumber(hint); |
| } else if (feedback_hint == BinaryOperationHint::kSignedSmall) { |
| return BuildInt32UnaryOperationNode<kOperation>(); |
| } |
| return BuildFloat64UnaryOperationNodeForToNumber<kOperation>(hint); |
| break; |
| } |
| case BinaryOperationHint::kString: |
| case BinaryOperationHint::kBigInt: |
| case BinaryOperationHint::kBigInt64: |
| case BinaryOperationHint::kAny: |
|       // Fall back to the generic node. |
| break; |
| } |
| BuildGenericUnaryOperationNode<kOperation>(); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::VisitBinaryOperation() { |
| FeedbackNexus nexus = FeedbackNexusForOperand(1); |
| BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback(); |
| switch (feedback_hint) { |
| case BinaryOperationHint::kNone: |
| return EmitUnconditionalDeopt( |
| DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation); |
| case BinaryOperationHint::kSignedSmall: |
| case BinaryOperationHint::kSignedSmallInputs: |
| case BinaryOperationHint::kNumber: |
| case BinaryOperationHint::kNumberOrOddball: { |
| ToNumberHint hint = BinopHintToToNumberHint(feedback_hint); |
| if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) { |
| return BuildTruncatingInt32BinaryOperationNodeForToNumber<kOperation>( |
| hint); |
| } else if (feedback_hint == BinaryOperationHint::kSignedSmall) { |
| if constexpr (kOperation == Operation::kExponentiate) { |
| // Exponentiate never updates the feedback to be a Smi. |
| UNREACHABLE(); |
| } else { |
| return BuildInt32BinaryOperationNode<kOperation>(); |
| } |
| } else { |
| return BuildFloat64BinaryOperationNodeForToNumber<kOperation>(hint); |
| } |
| break; |
| } |
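|     // With string feedback, Add can be lowered to a checked StringConcat; the |
|     // other operations on strings still use the generic node below. |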
| case BinaryOperationHint::kString: |
| if constexpr (kOperation == Operation::kAdd) { |
| ValueNode* left = LoadRegisterTagged(0); |
| ValueNode* right = GetAccumulatorTagged(); |
| BuildCheckString(left); |
| BuildCheckString(right); |
| SetAccumulator(AddNewNode<StringConcat>({left, right})); |
| return; |
| } |
| break; |
| case BinaryOperationHint::kBigInt: |
| case BinaryOperationHint::kBigInt64: |
| case BinaryOperationHint::kAny: |
|       // Fall back to the generic node. |
| break; |
| } |
| BuildGenericBinaryOperationNode<kOperation>(); |
| } |
| |
| template <Operation kOperation> |
| void MaglevGraphBuilder::VisitBinarySmiOperation() { |
| FeedbackNexus nexus = FeedbackNexusForOperand(1); |
| BinaryOperationHint feedback_hint = nexus.GetBinaryOperationFeedback(); |
| switch (feedback_hint) { |
| case BinaryOperationHint::kNone: |
| return EmitUnconditionalDeopt( |
| DeoptimizeReason::kInsufficientTypeFeedbackForBinaryOperation); |
| case BinaryOperationHint::kSignedSmall: |
| case BinaryOperationHint::kSignedSmallInputs: |
| case BinaryOperationHint::kNumber: |
| case BinaryOperationHint::kNumberOrOddball: { |
| ToNumberHint hint = BinopHintToToNumberHint(feedback_hint); |
| if constexpr (BinaryOperationIsBitwiseInt32<kOperation>()) { |
| return BuildTruncatingInt32BinarySmiOperationNodeForToNumber< |
| kOperation>(hint); |
| } else if (feedback_hint == BinaryOperationHint::kSignedSmall) { |
| if constexpr (kOperation == Operation::kExponentiate) { |
| // Exponentiate never updates the feedback to be a Smi. |
| UNREACHABLE(); |
| } else { |
| return BuildInt32BinarySmiOperationNode<kOperation>(); |
| } |
| } else { |
| return BuildFloat64BinarySmiOperationNodeForToNumber<kOperation>(hint); |
| } |
| break; |
| } |
| case BinaryOperationHint::kString: |
| case BinaryOperationHint::kBigInt: |
| case BinaryOperationHint::kBigInt64: |
| case BinaryOperationHint::kAny: |
|       // Fall back to the generic node. |
| break; |
| } |
| BuildGenericBinarySmiOperationNode<kOperation>(); |
| } |
| |
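| // Looks ahead from the current bytecode for a conditional jump that consumes |
| // the boolean produced by the current test, so that the test and the branch |
| // can be fused into a single compare-and-branch control node. Only bytecodes |
| // whose effect on the accumulator can be emulated by the fused branch (Mov, |
| // ToBoolean, LogicalNot/ToBooleanLogicalNot, and Star into a dead register) |
| // may appear in between; a merge point or any other bytecode aborts the |
| // search. For example, |
| // |
| //   TestLessThan r1, [0] |
| //   LogicalNot |
| //   JumpIfFalse @target |
| // |
| // can be fused, with the LogicalNot folded away by swapping branch targets. |
| // Returns the offset of the jump, or nothing if no fusable branch was found. |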
| base::Optional<int> MaglevGraphBuilder::TryFindNextBranch() { |
| DisallowGarbageCollection no_gc; |
| // Copy the iterator so we can search for the next branch without changing |
| // current iterator state. |
| interpreter::BytecodeArrayIterator it(iterator_.bytecode_array(), |
| iterator_.current_offset(), no_gc); |
| |
| // Skip the current bytecode. |
| it.Advance(); |
| |
| for (; !it.done(); it.Advance()) { |
| // Bail out if there is a merge point before the next branch. |
| if (IsOffsetAMergePoint(it.current_offset())) { |
| if (v8_flags.trace_maglev_graph_building) { |
| std::cout |
|             << " ! Bailing out of test->branch fusion because of a merge point" |
| << std::endl; |
| } |
| return {}; |
| } |
| switch (it.current_bytecode()) { |
| case interpreter::Bytecode::kMov: |
| case interpreter::Bytecode::kToBoolean: |
| case interpreter::Bytecode::kLogicalNot: |
| case interpreter::Bytecode::kToBooleanLogicalNot: |
|         // No register moves, and these bytecodes only affect the accumulator |
|         // in a way that can be emulated by swapping the branch targets. |
| continue; |
| |
| case interpreter::Bytecode::kStar: { |
| interpreter::Register store_reg = it.GetRegisterOperand(0); |
| // If the Star stores the accumulator to a live register, the |
| // accumulator boolean value is observable and must be materialized. |
| if (store_reg.is_parameter() || |
| GetOutLivenessFor(it.current_offset()) |
| ->RegisterIsLive(store_reg.index())) { |
| return {}; |
| } |
| continue; |
| } |
| |
| #define STAR_CASE(name, ...) case interpreter::Bytecode::k##name: |
| SHORT_STAR_BYTECODE_LIST(STAR_CASE) |
| #undef STAR_CASE |
| { |
| interpreter::Register store_reg = |
| interpreter::Register::FromShortStar(it.current_bytecode()); |
| if (store_reg.is_parameter() || |
| GetOutLivenessFor(it.current_offset()) |
| ->RegisterIsLive(store_reg.index())) { |
| return {}; |
| } |
| continue; |
| } |
| |
| case interpreter::Bytecode::kJumpIfFalse: |
| case interpreter::Bytecode::kJumpIfFalseConstant: |
| case interpreter::Bytecode::kJumpIfToBooleanFalse: |
| case interpreter::Bytecode::kJumpIfToBooleanFalseConstant: |
| case interpreter::Bytecode::kJumpIfTrue: |
| case interpreter::Bytecode::kJumpIfTrueConstant: |
| case interpreter::Bytecode::kJumpIfToBooleanTrue: |
| case interpreter::Bytecode::kJumpIfToBooleanTrueConstant: |
| return {it.current_offset()}; |
| |
| default: |
| return {}; |
| } |
| } |
| |
| return {}; |
| } |
| |
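| // Tries to fuse the current test with the next conditional jump found by |
| // TryFindNextBranch. On success, skips over the intermediate bytecodes, |
| // tracking how many LogicalNots were seen so that the branch targets can be |
| // swapped instead of negating the value, emits the BranchControlNodeT |
| // directly, and records the known boolean value of the accumulator along |
| // each outgoing edge. Returns false if no branch could be fused, in which |
| // case the caller emits the boolean-producing node as usual. |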
| template <typename BranchControlNodeT, typename... Args> |
| bool MaglevGraphBuilder::TryBuildBranchFor( |
| std::initializer_list<ValueNode*> control_inputs, Args&&... args) { |
| base::Optional<int> maybe_next_branch_offset = TryFindNextBranch(); |
| |
| // If we didn't find a branch, bail out. |
| if (!maybe_next_branch_offset) { |
| return false; |
| } |
| |
| int next_branch_offset = *maybe_next_branch_offset; |
| |
| if (v8_flags.trace_maglev_graph_building) { |
| std::cout << " * Fusing test @" << iterator_.current_offset() |
| << " and branch @" << next_branch_offset << std::endl; |
| } |
| // Advance past the test. |
| iterator_.Advance(); |
| |
| // Evaluate Movs and LogicalNots between test and jump. |
| bool flip = false; |
| for (;; iterator_.Advance()) { |
| DCHECK_LE(iterator_.current_offset(), next_branch_offset); |
| UpdateSourceAndBytecodePosition(iterator_.current_offset()); |
| switch (iterator_.current_bytecode()) { |
| case interpreter::Bytecode::kMov: { |
| interpreter::Register src = iterator_.GetRegisterOperand(0); |
| interpreter::Register dst = iterator_.GetRegisterOperand(1); |
| DCHECK_NOT_NULL(current_interpreter_frame_.get(src)); |
| current_interpreter_frame_.set(dst, |
| current_interpreter_frame_.get(src)); |
| |
| continue; |
| } |
| case interpreter::Bytecode::kToBoolean: |
| continue; |
| |
| case interpreter::Bytecode::kLogicalNot: |
| case interpreter::Bytecode::kToBooleanLogicalNot: |
| flip = !flip; |
| continue; |
| |
| case interpreter::Bytecode::kStar: |
| #define STAR_CASE(name, ...) case interpreter::Bytecode::k##name: |
| SHORT_STAR_BYTECODE_LIST(STAR_CASE) |
| #undef STAR_CASE |
| // We don't need to perform the Star, since the target register is |
| // already known to be dead. |
| continue; |
| |
| default: |
| // Otherwise, we've reached the jump, so abort the iteration. |
| DCHECK_EQ(iterator_.current_offset(), next_branch_offset); |
| break; |
| } |
| break; |
| } |
| |
| JumpType jump_type; |
| switch (iterator_.current_bytecode()) { |
| case interpreter::Bytecode::kJumpIfFalse: |
| case interpreter::Bytecode::kJumpIfFalseConstant: |
| case interpreter::Bytecode::kJumpIfToBooleanFalse: |
| case interpreter::Bytecode::kJumpIfToBooleanFalseConstant: |
| jump_type = flip ? JumpType::kJumpIfTrue : JumpType::kJumpIfFalse; |
| break; |
| case interpreter::Bytecode::kJumpIfTrue: |
| case interpreter::Bytecode::kJumpIfTrueConstant: |
| case interpreter::Bytecode::kJumpIfToBooleanTrue: |
| case interpreter::Bytecode::kJumpIfToBooleanTrueConstant: |
| jump_type = flip ? JumpType::kJumpIfFalse : JumpType::kJumpIfTrue; |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| |
| int true_offset, false_offset; |
| if (jump_type == kJumpIfFalse) { |
| true_offset = next_offset(); |
| false_offset = iterator_.GetJumpTargetOffset(); |
| } else { |
| true_offset = iterator_.GetJumpTargetOffset(); |
| false_offset = next_offset(); |
| } |
| |
| BasicBlock* block = FinishBlock<BranchControlNodeT>( |
| control_inputs, std::forward<Args>(args)..., &jump_targets_[true_offset], |
| &jump_targets_[false_offset]); |
| |
| SetAccumulatorInBranch(GetBooleanConstant((jump_type == kJumpIfTrue) ^ flip)); |
| |
| MergeIntoFrameState(block, iterator_.GetJumpTargetOffset()); |
| |
| SetAccumulatorInBranch( |
| GetBooleanConstant((jump_type == kJumpIfFalse) ^ flip)); |
| StartFallthroughBlock(next_offset(), block); |
| return true; |
| } |
| |
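| // Evaluates the comparison kOperation on two compile-time values; used below |
| // to constant-fold comparisons whose inputs are both known constants. |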
| template <Operation kOperation, typename type> |
| bool OperationValue(type left, type right) { |
| switch (kOperation) { |
| case Operation::kEqual: |
| case Operation::kStrictEqual: |
| return left == right; |
| case Operation::kLessThan: |
| return left < right; |
| case Operation::kLessThanOrEqual: |
| return left <= right; |
| case Operation::kGreaterThan: |
| return left > right; |
| case Operation::kGreaterThanOrEqual: |
| return left >= right; |
| } |
| } |
| |
| // static |
| compiler::OptionalHeapObjectRef MaglevGraphBuilder::TryGetConstant( |
| compiler::JSHeapBroker* broker, LocalIsolate* isolate, ValueNode* node) { |
| if (Constant* c = node->TryCast<Constant>()) { |
| return c->object(); |
| } |
| if (RootConstant* c = node->TryCast<RootConstant>()) { |
| return MakeRef(broker, isolate->root_handle(c->index())).AsHeapObject(); |
| } |
| return {}; |
| } |
| |
| compiler::OptionalHeapObjectRef MaglevGraphBuilder::TryGetConstant( |
| ValueNode* node) { |
| if (auto result = TryGetConstant(broker(), local_isolate(), node)) { |
| return result; |
| } |
| const NodeInfo* info = known_node_aspects().TryGetInfoFor(node); |
| if (info && info->is_constant()) { |
| return TryGetConstant(info->constant_alternative); |
| } |
| return {}; |
| } |
| |
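| // For strict equality against a known constant whose instance type is only |
| // ever compared by reference (e.g. not a string or a heap number), the |
| // comparison can be reduced: an untagged input can never equal such a |
| // constant, identical nodes compare equal, and otherwise a plain reference |
| // comparison (a fused branch or TaggedEqual) suffices. |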
| template <Operation kOperation> |
| bool MaglevGraphBuilder::TryReduceCompareEqualAgainstConstant() { |
| // First handle strict equal comparison with constant. |
| if (kOperation != Operation::kStrictEqual) return false; |
| ValueNode* left = LoadRegisterRaw(0); |
| ValueNode* right = GetRawAccumulator(); |
| |
| compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(left); |
| if (!maybe_constant) maybe_constant = TryGetConstant(right); |
| if (!maybe_constant) return false; |
| InstanceType type = maybe_constant.value().map(broker()).instance_type(); |
| |
| if (!InstanceTypeChecker::IsReferenceComparable(type)) return false; |
| |
| if (left->properties().value_representation() != |
| ValueRepresentation::kTagged || |
| right->properties().value_representation() != |
| ValueRepresentation::kTagged) { |
| SetAccumulator(GetBooleanConstant(false)); |
| } else if (left == right) { |
| SetAccumulator(GetBooleanConstant(true)); |
| } else if (!TryBuildBranchFor<BranchIfReferenceCompare>({left, right}, |
| kOperation)) { |
| SetAccumulator(AddNewNode<TaggedEqual>({left, right})); |
| } |
| return true; |
| } |
| |
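| // Comparisons are lowered according to the compare-operation feedback: Smi |
| // feedback uses Int32 comparisons and number feedback uses Float64 |
| // comparisons (both with constant folding and test->branch fusion); |
| // internalized-string, symbol and receiver feedback reduce equality to a |
| // reference comparison after the appropriate checks; string feedback uses |
| // StringEqual or the string comparison builtins; everything else goes |
| // through the generic node. |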
| template <Operation kOperation> |
| void MaglevGraphBuilder::VisitCompareOperation() { |
| FeedbackNexus nexus = FeedbackNexusForOperand(1); |
| switch (nexus.GetCompareOperationFeedback()) { |
| case CompareOperationHint::kNone: |
| EmitUnconditionalDeopt( |
| DeoptimizeReason::kInsufficientTypeFeedbackForCompareOperation); |
| return; |
| case CompareOperationHint::kSignedSmall: { |
| ValueNode* left = LoadRegisterInt32(0); |
| ValueNode* right = GetAccumulatorInt32(); |
| if (left == right) { |
| SetAccumulator( |
| GetBooleanConstant(kOperation == Operation::kEqual || |
| kOperation == Operation::kStrictEqual || |
| kOperation == Operation::kLessThanOrEqual || |
| kOperation == Operation::kGreaterThanOrEqual)); |
| return; |
| } |
| if (left->Is<Int32Constant>() && right->Is<Int32Constant>()) { |
| int left_value = left->Cast<Int32Constant>()->value(); |
| int right_value = right->Cast<Int32Constant>()->value(); |
| SetAccumulator(GetBooleanConstant( |
| OperationValue<kOperation>(left_value, right_value))); |
| return; |
| } |
| if (TryBuildBranchFor<BranchIfInt32Compare>({left, right}, kOperation)) { |
| return; |
| } |
| SetAccumulator(AddNewNode<Int32NodeFor<kOperation>>({left, right})); |
| return; |
| } |
| case CompareOperationHint::kNumber: { |
| // TODO(leszeks): we could support kNumberOrOddball with |
| // BranchIfFloat64Compare, but we'd need to special case comparing |
| // oddballs with NaN value (e.g. undefined) against themselves. |
| ValueNode* left = LoadRegisterFloat64(0); |
| ValueNode* right = GetAccumulatorFloat64(); |
| if (left->Is<Float64Constant>() && right->Is<Float64Constant>()) { |
| double left_value = left->Cast<Float64Constant>()->value().get_scalar(); |
| double right_value = |
| right->Cast<Float64Constant>()->value().get_scalar(); |
| SetAccumulator(GetBooleanConstant( |
| OperationValue<kOperation>(left_value, right_value))); |
| return; |
| } |
| if (TryBuildBranchFor<BranchIfFloat64Compare>({left, right}, |
| kOperation)) { |
| return; |
| } |
| SetAccumulator(AddNewNode<Float64NodeFor<kOperation>>({left, right})); |
| return; |
| } |
| case CompareOperationHint::kInternalizedString: { |
| DCHECK(kOperation == Operation::kEqual || |
| kOperation == Operation::kStrictEqual); |
| ValueNode *left, *right; |
| if (IsRegisterEqualToAccumulator(0)) { |
| left = right = GetInternalizedString(iterator_.GetRegisterOperand(0)); |
| SetAccumulator(GetRootConstant(RootIndex::kTrueValue)); |
| return; |
| } |
| left = GetInternalizedString(iterator_.GetRegisterOperand(0)); |
| right = |
| GetInternalizedString(interpreter::Register::virtual_accumulator()); |
| if (left == right) { |
| SetAccumulator(GetBooleanConstant(true)); |
| return; |
| } |
| if (TryBuildBranchFor<BranchIfReferenceCompare>({left, right}, |
| kOperation)) { |
| return; |
| } |
| SetAccumulator(AddNewNode<TaggedEqual>({left, right})); |
| return; |
| } |
| case CompareOperationHint::kSymbol: { |
| DCHECK(kOperation == Operation::kEqual || |
| kOperation == Operation::kStrictEqual); |
| |
| ValueNode* left = LoadRegisterTagged(0); |
| ValueNode* right = GetAccumulatorTagged(); |
| BuildCheckSymbol(left); |
| BuildCheckSymbol(right); |
| if (left == right) { |
| SetAccumulator(GetBooleanConstant(true)); |
| return; |
| } |
| if (TryBuildBranchFor<BranchIfReferenceCompare>({left, right}, |
| kOperation)) { |
| return; |
| } |
| SetAccumulator(AddNewNode<TaggedEqual>({left, right})); |
| return; |
| } |
| case CompareOperationHint::kString: { |
| if (TryReduceCompareEqualAgainstConstant<kOperation>()) return; |
| |
| ValueNode* left = LoadRegisterTagged(0); |
| ValueNode* right = GetAccumulatorTagged(); |
| BuildCheckString(left); |
| BuildCheckString(right); |
| |
| ValueNode* result; |
| if (left == right) { |
| SetAccumulator( |
| GetBooleanConstant(kOperation == Operation::kEqual || |
| kOperation == Operation::kStrictEqual || |
| kOperation == Operation::kLessThanOrEqual || |
| kOperation == Operation::kGreaterThanOrEqual)); |
| return; |
| } |
| switch (kOperation) { |
| case Operation::kEqual: |
| case Operation::kStrictEqual: |
| result = AddNewNode<StringEqual>({left, right}); |
| break; |
| case Operation::kLessThan: |
| result = BuildCallBuiltin<Builtin::kStringLessThan>({left, right}); |
| break; |
| case Operation::kLessThanOrEqual: |
| result = |
| BuildCallBuiltin<Builtin::kStringLessThanOrEqual>({left, right}); |
| break; |
| case Operation::kGreaterThan: |
| result = BuildCallBuiltin<Builtin::kStringGreaterThan>({left, right}); |
| break; |
| case Operation::kGreaterThanOrEqual: |
| result = BuildCallBuiltin<Builtin::kStringGreaterThanOrEqual>( |
| {left, right}); |
| break; |
| } |
| |
| SetAccumulator(result); |
| return; |
| } |
| case CompareOperationHint::kAny: |
| case CompareOperationHint::kBigInt64: |
| case CompareOperationHint::kBigInt: |
| case CompareOperationHint::kNumberOrBoolean: |
| case CompareOperationHint::kNumberOrOddball: |
| case CompareOperationHint::kReceiverOrNullOrUndefined: |
| if (TryReduceCompareEqualAgainstConstant<kOperation>()) return; |
| break; |
| case CompareOperationHint::kReceiver: { |
| if (TryReduceCompareEqualAgainstConstant<kOperation>()) return; |
| DCHECK(kOperation == Operation::kEqual || |
| kOperation == Operation::kStrictEqual); |
| |
| ValueNode* left = LoadRegisterTagged(0); |
| ValueNode* right = GetAccumulatorTagged(); |
| BuildCheckJSReceiver(left); |
| BuildCheckJSReceiver(right); |
| if (left == right) { |
| SetAccumulator(GetBooleanConstant(true)); |
| return; |
| } |
| if (TryBuildBranchFor<BranchIfReferenceCompare>({left, right}, |
| kOperation)) { |
| return; |
| } |
| SetAccumulator(AddNewNode<TaggedEqual>({left, right})); |
| return; |
| } |
| } |
| |
| BuildGenericBinaryOperationNode<kOperation>(); |
| } |
| |
| void MaglevGraphBuilder::VisitLdar() { |
| MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0), |
| interpreter::Register::virtual_accumulator()); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaZero() { SetAccumulator(GetSmiConstant(0)); } |
| void MaglevGraphBuilder::VisitLdaSmi() { |
| int constant = iterator_.GetImmediateOperand(0); |
| SetAccumulator(GetSmiConstant(constant)); |
| } |
| void MaglevGraphBuilder::VisitLdaUndefined() { |
| SetAccumulator(GetRootConstant(RootIndex::kUndefinedValue)); |
| } |
| void MaglevGraphBuilder::VisitLdaNull() { |
| SetAccumulator(GetRootConstant(RootIndex::kNullValue)); |
| } |
| void MaglevGraphBuilder::VisitLdaTheHole() { |
| SetAccumulator(GetRootConstant(RootIndex::kTheHoleValue)); |
| } |
| void MaglevGraphBuilder::VisitLdaTrue() { |
| SetAccumulator(GetRootConstant(RootIndex::kTrueValue)); |
| } |
| void MaglevGraphBuilder::VisitLdaFalse() { |
| SetAccumulator(GetRootConstant(RootIndex::kFalseValue)); |
| } |
| void MaglevGraphBuilder::VisitLdaConstant() { |
| SetAccumulator(GetConstant(GetRefOperand<HeapObject>(0))); |
| } |
| |
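| // Tries to constant-fold a context slot load using function context |
| // specialization. Returns true only if the load was fully folded into the |
| // accumulator; otherwise it may still replace *context with a constant |
| // context and reduce *depth before returning false, leaving the caller to |
| // emit the remaining load. |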
| bool MaglevGraphBuilder::TrySpecializeLoadContextSlotToFunctionContext( |
| ValueNode** context, size_t* depth, int slot_index, |
| ContextSlotMutability slot_mutability) { |
| DCHECK(compilation_unit_->info()->specialize_to_function_context()); |
| |
| size_t new_depth = *depth; |
| compiler::OptionalContextRef maybe_context_ref = |
| FunctionContextSpecialization::TryToRef(compilation_unit_, *context, |
| &new_depth); |
| if (!maybe_context_ref.has_value()) return false; |
| |
| compiler::ContextRef context_ref = maybe_context_ref.value(); |
| if (slot_mutability == kMutable || new_depth != 0) { |
| *depth = new_depth; |
| *context = GetConstant(context_ref); |
| return false; |
| } |
| |
| compiler::OptionalObjectRef maybe_slot_value = |
| context_ref.get(broker(), slot_index); |
| if (!maybe_slot_value.has_value()) { |
| *depth = new_depth; |
| *context = GetConstant(context_ref); |
| return false; |
| } |
| |
| compiler::ObjectRef slot_value = maybe_slot_value.value(); |
| if (slot_value.IsHeapObject()) { |
|     // Even though the context slot is immutable, the context might have |
|     // escaped before the function to which it belongs has initialized the |
|     // slot. We must be conservative and check whether the value in the slot |
|     // is currently the hole or undefined. Only if it is neither of these can |
|     // we be sure that it won't change anymore. |
| // |
| // See also: JSContextSpecialization::ReduceJSLoadContext. |
| compiler::OddballType oddball_type = |
| slot_value.AsHeapObject().map(broker()).oddball_type(broker()); |
| if (oddball_type == compiler::OddballType::kUndefined || |
| oddball_type == compiler::OddballType::kHole) { |
| *depth = new_depth; |
| *context = GetConstant(context_ref); |
| return false; |
| } |
| } |
| |
| // Fold the load of the immutable slot. |
| |
| SetAccumulator(GetConstant(slot_value)); |
| return true; |
| } |
| |
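| // Loads the given context slot as a tagged field, reusing a previously |
| // loaded value for the same (context, offset) pair if one is cached in the |
| // known node aspects. Mutable and immutable slots are cached separately. |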
| ValueNode* MaglevGraphBuilder::LoadAndCacheContextSlot( |
| ValueNode* context, int offset, ContextSlotMutability slot_mutability) { |
| ValueNode*& cached_value = |
| slot_mutability == ContextSlotMutability::kMutable |
| ? known_node_aspects().loaded_context_slots[{context, offset}] |
| : known_node_aspects().loaded_context_constants[{context, offset}]; |
| if (cached_value) { |
| if (v8_flags.trace_maglev_graph_building) { |
| std::cout << " * Reusing cached context slot " |
| << PrintNodeLabel(graph_labeller(), context) << "[" << offset |
| << "]: " << PrintNode(graph_labeller(), cached_value) |
| << std::endl; |
| } |
| return cached_value; |
| } |
| return cached_value = AddNewNode<LoadTaggedField>({context}, offset); |
| } |
| |
| void MaglevGraphBuilder::StoreAndCacheContextSlot(ValueNode* context, |
| int offset, |
| ValueNode* value) { |
| DCHECK_EQ( |
| known_node_aspects().loaded_context_constants.count({context, offset}), |
| 0); |
| BuildStoreTaggedField(context, GetTaggedValue(value), offset); |
| |
| if (v8_flags.trace_maglev_graph_building) { |
| std::cout << " * Recording context slot store " |
| << PrintNodeLabel(graph_labeller(), context) << "[" << offset |
| << "]: " << PrintNode(graph_labeller(), value) << std::endl; |
| } |
| known_node_aspects().loaded_context_slots[{context, offset}] = value; |
| } |
| |
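| // Loads a context slot: first minimizes the context chain depth and, when |
| // function context specialization is enabled, tries to fold the load to a |
| // constant. Otherwise it walks the remaining chain with one (cached) load of |
| // the previous-context slot per level and then loads the requested slot. |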
| void MaglevGraphBuilder::BuildLoadContextSlot( |
| ValueNode* context, size_t depth, int slot_index, |
| ContextSlotMutability slot_mutability) { |
| MinimizeContextChainDepth(&context, &depth); |
| |
| if (compilation_unit_->info()->specialize_to_function_context() && |
| TrySpecializeLoadContextSlotToFunctionContext( |
| &context, &depth, slot_index, slot_mutability)) { |
| return; // Our work here is done. |
| } |
| |
| for (size_t i = 0; i < depth; ++i) { |
| context = LoadAndCacheContextSlot( |
| context, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX), |
| kImmutable); |
| } |
| |
|   // Always load the slot here as if it were mutable. Even an immutable slot |
|   // still receives its one initializing store, so if the context escapes |
|   // before that store happens we can't safely assume that the load can be |
|   // cached, in case it's a load before initialization (e.g. var a = a + 42). |
| current_interpreter_frame_.set_accumulator(LoadAndCacheContextSlot( |
| context, Context::OffsetOfElementAt(slot_index), kMutable)); |
| } |
| |
| void MaglevGraphBuilder::BuildStoreContextSlot(ValueNode* context, size_t depth, |
| int slot_index, |
| ValueNode* value) { |
| MinimizeContextChainDepth(&context, &depth); |
| |
| if (compilation_unit_->info()->specialize_to_function_context()) { |
| compiler::OptionalContextRef maybe_ref = |
| FunctionContextSpecialization::TryToRef(compilation_unit_, context, |
| &depth); |
| if (maybe_ref.has_value()) { |
| context = GetConstant(maybe_ref.value()); |
| } |
| } |
| |
| for (size_t i = 0; i < depth; ++i) { |
| context = LoadAndCacheContextSlot( |
| context, Context::OffsetOfElementAt(Context::PREVIOUS_INDEX), |
| kImmutable); |
| } |
| |
| StoreAndCacheContextSlot(context, Context::OffsetOfElementAt(slot_index), |
| value); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaContextSlot() { |
| ValueNode* context = LoadRegisterTagged(0); |
| int slot_index = iterator_.GetIndexOperand(1); |
| size_t depth = iterator_.GetUnsignedImmediateOperand(2); |
| BuildLoadContextSlot(context, depth, slot_index, kMutable); |
| } |
| void MaglevGraphBuilder::VisitLdaImmutableContextSlot() { |
| ValueNode* context = LoadRegisterTagged(0); |
| int slot_index = iterator_.GetIndexOperand(1); |
| size_t depth = iterator_.GetUnsignedImmediateOperand(2); |
| BuildLoadContextSlot(context, depth, slot_index, kImmutable); |
| } |
| void MaglevGraphBuilder::VisitLdaCurrentContextSlot() { |
| ValueNode* context = GetContext(); |
| int slot_index = iterator_.GetIndexOperand(0); |
| BuildLoadContextSlot(context, 0, slot_index, kMutable); |
| } |
| void MaglevGraphBuilder::VisitLdaImmutableCurrentContextSlot() { |
| ValueNode* context = GetContext(); |
| int slot_index = iterator_.GetIndexOperand(0); |
| BuildLoadContextSlot(context, 0, slot_index, kImmutable); |
| } |
| |
| void MaglevGraphBuilder::VisitStaContextSlot() { |
| ValueNode* context = LoadRegisterTagged(0); |
| int slot_index = iterator_.GetIndexOperand(1); |
| size_t depth = iterator_.GetUnsignedImmediateOperand(2); |
| BuildStoreContextSlot(context, depth, slot_index, GetRawAccumulator()); |
| } |
| void MaglevGraphBuilder::VisitStaCurrentContextSlot() { |
| ValueNode* context = GetContext(); |
| int slot_index = iterator_.GetIndexOperand(0); |
| BuildStoreContextSlot(context, 0, slot_index, GetRawAccumulator()); |
| } |
| |
| void MaglevGraphBuilder::VisitStar() { |
| MoveNodeBetweenRegisters(interpreter::Register::virtual_accumulator(), |
| iterator_.GetRegisterOperand(0)); |
| } |
| #define SHORT_STAR_VISITOR(Name, ...) \ |
| void MaglevGraphBuilder::Visit##Name() { \ |
| MoveNodeBetweenRegisters( \ |
| interpreter::Register::virtual_accumulator(), \ |
| interpreter::Register::FromShortStar(interpreter::Bytecode::k##Name)); \ |
| } |
| SHORT_STAR_BYTECODE_LIST(SHORT_STAR_VISITOR) |
| #undef SHORT_STAR_VISITOR |
| |
| void MaglevGraphBuilder::VisitMov() { |
| MoveNodeBetweenRegisters(iterator_.GetRegisterOperand(0), |
| iterator_.GetRegisterOperand(1)); |
| } |
| |
| void MaglevGraphBuilder::VisitPushContext() { |
| MoveNodeBetweenRegisters(interpreter::Register::current_context(), |
| iterator_.GetRegisterOperand(0)); |
| SetContext(GetAccumulatorTagged()); |
| } |
| |
| void MaglevGraphBuilder::VisitPopContext() { |
| SetContext(LoadRegisterTagged(0)); |
| } |
| |
| void MaglevGraphBuilder::VisitTestReferenceEqual() { |
| ValueNode* lhs = LoadRegisterTagged(0); |
| ValueNode* rhs = GetAccumulatorTagged(); |
| if (lhs == rhs) { |
| SetAccumulator(GetRootConstant(RootIndex::kTrueValue)); |
| return; |
| } |
| if (TryBuildBranchFor<BranchIfReferenceCompare>({lhs, rhs}, |
| Operation::kStrictEqual)) { |
| return; |
| } |
| SetAccumulator(AddNewNode<TaggedEqual>({lhs, rhs})); |
| } |
| |
| void MaglevGraphBuilder::VisitTestUndetectable() { |
| ValueNode* value = GetAccumulatorTagged(); |
| if (compiler::OptionalHeapObjectRef maybe_constant = TryGetConstant(value)) { |
| if (maybe_constant.value().map(broker()).is_undetectable()) { |
| SetAccumulator(GetRootConstant(RootIndex::kTrueValue)); |
| } else { |
| SetAccumulator(GetRootConstant(RootIndex::kFalseValue)); |
| } |
| return; |
| } |
| |
| if (CheckType(value, NodeType::kSmi)) { |
| SetAccumulator(GetRootConstant(RootIndex::kFalseValue)); |
| return; |
| } |
| |
| enum CheckType type = CheckType(value, NodeType::kAnyHeapObject) |
| ? CheckType::kOmitHeapObjectCheck |
| : CheckType::kCheckHeapObject; |
| if (TryBuildBranchFor<BranchIfUndetectable>({value}, type)) return; |
| SetAccumulator(AddNewNode<TestUndetectable>({value}, type)); |
| } |
| |
| void MaglevGraphBuilder::VisitTestNull() { |
| ValueNode* value = GetAccumulatorTagged(); |
| if (IsConstantNode(value->opcode())) { |
| SetAccumulator(GetBooleanConstant(IsNullValue(value))); |
| return; |
| } |
| if (TryBuildBranchFor<BranchIfRootConstant>({value}, RootIndex::kNullValue)) { |
| return; |
| } |
| ValueNode* null_constant = GetRootConstant(RootIndex::kNullValue); |
| SetAccumulator(AddNewNode<TaggedEqual>({value, null_constant})); |
| } |
| |
| void MaglevGraphBuilder::VisitTestUndefined() { |
| ValueNode* value = GetAccumulatorTagged(); |
| if (IsConstantNode(value->opcode())) { |
| SetAccumulator(GetBooleanConstant(IsUndefinedValue(value))); |
| return; |
| } |
| if (TryBuildBranchFor<BranchIfRootConstant>({value}, |
| RootIndex::kUndefinedValue)) { |
| return; |
| } |
| ValueNode* undefined_constant = GetRootConstant(RootIndex::kUndefinedValue); |
| SetAccumulator(AddNewNode<TaggedEqual>({value, undefined_constant})); |
| } |
| |
| void MaglevGraphBuilder::VisitTestTypeOf() { |
| using LiteralFlag = interpreter::TestTypeOfFlags::LiteralFlag; |
| // TODO(v8:7700): Add a branch version of TestTypeOf that does not need to |
| // materialise the boolean value. |
| LiteralFlag literal = |
| interpreter::TestTypeOfFlags::Decode(GetFlag8Operand(0)); |
| if (literal == LiteralFlag::kOther) { |
| SetAccumulator(GetRootConstant(RootIndex::kFalseValue)); |
| return; |
| } |
| ValueNode* value = GetAccumulatorTagged(); |
| if (TryBuildBranchFor<BranchIfTypeOf>({value}, literal)) { |
| return; |
| } |
| SetAccumulator(AddNewNode<TestTypeOf>({value}, literal)); |
| } |
| |
| ReduceResult MaglevGraphBuilder::TryBuildScriptContextStore( |
| const compiler::GlobalAccessFeedback& global_access_feedback) { |
| DCHECK(global_access_feedback.IsScriptContextSlot()); |
| if (global_access_feedback.immutable()) { |
| return ReduceResult::Fail(); |
| } |
| auto script_context = GetConstant(global_access_feedback.script_context()); |
| int offset = Context::OffsetOfElementAt(global_access_feedback.slot_index()); |
| StoreAndCacheContextSlot(script_context, offset, GetRawAccumulator()); |
| return ReduceResult::Done(); |
| } |
| |
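| // Lowers a store to a global property cell according to the cell type: |
| //  - kConstant: depend on the cell and check that the stored value equals |
| //    the cell's current value. |
| //  - kConstantType: depend on the cell, check that the new value has the |
| //    same type (and, for heap objects, the same stable map), then store. |
| //  - kMutable: depend on the property not becoming read-only and store |
| //    directly. |
| // Hole-valued cells deoptimize; read-only and kUndefined cells fall back to |
| // the generic store. |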
| ReduceResult MaglevGraphBuilder::TryBuildPropertyCellStore( |
| const compiler::GlobalAccessFeedback& global_access_feedback) { |
| DCHECK(global_access_feedback.IsPropertyCell()); |
| |
| compiler::PropertyCellRef property_cell = |
| global_access_feedback.property_cell(); |
| if (!property_cell.Cache(broker())) return ReduceResult::Fail(); |
| |
| compiler::ObjectRef property_cell_value = property_cell.value(broker()); |
| if (property_cell_value.IsTheHole(broker())) { |
| // The property cell is no longer valid. |
| EmitUnconditionalDeopt( |
| DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess); |
| return ReduceResult::DoneWithAbort(); |
| } |
| |
| PropertyDetails property_details = property_cell.property_details(); |
| DCHECK_EQ(PropertyKind::kData, property_details.kind()); |
| |
| if (property_details.IsReadOnly()) { |
| // Don't even bother trying to lower stores to read-only data |
| // properties. |
| // TODO(neis): We could generate code that checks if the new value |
| // equals the old one and then does nothing or deopts, respectively. |
| return ReduceResult::Fail(); |
| } |
| |
| switch (property_details.cell_type()) { |
| case PropertyCellType::kUndefined: |
| return ReduceResult::Fail(); |
| case PropertyCellType::kConstant: { |
|       // TODO(victorgomes): Support non-internalized strings. |
| if (property_cell_value.IsString() && |
| !property_cell_value.IsInternalizedString()) { |
| return ReduceResult::Fail(); |
| } |
| // Record a code dependency on the cell, and just deoptimize if the new |
| // value doesn't match the previous value stored inside the cell. |
| broker()->dependencies()->DependOnGlobalProperty(property_cell); |
| ValueNode* value = GetAccumulatorTagged(); |
| return BuildCheckValue(value, property_cell_value); |
| } |
| case PropertyCellType::kConstantType: { |
| // We rely on stability further below. |
| if (property_cell_value.IsHeapObject() && |
| !property_cell_value.AsHeapObject().map(broker()).is_stable()) { |
| return ReduceResult::Fail(); |
| } |
| // Record a code dependency on the cell, and just deoptimize if the new |
| // value's type doesn't match the type of the previous value in the cell. |
| broker()->dependencies()->DependOnGlobalProperty(property_cell); |
| ValueNode* value; |
| if (property_cell_value.IsHeapObject()) { |
| value = GetAccumulatorTagged(); |
| compiler::MapRef property_cell_value_map = |
| property_cell_value.AsHeapObject().map(broker()); |
| broker()->dependencies()->DependOnStableMap(property_cell_value_map); |
| BuildCheckHeapObject(value); |
| BuildCheckMaps(value, base::VectorOf({property_cell_value_map})); |
| } else { |
| value = GetAccumulatorSmi(); |
| } |
| ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject()); |
| BuildStoreTaggedField(property_cell_node, value, |
| PropertyCell::kValueOffset); |
| break; |
| } |
| case PropertyCellType::kMutable: { |
| // Record a code dependency on the cell, and just deoptimize if the |
| // property ever becomes read-only. |
| broker()->dependencies()->DependOnGlobalProperty(property_cell); |
| ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject()); |
| ValueNode* value = GetAccumulatorTagged(); |
| BuildStoreTaggedField(property_cell_node, value, |
| PropertyCell::kValueOffset); |
| break; |
| } |
| case PropertyCellType::kInTransition: |
| UNREACHABLE(); |
| } |
| return ReduceResult::Done(); |
| } |
| |
| ReduceResult MaglevGraphBuilder::TryBuildScriptContextConstantLoad( |
| const compiler::GlobalAccessFeedback& global_access_feedback) { |
| DCHECK(global_access_feedback.IsScriptContextSlot()); |
| if (!global_access_feedback.immutable()) return ReduceResult::Fail(); |
| compiler::OptionalObjectRef maybe_slot_value = |
| global_access_feedback.script_context().get( |
| broker(), global_access_feedback.slot_index()); |
| if (!maybe_slot_value) return ReduceResult::Fail(); |
| return GetConstant(maybe_slot_value.value()); |
| } |
| |
| ReduceResult MaglevGraphBuilder::TryBuildScriptContextLoad( |
| const compiler::GlobalAccessFeedback& global_access_feedback) { |
| DCHECK(global_access_feedback.IsScriptContextSlot()); |
| RETURN_IF_DONE(TryBuildScriptContextConstantLoad(global_access_feedback)); |
| auto script_context = GetConstant(global_access_feedback.script_context()); |
| int offset = Context::OffsetOfElementAt(global_access_feedback.slot_index()); |
| return LoadAndCacheContextSlot( |
| script_context, offset, |
| global_access_feedback.immutable() ? kImmutable : kMutable); |
| } |
| |
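| // Lowers a load from a global property cell: non-configurable read-only |
| // cells and kConstant/kUndefined cells are folded to the cell's current |
| // value (the latter two guarded by a global-property dependency), |
| // hole-valued cells deoptimize, and any other cell is loaded via a |
| // LoadTaggedField of its value slot. |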
| ReduceResult MaglevGraphBuilder::TryBuildPropertyCellLoad( |
| const compiler::GlobalAccessFeedback& global_access_feedback) { |
| // TODO(leszeks): A bunch of this is copied from |
| // js-native-context-specialization.cc -- I wonder if we can unify it |
| // somehow. |
| DCHECK(global_access_feedback.IsPropertyCell()); |
| |
| compiler::PropertyCellRef property_cell = |
| global_access_feedback.property_cell(); |
| if (!property_cell.Cache(broker())) return ReduceResult::Fail(); |
| |
| compiler::ObjectRef property_cell_value = property_cell.value(broker()); |
| if (property_cell_value.IsTheHole(broker())) { |
| // The property cell is no longer valid. |
| EmitUnconditionalDeopt( |
| DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess); |
| return ReduceResult::DoneWithAbort(); |
| } |
| |
| PropertyDetails property_details = property_cell.property_details(); |
| PropertyCellType property_cell_type = property_details.cell_type(); |
| DCHECK_EQ(PropertyKind::kData, property_details.kind()); |
| |
| if (!property_details.IsConfigurable() && property_details.IsReadOnly()) { |
| return GetConstant(property_cell_value); |
| } |
| |
| // Record a code dependency on the cell if we can benefit from the |
| // additional feedback, or the global property is configurable (i.e. |
| // can be deleted or reconfigured to an accessor property). |
| if (property_cell_type != PropertyCellType::kMutable || |
| property_details.IsConfigurable()) { |
| broker()->dependencies()->DependOnGlobalProperty(property_cell); |
| } |
| |
|   // A load from a constant/undefined global property can be constant-folded. |
| if (property_cell_type == PropertyCellType::kConstant || |
| property_cell_type == PropertyCellType::kUndefined) { |
| return GetConstant(property_cell_value); |
| } |
| |
| ValueNode* property_cell_node = GetConstant(property_cell.AsHeapObject()); |
| return AddNewNode<LoadTaggedField>({property_cell_node}, |
| PropertyCell::kValueOffset); |
| } |
| |
| ReduceResult MaglevGraphBuilder::TryBuildGlobalStore( |
| const compiler::GlobalAccessFeedback& global_access_feedback) { |
| if (global_access_feedback.IsScriptContextSlot()) { |
| return TryBuildScriptContextStore(global_access_feedback); |
| } else if (global_access_feedback.IsPropertyCell()) { |
| return TryBuildPropertyCellStore(global_access_feedback); |
| } else { |
| DCHECK(global_access_feedback.IsMegamorphic()); |
| return ReduceResult::Fail(); |
| } |
| } |
| |
| ReduceResult MaglevGraphBuilder::TryBuildGlobalLoad( |
| const compiler::GlobalAccessFeedback& global_access_feedback) { |
| if (global_access_feedback.IsScriptContextSlot()) { |
| return TryBuildScriptContextLoad(global_access_feedback); |
| } else if (global_access_feedback.IsPropertyCell()) { |
| return TryBuildPropertyCellLoad(global_access_feedback); |
| } else { |
| DCHECK(global_access_feedback.IsMegamorphic()); |
| return ReduceResult::Fail(); |
| } |
| } |
| |
| void MaglevGraphBuilder::VisitLdaGlobal() { |
| // LdaGlobal <name_index> <slot> |
| |
| static const int kNameOperandIndex = 0; |
| static const int kSlotOperandIndex = 1; |
| |
| compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex); |
| FeedbackSlot slot = GetSlotOperand(kSlotOperandIndex); |
| compiler::FeedbackSource feedback_source{feedback(), slot}; |
| BuildLoadGlobal(name, feedback_source, TypeofMode::kNotInside); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaGlobalInsideTypeof() { |
| // LdaGlobalInsideTypeof <name_index> <slot> |
| |
| static const int kNameOperandIndex = 0; |
| static const int kSlotOperandIndex = 1; |
| |
| compiler::NameRef name = GetRefOperand<Name>(kNameOperandIndex); |
| FeedbackSlot slot = GetSlotOperand(kSlotOperandIndex); |
| compiler::FeedbackSource feedback_source{feedback(), slot}; |
| BuildLoadGlobal(name, feedback_source, TypeofMode::kInside); |
| } |
| |
| void MaglevGraphBuilder::VisitStaGlobal() { |
| // StaGlobal <name_index> <slot> |
| FeedbackSlot slot = GetSlotOperand(1); |
| compiler::FeedbackSource feedback_source{feedback(), slot}; |
| |
| const compiler::ProcessedFeedback& access_feedback = |
| broker()->GetFeedbackForGlobalAccess(feedback_source); |
| |
| if (access_feedback.IsInsufficient()) { |
| EmitUnconditionalDeopt( |
| DeoptimizeReason::kInsufficientTypeFeedbackForGenericGlobalAccess); |
| return; |
| } |
| |
| const compiler::GlobalAccessFeedback& global_access_feedback = |
| access_feedback.AsGlobalAccess(); |
| RETURN_VOID_IF_DONE(TryBuildGlobalStore(global_access_feedback)); |
| |
| ValueNode* value = GetAccumulatorTagged(); |
| compiler::NameRef name = GetRefOperand<Name>(0); |
| ValueNode* context = GetContext(); |
| AddNewNode<StoreGlobal>({context, value}, name, feedback_source); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaLookupSlot() { |
| // LdaLookupSlot <name_index> |
| ValueNode* name = GetConstant(GetRefOperand<Name>(0)); |
| SetAccumulator(BuildCallRuntime(Runtime::kLoadLookupSlot, {name})); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaLookupContextSlot() { |
| // LdaLookupContextSlot <name_index> <feedback_slot> <depth> |
| ValueNode* name = GetConstant(GetRefOperand<Name>(0)); |
| ValueNode* slot = GetSmiConstant(iterator_.GetIndexOperand(1)); |
| ValueNode* depth = GetSmiConstant(iterator_.GetUnsignedImmediateOperand(2)); |
| SetAccumulator( |
| BuildCallBuiltin<Builtin::kLookupContextTrampoline>({name, depth, slot})); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaLookupGlobalSlot() { |
| // LdaLookupGlobalSlot <name_index> <feedback_slot> <depth> |
| ValueNode* name = GetConstant(GetRefOperand<Name>(0)); |
| ValueNode* slot = GetSmiConstant(iterator_.GetIndexOperand(1)); |
| ValueNode* depth = GetSmiConstant(iterator_.GetUnsignedImmediateOperand(2)); |
| ValueNode* result; |
| if (parent_) { |
| ValueNode* vector = GetConstant(feedback()); |
| result = |
| BuildCallBuiltin<Builtin::kLookupGlobalIC>({name, depth, slot, vector}); |
| } else { |
| result = BuildCallBuiltin<Builtin::kLookupGlobalICTrampoline>( |
| {name, depth, slot}); |
| } |
| SetAccumulator(result); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaLookupSlotInsideTypeof() { |
| // LdaLookupSlotInsideTypeof <name_index> |
| ValueNode* name = GetConstant(GetRefOperand<Name>(0)); |
| SetAccumulator( |
| BuildCallRuntime(Runtime::kLoadLookupSlotInsideTypeof, {name})); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaLookupContextSlotInsideTypeof() { |
| // LdaLookupContextSlotInsideTypeof <name_index> <context_slot> <depth> |
| ValueNode* name = GetConstant(GetRefOperand<Name>(0)); |
| ValueNode* slot = GetSmiConstant(iterator_.GetIndexOperand(1)); |
| ValueNode* depth = GetSmiConstant(iterator_.GetUnsignedImmediateOperand(2)); |
| SetAccumulator( |
| BuildCallBuiltin<Builtin::kLookupContextInsideTypeofTrampoline>( |
| {name, depth, slot})); |
| } |
| |
| void MaglevGraphBuilder::VisitLdaLookupGlobalSlotInsideTypeof() { |
| // LdaLookupGlobalSlotInsideTypeof <name_index> <context_slot> <depth> |
| ValueNode* name = GetConstant(GetRefOperand<Name>(0)); |
| ValueNode* slot = GetSmiConstant(iterator_.GetIndexOperand(1)); |
| ValueNode* depth = GetSmiConstant(iterator_.GetUnsignedImmediateOperand(2)); |
| ValueNode* result; |
| if (parent_) { |
| ValueNode* vector = GetConstant(feedback()); |
| result = BuildCallBuiltin<Builtin::kLookupGlobalICInsideTypeof>( |
| {name, depth, slot, vector}); |
| } else { |
| result = BuildCallBuiltin<Builtin::kLookupGlobalICInsideTypeofTrampoline>( |
| {name, depth, slot}); |
| } |
| SetAccumulator(result); |
| } |
| |
| namespace { |
| Runtime::FunctionId StaLookupSlotFunction(uint8_t sta_lookup_slot_flags) { |
| using Flags = interpreter::StoreLookupSlotFlags; |
| switch (Flags::GetLanguageMode(sta_lookup_slot_flags)) { |
| case LanguageMode::kStrict: |
| return Runtime::kStoreLookupSlot_Strict; |
| case LanguageMode::kSloppy: |
| if (Flags::IsLookupHoistingMode(sta_lookup_slot_flags)) { |
| return Runtime::kStoreLookupSlot_SloppyHoisting; |
| } else { |
| return Runtime::kStoreLookupSlot_Sloppy; |
| } |
| } |
| } |
| } // namespace |
| |
| void MaglevGraphBuilder::VisitStaLookupSlot() { |
| // StaLookupSlot <name_index> <flags> |
| ValueNode* value = GetAccumulatorTagged(); |
| ValueNode* name = GetConstant(GetRefOperand<Name>(0)); |
| uint32_t flags = GetFlag8Operand(1); |
| SetAccumulator(BuildCallRuntime(StaLookupSlotFunction(flags), {name, value})); |
| } |
| |
| namespace { |
| NodeType StaticTypeForConstant(compiler::HeapObjectRef ref) { |
| if (ref.IsString()) { |
| if (ref.IsInternalizedString()) { |
| return NodeType::kInternalizedString; |
| } |
| return NodeType::kString; |
| } else if (ref.IsSymbol()) { |
| return NodeType::kSymbol; |
| } else if (ref.IsHeapNumber()) { |
| return NodeType::kHeapNumber; |
| } else if (ref.IsJSReceiver()) { |
| return NodeType::kJSReceiverWithKnownMap; |
| } |
| return NodeType::kHeapObjectWithKnownMap; |
| } |
| NodeType StaticTypeForConstant(compiler::ObjectRef ref) { |
| if (ref.IsSmi()) return NodeType::kSmi; |
| return StaticTypeForConstant(ref.AsHeapObject()); |
| } |
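| // Derives the most precise NodeType that is known purely from a node's value |
| // representation and opcode (including constants), without consulting any |
| // recorded feedback or per-node information. |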
| NodeType StaticTypeForNode(compiler::JSHeapBroker* broker, |
| LocalIsolate* isolate, ValueNode* node) { |
| switch (node->properties().value_representation()) { |
| case ValueRepresentation::kInt32: |
| case ValueRepresentation::kUint32: |
| case ValueRepresentation::kFloat64: |
| return NodeType::kNumber; |
| case ValueRepresentation::kHoleyFloat64: |
| return NodeType::kNumberOrOddball; |
| case ValueRepresentation::kWord64: |
| UNREACHABLE(); |
| case ValueRepresentation::kTagged: |
| break; |
| } |
| switch (node->opcode()) { |
| case Opcode::kCheckedSmiTagInt32: |
| case Opcode::kCheckedSmiTagUint32: |
| case Opcode::kCheckedSmiTagFloat64: |
| case Opcode::kUnsafeSmiTag: |
| case Opcode::kSmiConstant: |
| return NodeType::kSmi; |
| case Opcode::kAllocateRaw: |
| case Opcode::kFoldedAllocation: |
| return NodeType::kAnyHeapObject; |
| case Opcode::kRootConstant: { |
| RootConstant* constant = node->Cast<RootConstant>(); |
| switch (constant->index()) { |
| case RootIndex::kTrueValue: |
| case RootIndex::kFalseValue: |
| return NodeType::kBoolean; |
| case RootIndex::kUndefinedValue: |
| case RootIndex::kNullValue: |
| return NodeType::kOddball; |
| default: |
| break; |
| } |
| V8_FALLTHROUGH; |
| } |
| case Opcode::kConstant: { |
| compiler::HeapObjectRef ref = |
| MaglevGraphBuilder::TryGetConstant(broker, isolate, node).value(); |
| return StaticTypeForConstant(ref); |
| } |
| case Opcode::kLoadPolymorphicTaggedField: { |
| Representation field_representation = |
| node->Cast<LoadPolymorphicTaggedField>()->field_representation(); |
| switch (field_representation.kind()) { |
| case Representation::kSmi: |
| return NodeType::kSmi; |
| case Representation::kHeapObject: |
| return NodeType::kAnyHeapObject; |
| default: |
| return NodeType::kUnknown; |
| } |
| } |
| case Opcode::kToNumberOrNumeric: |
| if (node->Cast<ToNumberOrNumeric>()->mode() == |
| Object::Conversion::kToNumber) { |
| return NodeType::kNumber; |
| } |
| // TODO(verwaest): Check what we need here. |
| return NodeType::kUnknown; |
| case Opcode::kToString: |
| case Opcode::kNumberToString: |
| case Opcode::kStringConcat: |
| return NodeType::kString; |
| case Opcode::kCheckedInternalizedString: |
| return NodeType::kInternalizedString; |
| case Opcode::kToObject: |
| return NodeType::kJSReceiver; |
| case Opcode::kToName: |
| return NodeType::kName; |
| case Opcode::kFloat64Equal: |
| case Opcode::kFloat64GreaterThan: |
| case Opcode::kFloat64GreaterThanOrEqual: |
| case Opcode::kFloat64LessThan: |
| case Opcode::kFloat64LessThanOrEqual: |
| case Opcode::kFloat64StrictEqual: |
| case Opcode::kInt32Equal: |
| case Opcode::kInt32GreaterThan: |
| case Opcode::kInt32GreaterThanOrEqual: |
| case Opcode::kInt32LessThan: |
| case Opcode::kInt32LessThanOrEqual: |
| case Opcode::kInt32StrictEqual: |
| case Opcode::kGenericEqual: |
| case Opcode::kGenericStrictEqual: |
| case Opcode::kGenericLessThan: |
| case Opcode::kGenericLessThanOrEqual: |
| case Opcode::kGenericGreaterThan: |
| case Opcode::kGenericGreaterThanOrEqual: |
| case Opcode::kLogicalNot: |
| case Opcode::kStringEqual: |
| case Opcode::kTaggedEqual: |
| case Opcode::kTaggedNotEqual: |
| case Opcode::kTestInstanceOf: |
| case Opcode::kTestTypeOf: |
| case Opcode::kTestUndetectable: |
| case Opcode::kToBoolean: |
| case Opcode::kToBooleanLogicalNot: |
| return NodeType::kBoolean; |
| default: |
| return NodeType::kUnknown; |
| } |
| } |
| } // namespace |
| |
| bool MaglevGraphBuilder::EnsureType(ValueNode* node, NodeType type, |
| NodeType* old_type) { |
| NodeType static_type = StaticTypeForNode(broker(), local_isolate(), node); |
| if (NodeTypeIs(static_type, type)) { |
| if (old_type) *old_type = static_type; |
| return true; |
| } |