| // Copyright 2022 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/maglev/maglev-ir.h" |
| |
| #include <limits> |
| |
| #include "src/base/bounds.h" |
| #include "src/builtins/builtins-constructor.h" |
| #include "src/codegen/interface-descriptors-inl.h" |
| #include "src/codegen/interface-descriptors.h" |
| #include "src/compiler/heap-refs.h" |
| #include "src/deoptimizer/deoptimize-reason.h" |
| #include "src/execution/isolate-inl.h" |
| #include "src/heap/local-heap.h" |
| #include "src/heap/parked-scope.h" |
| #include "src/interpreter/bytecode-flags.h" |
| #include "src/maglev/maglev-assembler-inl.h" |
| #include "src/maglev/maglev-assembler.h" |
| #include "src/maglev/maglev-code-gen-state.h" |
| #include "src/maglev/maglev-compilation-unit.h" |
| #include "src/maglev/maglev-graph-labeller.h" |
| #include "src/maglev/maglev-graph-processor.h" |
| #include "src/maglev/maglev-ir-inl.h" |
| #include "src/roots/roots.h" |
| |
| namespace v8 { |
| namespace internal { |
| namespace maglev { |
| |
| #define __ masm-> |
| |
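| // Returns the printable name of {opcode}, via a lookup table generated from |
| // NODE_BASE_LIST. |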
| const char* OpcodeToString(Opcode opcode) { |
| #define DEF_NAME(Name) #Name, |
| static constexpr const char* const names[] = {NODE_BASE_LIST(DEF_NAME)}; |
| #undef DEF_NAME |
| return names[static_cast<int>(opcode)]; |
| } |
| |
| BasicBlock* Phi::predecessor_at(int i) { |
| return merge_state_->predecessor_at(i); |
| } |
| |
| namespace { |
| |
| // Prevent people from accidentally using kScratchRegister here and having |
| // their code break on arm64. |
| struct Do_not_use_kScratchRegister_in_arch_independent_code { |
| } kScratchRegister; |
| struct Do_not_use_kScratchDoubleRegister_in_arch_independent_code { |
| } kScratchDoubleRegister; |
| static_assert(!std::is_same_v<decltype(kScratchRegister), Register>); |
| static_assert( |
| !std::is_same_v<decltype(kScratchDoubleRegister), DoubleRegister>); |
| |
| } // namespace |
| |
| #ifdef DEBUG |
| namespace { |
| |
| template <size_t InputCount, typename Base, typename Derived> |
| int StaticInputCount(FixedInputNodeTMixin<InputCount, Base, Derived>*) { |
| return InputCount; |
| } |
| |
| int StaticInputCount(NodeBase*) { UNREACHABLE(); } |
| |
| } // namespace |
| |
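| // Debug-only check that overwriting this node in place with {new_opcode} is |
| // safe: the new node must not require deopt info or a register snapshot that |
| // the old one lacks, and must have the same input count and allocated size. |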
| void NodeBase::CheckCanOverwriteWith(Opcode new_opcode, |
| OpProperties new_properties) { |
| DCHECK_IMPLIES(new_properties.can_eager_deopt(), |
| properties().can_eager_deopt()); |
| DCHECK_IMPLIES(new_properties.can_lazy_deopt(), |
| properties().can_lazy_deopt()); |
| DCHECK_IMPLIES(new_properties.needs_register_snapshot(), |
| properties().needs_register_snapshot()); |
| |
| int old_input_count = input_count(); |
| size_t old_sizeof = -1; |
| switch (opcode()) { |
| #define CASE(op) \ |
| case Opcode::k##op: \ |
| old_sizeof = sizeof(op); \ |
| break; |
| NODE_BASE_LIST(CASE); |
| #undef CASE |
| } |
| |
| switch (new_opcode) { |
| #define CASE(op) \ |
| case Opcode::k##op: { \ |
| DCHECK_EQ(old_input_count, StaticInputCount(static_cast<op*>(this))); \ |
| DCHECK_EQ(sizeof(op), old_sizeof); \ |
| break; \ |
| } |
| NODE_BASE_LIST(CASE) |
| #undef CASE |
| } |
| } |
| |
| #endif // DEBUG |
| |
| bool Phi::is_loop_phi() const { return merge_state()->is_loop(); } |
| |
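| // Records which representations (Tagged, Int32, Float64, ...) this phi's |
| // value is used as, to inform later untagging decisions, and propagates the |
| // hint into any phi inputs. |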
| void Phi::RecordUseReprHint(UseRepresentationSet repr_mask, |
| int current_offset) { |
| if (is_loop_phi() && merge_state()->loop_info()->Contains(current_offset)) { |
| same_loop_uses_repr_hint_.Add(repr_mask); |
| } |
| |
| if (!repr_mask.is_subset_of(uses_repr_hint_)) { |
| uses_repr_hint_.Add(repr_mask); |
| |
| // Propagate to the inputs, ignoring not-yet-bound loop backedges. |
| int bound_inputs = input_count(); |
| if (merge_state()->is_unmerged_loop()) --bound_inputs; |
| |
| for (int i = 0; i < bound_inputs; i++) { |
| if (Phi* phi_input = input(i).node()->TryCast<Phi>()) { |
| phi_input->RecordUseReprHint(repr_mask, current_offset); |
| } |
| } |
| } |
| } |
| |
| namespace { |
| |
| // --- |
| // Print |
| // --- |
| |
| void PrintInputs(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| const NodeBase* node) { |
| if (!node->has_inputs()) return; |
| |
| os << " ["; |
| for (int i = 0; i < node->input_count(); i++) { |
| if (i != 0) os << ", "; |
| graph_labeller->PrintInput(os, node->input(i)); |
| } |
| os << "]"; |
| } |
| |
| void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| const NodeBase* node) {} |
| |
| void PrintResult(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| const ValueNode* node) { |
| os << " → " << node->result().operand(); |
| if (node->result().operand().IsAllocated() && node->is_spilled() && |
| node->spill_slot() != node->result().operand()) { |
| os << " (spilled: " << node->spill_slot() << ")"; |
| } |
| if (node->has_valid_live_range()) { |
| os << ", live range: [" << node->live_range().start << "-" |
| << node->live_range().end << "]"; |
| } |
| } |
| |
| void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| const NodeBase* node) {} |
| |
| void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| const UnconditionalControlNode* node) { |
| os << " b" << graph_labeller->BlockId(node->target()); |
| } |
| |
| void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| const BranchControlNode* node) { |
| os << " b" << graph_labeller->BlockId(node->if_true()) << " b" |
| << graph_labeller->BlockId(node->if_false()); |
| } |
| |
| void PrintTargets(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| const Switch* node) { |
| for (int i = 0; i < node->size(); i++) { |
| const BasicBlockRef& target = node->targets()[i]; |
| os << " b" << graph_labeller->BlockId(target.block_ptr()); |
| } |
| if (node->has_fallthrough()) { |
| BasicBlock* fallthrough_target = node->fallthrough(); |
| os << " b" << graph_labeller->BlockId(fallthrough_target); |
| } |
| } |
| |
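| // Printing can read heap objects (e.g. constants), so make sure the current |
| // LocalHeap is unparked for the duration of the print. |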
| class MaybeUnparkForPrint { |
| public: |
| MaybeUnparkForPrint() { |
| LocalHeap* local_heap = LocalHeap::Current(); |
| if (!local_heap) { |
| local_heap = Isolate::Current()->main_thread_local_heap(); |
| } |
| DCHECK_NOT_NULL(local_heap); |
| if (local_heap->IsParked()) { |
| scope_.emplace(local_heap); |
| } |
| } |
| |
| private: |
| base::Optional<UnparkedScope> scope_; |
| }; |
| |
| template <typename NodeT> |
| void PrintImpl(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| const NodeT* node, bool skip_targets) { |
| MaybeUnparkForPrint unpark; |
| os << node->opcode(); |
| node->PrintParams(os, graph_labeller); |
| PrintInputs(os, graph_labeller, node); |
| PrintResult(os, graph_labeller, node); |
| if (!skip_targets) { |
| PrintTargets(os, graph_labeller, node); |
| } |
| } |
| |
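| // Computes how many InputLocations the whole frame chain of {top_frame} |
| // needs, accounting for the closure, receiver and context slots that some |
| // frame types materialize. |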
| size_t GetInputLocationsArraySize(const DeoptFrame& top_frame) { |
| static constexpr int kClosureSize = 1; |
| static constexpr int kReceiverSize = 1; |
| static constexpr int kContextSize = 1; |
| size_t size = 0; |
| const DeoptFrame* frame = &top_frame; |
| do { |
| switch (frame->type()) { |
| case DeoptFrame::FrameType::kInterpretedFrame: |
| size += kClosureSize + frame->as_interpreted().frame_state()->size( |
| frame->as_interpreted().unit()); |
| break; |
| case DeoptFrame::FrameType::kInlinedArgumentsFrame: |
| size += kClosureSize + frame->as_inlined_arguments().arguments().size(); |
| break; |
| case DeoptFrame::FrameType::kConstructStubFrame: |
| size += kClosureSize + kReceiverSize + |
| frame->as_construct_stub().arguments_without_receiver().size() + |
| kContextSize; |
| break; |
| case DeoptFrame::FrameType::kBuiltinContinuationFrame: |
| size += |
| frame->as_builtin_continuation().parameters().size() + kContextSize; |
| break; |
| } |
| frame = frame->parent(); |
| } while (frame != nullptr); |
| return size; |
| } |
| |
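| // ToBoolean for read-only roots: everything is truthy except the explicitly |
| // listed falsy values (checked against BooleanValue in debug builds below). |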
| bool RootToBoolean(RootIndex index) { |
| switch (index) { |
| case RootIndex::kFalseValue: |
| case RootIndex::kNullValue: |
| case RootIndex::kUndefinedValue: |
| case RootIndex::kNanValue: |
| case RootIndex::kHoleNanValue: |
| case RootIndex::kMinusZeroValue: |
| case RootIndex::kempty_string: |
| #ifdef V8_ENABLE_WEBASSEMBLY |
| case RootIndex::kWasmNull: |
| #endif |
| return false; |
| default: |
| return true; |
| } |
| } |
| |
| #ifdef DEBUG |
| // For all RO roots, check that RootToBoolean returns the same value as |
| // BooleanValue on that root. |
| bool CheckToBooleanOnAllRoots(LocalIsolate* local_isolate) { |
| ReadOnlyRoots roots(local_isolate); |
| // Use the READ_ONLY_ROOT_LIST macro list rather than a for loop to get nicer |
| // error messages if there is a failure. |
| #define DO_CHECK(type, name, CamelName) \ |
| /* Ignore 'undefined' roots that are not the undefined value itself. */ \ |
| if (roots.name() != roots.undefined_value() || \ |
| RootIndex::k##CamelName == RootIndex::kUndefinedValue) { \ |
| DCHECK_EQ(roots.name().BooleanValue(local_isolate), \ |
| RootToBoolean(RootIndex::k##CamelName)); \ |
| } |
| READ_ONLY_ROOT_LIST(DO_CHECK) |
| #undef DO_CHECK |
| return true; |
| } |
| #endif |
| |
| } // namespace |
| |
| bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const { |
| #ifdef DEBUG |
| // (Ab)use static locals to call CheckToBooleanOnAllRoots once, on first |
| // call to this function. |
| static bool check_once = CheckToBooleanOnAllRoots(local_isolate); |
| DCHECK(check_once); |
| #endif |
| // ToBoolean is only supported for RO roots. |
| DCHECK(RootsTable::IsReadOnly(index_)); |
| return RootToBoolean(index_); |
| } |
| |
| bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node) { |
| DCHECK(IsConstantNode(node->opcode())); |
| switch (node->opcode()) { |
| #define CASE(Name) \ |
| case Opcode::k##Name: { \ |
| return node->Cast<Name>()->ToBoolean(local_isolate); \ |
| } |
| CONSTANT_VALUE_NODE_LIST(CASE) |
| #undef CASE |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) { |
| // TODO(leszeks): Getting the main thread local isolate is not what we |
| // actually want here, but it's all we have, and it happens to work because |
| // really all we're using it for is ReadOnlyRoots. We should change ToBoolean |
| // to be able to pass ReadOnlyRoots in directly. |
| return FromConstantToBool(masm->isolate()->AsLocalIsolate(), node); |
| } |
| |
| DeoptInfo::DeoptInfo(Zone* zone, const DeoptFrame top_frame, |
| compiler::FeedbackSource feedback_to_update) |
| : top_frame_(top_frame), |
| feedback_to_update_(feedback_to_update), |
| input_locations_(zone->NewArray<InputLocation>( |
| GetInputLocationsArraySize(top_frame))) { |
| // Initialise InputLocations so that they correctly don't have a next use id. |
| const size_t count = GetInputLocationsArraySize(top_frame); |
| for (size_t i = 0; i < count; ++i) { |
| new (&input_locations_[i]) InputLocation(); |
| } |
| } |
| |
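| // Returns whether {reg} holds (part of) the result of the lazily-deoptimized |
| // call, i.e. a value produced by the call itself rather than the frame state. |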
| bool LazyDeoptInfo::IsResultRegister(interpreter::Register reg) const { |
| if (top_frame().type() == DeoptFrame::FrameType::kConstructStubFrame) { |
| return reg == interpreter::Register::virtual_accumulator(); |
| } |
| if (V8_LIKELY(result_size() == 1)) { |
| return reg == result_location_; |
| } |
| if (result_size() == 0) { |
| return false; |
| } |
| DCHECK_EQ(result_size(), 2); |
| return reg == result_location_ || |
| reg == interpreter::Register(result_location_.index() + 1); |
| } |
| |
| void NodeBase::Print(std::ostream& os, MaglevGraphLabeller* graph_labeller, |
| bool skip_targets) const { |
| switch (opcode()) { |
| #define V(Name) \ |
| case Opcode::k##Name: \ |
| return PrintImpl(os, graph_labeller, this->Cast<Name>(), skip_targets); |
| NODE_BASE_LIST(V) |
| #undef V |
| } |
| UNREACHABLE(); |
| } |
| |
| void NodeBase::Print() const { |
| MaglevGraphLabeller labeller; |
| Print(std::cout, &labeller); |
| std::cout << std::endl; |
| } |
| |
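| // Sets an allocation hint for this node (the first hint wins) and forwards |
| // it through same-as-input constraints and to earlier phi inputs, so that |
| // related values tend to land in the same register. |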
| void ValueNode::SetHint(compiler::InstructionOperand hint) { |
| if (!hint_.IsInvalid()) return; |
| hint_ = hint; |
| if (result_.operand().IsUnallocated()) { |
| auto operand = compiler::UnallocatedOperand::cast(result_.operand()); |
| if (operand.HasSameAsInputPolicy()) { |
| input(operand.input_index()).node()->SetHint(hint); |
| } |
| } |
| if (this->Is<Phi>()) { |
| for (Input& input : *this) { |
| if (input.node()->has_id() && input.node()->id() < this->id()) { |
| input.node()->SetHint(hint); |
| } |
| } |
| } |
| } |
| |
| void ValueNode::SetNoSpill() { |
| DCHECK(!IsConstantNode(opcode())); |
| #ifdef DEBUG |
| state_ = kSpill; |
| #endif // DEBUG |
| spill_ = compiler::InstructionOperand(); |
| } |
| |
| void ValueNode::SetConstantLocation() { |
| DCHECK(IsConstantNode(opcode())); |
| #ifdef DEBUG |
| state_ = kSpill; |
| #endif // DEBUG |
| spill_ = compiler::ConstantOperand( |
| compiler::UnallocatedOperand::cast(result().operand()) |
| .virtual_register()); |
| } |
| |
| // --- |
| // Check input value representation |
| // --- |
| |
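| // Maps a MachineType to the Maglev ValueRepresentation used for input |
| // checking; all remaining word-sized representations are treated as Int32. |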
| ValueRepresentation ToValueRepresentation(MachineType type) { |
| switch (type.representation()) { |
| case MachineRepresentation::kTagged: |
| case MachineRepresentation::kTaggedSigned: |
| case MachineRepresentation::kTaggedPointer: |
| return ValueRepresentation::kTagged; |
| case MachineRepresentation::kFloat64: |
| return ValueRepresentation::kFloat64; |
| case MachineRepresentation::kWord64: |
| return ValueRepresentation::kWord64; |
| default: |
| return ValueRepresentation::kInt32; |
| } |
| } |
| |
| void CheckValueInputIs(const NodeBase* node, int i, |
| ValueRepresentation expected, |
| MaglevGraphLabeller* graph_labeller) { |
| ValueNode* input = node->input(i).node(); |
| DCHECK(!input->Is<Identity>()); |
| ValueRepresentation got = input->properties().value_representation(); |
| // Allow Float64 values to be inputs when HoleyFloat64 is expected. |
| bool valid = |
| (got == expected) || (got == ValueRepresentation::kFloat64 && |
| expected == ValueRepresentation::kHoleyFloat64); |
| if (!valid) { |
| std::ostringstream str; |
| str << "Type representation error: node "; |
| if (graph_labeller) { |
| str << "#" << graph_labeller->NodeId(node) << " : "; |
| } |
| str << node->opcode() << " (input @" << i << " = " << input->opcode() |
| << ") type " << got << " is not " << expected; |
| FATAL("%s", str.str().c_str()); |
| } |
| } |
| |
| void CheckValueInputIs(const NodeBase* node, int i, Opcode expected, |
| MaglevGraphLabeller* graph_labeller) { |
| ValueNode* input = node->input(i).node(); |
| Opcode got = input->opcode(); |
| if (got != expected) { |
| std::ostringstream str; |
| str << "Opcode error: node "; |
| if (graph_labeller) { |
| str << "#" << graph_labeller->NodeId(node) << " : "; |
| } |
| str << node->opcode() << " (input @" << i << " = " << input->opcode() |
| << ") opcode " << got << " is not " << expected; |
| FATAL("%s", str.str().c_str()); |
| } |
| } |
| |
| void CheckValueInputIsWord32(const NodeBase* node, int i, |
| MaglevGraphLabeller* graph_labeller) { |
| ValueNode* input = node->input(i).node(); |
| DCHECK(!input->Is<Identity>()); |
| ValueRepresentation got = input->properties().value_representation(); |
| if (got != ValueRepresentation::kInt32 && |
| got != ValueRepresentation::kUint32) { |
| std::ostringstream str; |
| str << "Type representation error: node "; |
| if (graph_labeller) { |
| str << "#" << graph_labeller->NodeId(node) << " : "; |
| } |
| str << node->opcode() << " (input @" << i << " = " << input->opcode() |
| << ") type " << got << " is not Word32 (Int32 or Uint32)"; |
| FATAL("%s", str.str().c_str()); |
| } |
| } |
| |
| void GeneratorStore::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void UnsafeSmiTag::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| DCHECK_EQ(input_count(), 1); |
| CheckValueInputIsWord32(this, 0, graph_labeller); |
| } |
| |
| void Phi::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| switch (value_representation()) { |
| #define CASE_REPR(repr) \ |
| case ValueRepresentation::k##repr: \ |
| for (int i = 0; i < input_count(); i++) { \ |
| CheckValueInputIs(this, i, ValueRepresentation::k##repr, \ |
| graph_labeller); \ |
| } \ |
| break; |
| |
| CASE_REPR(Tagged) |
| CASE_REPR(Int32) |
| CASE_REPR(Uint32) |
| CASE_REPR(Float64) |
| CASE_REPR(HoleyFloat64) |
| #undef CASE_REPR |
| case ValueRepresentation::kWord64: |
| UNREACHABLE(); |
| } |
| } |
| |
| void Call::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void Call::MarkTaggedInputsAsDecompressing() { |
| for (int i = 0; i < input_count(); i++) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| void CallWithArrayLike::VerifyInputs( |
| MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void CallWithArrayLike::MarkTaggedInputsAsDecompressing() { |
| for (int i = 0; i < input_count(); i++) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| void CallWithSpread::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void CallWithSpread::MarkTaggedInputsAsDecompressing() { |
| for (int i = 0; i < input_count(); i++) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| void CallSelf::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void CallSelf::MarkTaggedInputsAsDecompressing() { |
| for (int i = 0; i < input_count(); i++) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| void CallKnownJSFunction::VerifyInputs( |
| MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void CallKnownJSFunction::MarkTaggedInputsAsDecompressing() { |
| for (int i = 0; i < input_count(); i++) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| void Construct::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void Construct::MarkTaggedInputsAsDecompressing() { |
| for (int i = 0; i < input_count(); i++) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| void ConstructWithSpread::VerifyInputs( |
| MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void ConstructWithSpread::MarkTaggedInputsAsDecompressing() { |
| for (int i = 0; i < input_count(); i++) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| void CallBuiltin::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); |
| int count = input_count(); |
| // Verify context. |
| if (descriptor.HasContextParameter()) { |
| CheckValueInputIs(this, count - 1, ValueRepresentation::kTagged, |
| graph_labeller); |
| count--; |
| } |
| |
| // {all_input_count} includes the feedback slot and vector. |
| #ifdef DEBUG |
| int all_input_count = count + (has_feedback() ? 2 : 0); |
| if (descriptor.AllowVarArgs()) { |
| DCHECK_GE(all_input_count, descriptor.GetParameterCount()); |
| } else { |
| DCHECK_EQ(all_input_count, descriptor.GetParameterCount()); |
| } |
| #endif |
| // Check the rest of the inputs. |
| for (int i = 0; i < count; ++i) { |
| MachineType type = i < descriptor.GetParameterCount() |
| ? descriptor.GetParameterType(i) |
| : MachineType::AnyTagged(); |
| CheckValueInputIs(this, i, ToValueRepresentation(type), graph_labeller); |
| } |
| } |
| |
| void CallBuiltin::MarkTaggedInputsAsDecompressing() { |
| auto descriptor = Builtins::CallInterfaceDescriptorFor(builtin()); |
| int count = input_count(); |
| // Set context. |
| if (descriptor.HasContextParameter()) { |
| input(count - 1).node()->SetTaggedResultNeedsDecompress(); |
| count--; |
| } |
| // Set the rest of the tagged inputs. |
| for (int i = 0; i < count; ++i) { |
| MachineType type = i < descriptor.GetParameterCount() |
| ? descriptor.GetParameterType(i) |
| : MachineType::AnyTagged(); |
| if (type.IsTagged() && !type.IsTaggedSigned()) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| } |
| |
| void CallRuntime::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| for (int i = 0; i < input_count(); i++) { |
| CheckValueInputIs(this, i, ValueRepresentation::kTagged, graph_labeller); |
| } |
| } |
| |
| void CallRuntime::MarkTaggedInputsAsDecompressing() { |
| for (int i = 0; i < input_count(); i++) { |
| input(i).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| void FoldedAllocation::VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| Base::VerifyInputs(graph_labeller); |
| CheckValueInputIs(this, 0, Opcode::kAllocateRaw, graph_labeller); |
| } |
| |
| // --- |
| // Reify constants |
| // --- |
| |
| Handle<Object> ValueNode::Reify(LocalIsolate* isolate) const { |
| switch (opcode()) { |
| #define V(Name) \ |
| case Opcode::k##Name: \ |
| return this->Cast<Name>()->DoReify(isolate); |
| CONSTANT_VALUE_NODE_LIST(V) |
| #undef V |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| Handle<Object> ExternalConstant::DoReify(LocalIsolate* isolate) const { |
| UNREACHABLE(); |
| } |
| |
| Handle<Object> SmiConstant::DoReify(LocalIsolate* isolate) const { |
| return handle(value_, isolate); |
| } |
| |
| Handle<Object> Int32Constant::DoReify(LocalIsolate* isolate) const { |
| return isolate->factory()->NewNumber<AllocationType::kOld>(value()); |
| } |
| |
| Handle<Object> Float64Constant::DoReify(LocalIsolate* isolate) const { |
| return isolate->factory()->NewNumber<AllocationType::kOld>( |
| value_.get_scalar()); |
| } |
| |
| Handle<Object> Constant::DoReify(LocalIsolate* isolate) const { |
| return object_.object(); |
| } |
| |
| Handle<Object> RootConstant::DoReify(LocalIsolate* isolate) const { |
| return isolate->root_handle(index()); |
| } |
| |
| // --- |
| // Load node to registers |
| // --- |
| |
| namespace { |
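| // Statically dispatch to DoLoadToRegister only for the register kind that |
| // matches the node's value representation; the mismatching overload is |
| // unreachable. |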
| template <typename NodeT> |
| void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm, Register reg) { |
| if constexpr (!IsDoubleRepresentation( |
| NodeT::kProperties.value_representation())) { |
| return node->DoLoadToRegister(masm, reg); |
| } else { |
| UNREACHABLE(); |
| } |
| } |
| template <typename NodeT> |
| void LoadToRegisterHelper(NodeT* node, MaglevAssembler* masm, |
| DoubleRegister reg) { |
| if constexpr (IsDoubleRepresentation( |
| NodeT::kProperties.value_representation())) { |
| return node->DoLoadToRegister(masm, reg); |
| } else { |
| UNREACHABLE(); |
| } |
| } |
| } // namespace |
| |
| void ValueNode::LoadToRegister(MaglevAssembler* masm, Register reg) { |
| switch (opcode()) { |
| #define V(Name) \ |
| case Opcode::k##Name: \ |
| return LoadToRegisterHelper(this->Cast<Name>(), masm, reg); |
| VALUE_NODE_LIST(V) |
| #undef V |
| default: |
| UNREACHABLE(); |
| } |
| } |
| void ValueNode::LoadToRegister(MaglevAssembler* masm, DoubleRegister reg) { |
| switch (opcode()) { |
| #define V(Name) \ |
| case Opcode::k##Name: \ |
| return LoadToRegisterHelper(this->Cast<Name>(), masm, reg); |
| VALUE_NODE_LIST(V) |
| #undef V |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| void ValueNode::DoLoadToRegister(MaglevAssembler* masm, Register reg) { |
| DCHECK(is_spilled()); |
| DCHECK(!use_double_register()); |
| __ Move(reg, |
| masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot()))); |
| } |
| |
| void ValueNode::DoLoadToRegister(MaglevAssembler* masm, DoubleRegister reg) { |
| DCHECK(is_spilled()); |
| DCHECK(use_double_register()); |
| __ Move(reg, |
| masm->GetStackSlot(compiler::AllocatedOperand::cast(spill_slot()))); |
| } |
| |
| void ExternalConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) { |
| __ Move(reg, reference()); |
| } |
| |
| void SmiConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) { |
| __ Move(reg, value()); |
| } |
| |
| void Int32Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) { |
| __ Move(reg, value()); |
| } |
| |
| void Float64Constant::DoLoadToRegister(MaglevAssembler* masm, |
| DoubleRegister reg) { |
| __ Move(reg, value()); |
| } |
| |
| void Constant::DoLoadToRegister(MaglevAssembler* masm, Register reg) { |
| __ Move(reg, object_.object()); |
| } |
| |
| void RootConstant::DoLoadToRegister(MaglevAssembler* masm, Register reg) { |
| __ LoadRoot(reg, index()); |
| } |
| |
| // --- |
| // Arch agnostic nodes |
| // --- |
| |
| void ExternalConstant::SetValueLocationConstraints() { DefineAsConstant(this); } |
| void ExternalConstant::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) {} |
| |
| void SmiConstant::SetValueLocationConstraints() { DefineAsConstant(this); } |
| void SmiConstant::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) {} |
| |
| void Int32Constant::SetValueLocationConstraints() { DefineAsConstant(this); } |
| void Int32Constant::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) {} |
| |
| void Float64Constant::SetValueLocationConstraints() { DefineAsConstant(this); } |
| void Float64Constant::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) {} |
| |
| void Constant::SetValueLocationConstraints() { DefineAsConstant(this); } |
| void Constant::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) {} |
| |
| void RootConstant::SetValueLocationConstraints() { DefineAsConstant(this); } |
| void RootConstant::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) {} |
| |
| void InitialValue::SetValueLocationConstraints() { |
| // TODO(leszeks): Make this nicer. |
| result().SetUnallocated(compiler::UnallocatedOperand::FIXED_SLOT, |
| (StandardFrameConstants::kExpressionsOffset - |
| UnoptimizedFrameConstants::kRegisterFileFromFp) / |
| kSystemPointerSize + |
| source().index(), |
| kNoVreg); |
| } |
| void InitialValue::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| // No-op, the value is already in the appropriate slot. |
| } |
| |
| void RegisterInput::SetValueLocationConstraints() { |
| DefineAsFixed(this, input()); |
| } |
| void RegisterInput::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| // Nothing to be done, the value is already in the register. |
| } |
| |
| void GetSecondReturnedValue::SetValueLocationConstraints() { |
| DefineAsFixed(this, kReturnRegister1); |
| } |
| void GetSecondReturnedValue::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| // No-op. This is just a hack that binds kReturnRegister1 to a value node. |
| // kReturnRegister1 is guaranteed to be free in the register allocator, since |
| // the previous node in the basic block is a call. |
| #ifdef DEBUG |
| // Check that the previous node is a call. |
| Node* previous = nullptr; |
| for (Node* node : state.block()->nodes()) { |
| if (node == this) { |
| break; |
| } |
| previous = node; |
| } |
| DCHECK_NE(previous, nullptr); |
| DCHECK(previous->properties().is_call()); |
| #endif // DEBUG |
| } |
| |
| void Deopt::SetValueLocationConstraints() {} |
| void Deopt::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { |
| __ EmitEagerDeopt(this, reason()); |
| } |
| |
| void Phi::SetValueLocationConstraints() { |
| for (Input& input : *this) { |
| UseAny(input); |
| } |
| |
| // We have to pass a policy for the result, but it is ignored during register |
| // allocation. See StraightForwardRegisterAllocator::AllocateRegisters which |
| // has special handling for Phis. |
| static const compiler::UnallocatedOperand::ExtendedPolicy kIgnoredPolicy = |
| compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT; |
| |
| result().SetUnallocated(kIgnoredPolicy, kNoVreg); |
| } |
| void Phi::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) {} |
| |
| namespace { |
| |
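| // Maps a generic Operation to its corresponding *_WithFeedback builtin. |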
| constexpr Builtin BuiltinFor(Operation operation) { |
| switch (operation) { |
| #define CASE(name) \ |
| case Operation::k##name: \ |
| return Builtin::k##name##_WithFeedback; |
| OPERATION_LIST(CASE) |
| #undef CASE |
| } |
| } |
| |
| } // namespace |
| |
| template <class Derived, Operation kOperation> |
| void UnaryWithFeedbackNode<Derived, kOperation>::SetValueLocationConstraints() { |
| using D = UnaryOp_WithFeedbackDescriptor; |
| UseFixed(operand_input(), D::GetRegisterParameter(D::kValue)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| |
| template <class Derived, Operation kOperation> |
| void UnaryWithFeedbackNode<Derived, kOperation>::GenerateCode( |
| MaglevAssembler* masm, const ProcessingState& state) { |
| using D = UnaryOp_WithFeedbackDescriptor; |
| DCHECK_EQ(ToRegister(operand_input()), D::GetRegisterParameter(D::kValue)); |
| __ Move(kContextRegister, masm->native_context().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), feedback().index()); |
| __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector); |
| __ CallBuiltin(BuiltinFor(kOperation)); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| template <class Derived, Operation kOperation> |
| void BinaryWithFeedbackNode<Derived, |
| kOperation>::SetValueLocationConstraints() { |
| using D = BinaryOp_WithFeedbackDescriptor; |
| UseFixed(left_input(), D::GetRegisterParameter(D::kLeft)); |
| UseFixed(right_input(), D::GetRegisterParameter(D::kRight)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| |
| template <class Derived, Operation kOperation> |
| void BinaryWithFeedbackNode<Derived, kOperation>::GenerateCode( |
| MaglevAssembler* masm, const ProcessingState& state) { |
| using D = BinaryOp_WithFeedbackDescriptor; |
| DCHECK_EQ(ToRegister(left_input()), D::GetRegisterParameter(D::kLeft)); |
| DCHECK_EQ(ToRegister(right_input()), D::GetRegisterParameter(D::kRight)); |
| __ Move(kContextRegister, masm->native_context().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), feedback().index()); |
| __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector); |
| __ CallBuiltin(BuiltinFor(kOperation)); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
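| // Generic (feedback-collecting) unary/binary operations simply delegate to |
| // the templated base implementations above. |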
| #define DEF_OPERATION(Name) \ |
| void Name::SetValueLocationConstraints() { \ |
| Base::SetValueLocationConstraints(); \ |
| } \ |
| void Name::GenerateCode(MaglevAssembler* masm, \ |
| const ProcessingState& state) { \ |
| Base::GenerateCode(masm, state); \ |
| } |
| GENERIC_OPERATIONS_NODE_LIST(DEF_OPERATION) |
| #undef DEF_OPERATION |
| |
| void ConstantGapMove::SetValueLocationConstraints() { UNREACHABLE(); } |
| |
| namespace { |
| template <typename T> |
| struct GetRegister; |
| template <> |
| struct GetRegister<Register> { |
| static Register Get(compiler::AllocatedOperand target) { |
| return target.GetRegister(); |
| } |
| }; |
| template <> |
| struct GetRegister<DoubleRegister> { |
| static DoubleRegister Get(compiler::AllocatedOperand target) { |
| return target.GetDoubleRegister(); |
| } |
| }; |
| } // namespace |
| |
| void ConstantGapMove::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| switch (node_->opcode()) { |
| #define CASE(Name) \ |
| case Opcode::k##Name: \ |
| return node_->Cast<Name>()->DoLoadToRegister( \ |
| masm, GetRegister<Name::OutputRegister>::Get(target())); |
| CONSTANT_VALUE_NODE_LIST(CASE) |
| #undef CASE |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
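| // Emits a move between any combination of registers and stack slots, picking |
| // the instruction based on the (identical) source and target representations. |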
| void GapMove::SetValueLocationConstraints() { UNREACHABLE(); } |
| void GapMove::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| DCHECK_EQ(source().representation(), target().representation()); |
| MachineRepresentation repr = source().representation(); |
| if (source().IsRegister()) { |
| Register source_reg = ToRegister(source()); |
| if (target().IsAnyRegister()) { |
| DCHECK(target().IsRegister()); |
| __ MoveRepr(repr, ToRegister(target()), source_reg); |
| } else { |
| __ MoveRepr(repr, masm->ToMemOperand(target()), source_reg); |
| } |
| } else if (source().IsDoubleRegister()) { |
| DoubleRegister source_reg = ToDoubleRegister(source()); |
| if (target().IsAnyRegister()) { |
| DCHECK(target().IsDoubleRegister()); |
| __ Move(ToDoubleRegister(target()), source_reg); |
| } else { |
| __ Move(masm->ToMemOperand(target()), source_reg); |
| } |
| } else { |
| DCHECK(source().IsAnyStackSlot()); |
| MemOperand source_op = masm->ToMemOperand(source()); |
| if (target().IsRegister()) { |
| __ MoveRepr(repr, ToRegister(target()), source_op); |
| } else if (target().IsDoubleRegister()) { |
| __ Move(ToDoubleRegister(target()), source_op); |
| } else { |
| DCHECK(target().IsAnyStackSlot()); |
| __ MoveRepr(repr, masm->ToMemOperand(target()), source_op); |
| } |
| } |
| } |
| |
| void AssertInt32::SetValueLocationConstraints() { |
| UseRegister(left_input()); |
| UseRegister(right_input()); |
| } |
| void AssertInt32::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| __ CompareInt32(ToRegister(left_input()), ToRegister(right_input())); |
| __ Check(ToCondition(condition_), reason_); |
| } |
| |
| void CheckUint32IsSmi::SetValueLocationConstraints() { UseRegister(input()); } |
| void CheckUint32IsSmi::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register reg = ToRegister(input()); |
| // Perform an unsigned comparison against Smi::kMaxValue. |
| __ Cmp(reg, Smi::kMaxValue); |
| __ EmitEagerDeoptIf(kUnsignedGreaterThan, DeoptimizeReason::kNotASmi, this); |
| } |
| |
| void CheckedSmiUntag::SetValueLocationConstraints() { |
| UseRegister(input()); |
| DefineSameAsFirst(this); |
| } |
| |
| void CheckedSmiUntag::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register value = ToRegister(input()); |
| // TODO(leszeks): Consider optimizing away this test and using the carry bit |
| // of the `sarl` for cases where the deopt uses the value from a different |
| // register. |
| Condition is_smi = __ CheckSmi(value); |
| __ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi, |
| this); |
| __ SmiToInt32(value); |
| } |
| |
| void UnsafeSmiUntag::SetValueLocationConstraints() { |
| UseRegister(input()); |
| DefineSameAsFirst(this); |
| } |
| |
| void UnsafeSmiUntag::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register value = ToRegister(input()); |
| __ AssertSmi(value); |
| __ SmiToInt32(value); |
| } |
| |
| int DeleteProperty::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type; |
| return D::GetStackParameterCount(); |
| } |
| void DeleteProperty::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type; |
| UseFixed(context(), kContextRegister); |
| UseFixed(object(), D::GetRegisterParameter(D::kObject)); |
| UseFixed(key(), D::GetRegisterParameter(D::kKey)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void DeleteProperty::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kDeleteProperty>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(object()), D::GetRegisterParameter(D::kObject)); |
| DCHECK_EQ(ToRegister(key()), D::GetRegisterParameter(D::kKey)); |
| __ Move(D::GetRegisterParameter(D::kLanguageMode), |
| Smi::FromInt(static_cast<int>(mode()))); |
| __ CallBuiltin(Builtin::kDeleteProperty); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int ForInPrepare::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kForInPrepare>::type; |
| return D::GetStackParameterCount(); |
| } |
| void ForInPrepare::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kForInPrepare>::type; |
| UseFixed(context(), kContextRegister); |
| UseFixed(enumerator(), D::GetRegisterParameter(D::kEnumerator)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void ForInPrepare::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kForInPrepare>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(enumerator()), D::GetRegisterParameter(D::kEnumerator)); |
| __ Move(D::GetRegisterParameter(D::kVectorIndex), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector); |
| __ CallBuiltin(Builtin::kForInPrepare); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int ForInNext::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kForInNext>::type; |
| return D::GetStackParameterCount(); |
| } |
| void ForInNext::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kForInNext>::type; |
| UseFixed(context(), kContextRegister); |
| UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver)); |
| UseFixed(cache_array(), D::GetRegisterParameter(D::kCacheArray)); |
| UseFixed(cache_type(), D::GetRegisterParameter(D::kCacheType)); |
| UseFixed(cache_index(), D::GetRegisterParameter(D::kCacheIndex)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void ForInNext::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kForInNext>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(receiver()), D::GetRegisterParameter(D::kReceiver)); |
| DCHECK_EQ(ToRegister(cache_array()), D::GetRegisterParameter(D::kCacheArray)); |
| DCHECK_EQ(ToRegister(cache_type()), D::GetRegisterParameter(D::kCacheType)); |
| DCHECK_EQ(ToRegister(cache_index()), D::GetRegisterParameter(D::kCacheIndex)); |
| __ Move(D::GetRegisterParameter(D::kSlot), feedback().index()); |
| // The feedback vector is passed as a stack parameter. |
| static_assert(D::GetStackParameterIndex(D::kFeedbackVector) == 0); |
| static_assert(D::GetStackParameterCount() == 1); |
| __ Push(feedback().vector); |
| __ CallBuiltin(Builtin::kForInNext); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int GetIterator::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kGetIteratorWithFeedback>::type; |
| return D::GetStackParameterCount(); |
| } |
| void GetIterator::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kGetIteratorWithFeedback>::type; |
| UseFixed(context(), kContextRegister); |
| UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void GetIterator::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kGetIteratorWithFeedback>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(receiver()), D::GetRegisterParameter(D::kReceiver)); |
| __ Move(D::GetRegisterParameter(D::kLoadSlot), |
| TaggedIndex::FromIntptr(load_slot())); |
| __ Move(D::GetRegisterParameter(D::kCallSlot), |
| TaggedIndex::FromIntptr(call_slot())); |
| __ Move(D::GetRegisterParameter(D::kMaybeFeedbackVector), feedback()); |
| __ CallBuiltin(Builtin::kGetIteratorWithFeedback); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| template <class Derived, Operation kOperation> |
| void Int32CompareNode<Derived, kOperation>::SetValueLocationConstraints() { |
| UseRegister(left_input()); |
| UseRegister(right_input()); |
| DefineAsRegister(this); |
| } |
| |
| template <class Derived, Operation kOperation> |
| void Int32CompareNode<Derived, kOperation>::GenerateCode( |
| MaglevAssembler* masm, const ProcessingState& state) { |
| Register result = ToRegister(this->result()); |
| Label is_true, end; |
| __ CompareInt32AndJumpIf(ToRegister(left_input()), ToRegister(right_input()), |
| ConditionFor(kOperation), &is_true, |
| Label::Distance::kNear); |
| // TODO(leszeks): Investigate loading existing materialisations of roots here, |
| // if available. |
| __ LoadRoot(result, RootIndex::kFalseValue); |
| __ jmp(&end); |
| { |
| __ bind(&is_true); |
| __ LoadRoot(result, RootIndex::kTrueValue); |
| } |
| __ bind(&end); |
| } |
| |
| #define DEF_OPERATION(Name) \ |
| void Name::SetValueLocationConstraints() { \ |
| Base::SetValueLocationConstraints(); \ |
| } \ |
| void Name::GenerateCode(MaglevAssembler* masm, \ |
| const ProcessingState& state) { \ |
| Base::GenerateCode(masm, state); \ |
| } |
| DEF_OPERATION(Int32Equal) |
| DEF_OPERATION(Int32StrictEqual) |
| DEF_OPERATION(Int32LessThan) |
| DEF_OPERATION(Int32LessThanOrEqual) |
| DEF_OPERATION(Int32GreaterThan) |
| DEF_OPERATION(Int32GreaterThanOrEqual) |
| #undef DEF_OPERATION |
| |
| template <class Derived, Operation kOperation> |
| void Float64CompareNode<Derived, kOperation>::SetValueLocationConstraints() { |
| UseRegister(left_input()); |
| UseRegister(right_input()); |
| DefineAsRegister(this); |
| } |
| |
| template <class Derived, Operation kOperation> |
| void Float64CompareNode<Derived, kOperation>::GenerateCode( |
| MaglevAssembler* masm, const ProcessingState& state) { |
| DoubleRegister left = ToDoubleRegister(left_input()); |
| DoubleRegister right = ToDoubleRegister(right_input()); |
| Register result = ToRegister(this->result()); |
| Label is_false, end; |
| __ CompareFloat64(left, right); |
| // Ucomisd sets these flags accordingly: |
| // UNORDERED(one of the operands is a NaN): ZF,PF,CF := 111; |
| // GREATER_THAN: ZF,PF,CF := 000; |
| // LESS_THAN: ZF,PF,CF := 001; |
| // EQUAL: ZF,PF,CF := 100; |
| // Since ZF can be set by NaN or EQUAL, we check for NaN first. |
| __ JumpIf(ConditionForNaN(), &is_false); |
| __ JumpIf(NegateCondition(ConditionForFloat64(kOperation)), &is_false); |
| // TODO(leszeks): Investigate loading existing materialisations of roots here, |
| // if available. |
| __ LoadRoot(result, RootIndex::kTrueValue); |
| __ Jump(&end); |
| { |
| __ bind(&is_false); |
| __ LoadRoot(result, RootIndex::kFalseValue); |
| } |
| __ bind(&end); |
| } |
| |
| #define DEF_OPERATION(Name) \ |
| void Name::SetValueLocationConstraints() { \ |
| Base::SetValueLocationConstraints(); \ |
| } \ |
| void Name::GenerateCode(MaglevAssembler* masm, \ |
| const ProcessingState& state) { \ |
| Base::GenerateCode(masm, state); \ |
| } |
| DEF_OPERATION(Float64Equal) |
| DEF_OPERATION(Float64StrictEqual) |
| DEF_OPERATION(Float64LessThan) |
| DEF_OPERATION(Float64LessThanOrEqual) |
| DEF_OPERATION(Float64GreaterThan) |
| DEF_OPERATION(Float64GreaterThanOrEqual) |
| #undef DEF_OPERATION |
| |
| void CheckedHoleyFloat64ToFloat64::SetValueLocationConstraints() { |
| UseRegister(input()); |
| DefineSameAsFirst(this); |
| set_temporaries_needed(1); |
| } |
| void CheckedHoleyFloat64ToFloat64::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register scratch = temps.Acquire(); |
| DoubleRegister value = ToDoubleRegister(input()); |
| __ DoubleToInt64Repr(scratch, value); |
| __ EmitEagerDeoptIf(__ IsInt64Constant(scratch, kHoleNanInt64), |
| DeoptimizeReason::kHole, this); |
| } |
| |
| void CheckBounds::SetValueLocationConstraints() { |
| UseRegister(value_input()); |
| UseRegister(bound_input()); |
| } |
| void CheckBounds::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register value = ToRegister(value_input()); |
| Register bound = ToRegister(bound_input()); |
| __ CompareInt32(value, bound); |
| __ EmitEagerDeoptIf(kUnsignedGreaterThanEqual, DeoptimizeReason::kOutOfBounds, |
| this); |
| } |
| |
| void LoadDoubleField::SetValueLocationConstraints() { |
| UseRegister(object_input()); |
| DefineAsRegister(this); |
| set_temporaries_needed(1); |
| } |
| void LoadDoubleField::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register tmp = temps.Acquire(); |
| Register object = ToRegister(object_input()); |
| __ AssertNotSmi(object); |
| __ DecompressTagged(tmp, FieldMemOperand(object, offset())); |
| __ AssertNotSmi(tmp); |
| __ LoadHeapNumberValue(ToDoubleRegister(result()), tmp); |
| } |
| |
| void LoadTaggedField::SetValueLocationConstraints() { |
| UseRegister(object_input()); |
| DefineAsRegister(this); |
| } |
| void LoadTaggedField::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(object_input()); |
| __ AssertNotSmi(object); |
| if (this->decompresses_tagged_result()) { |
| __ LoadTaggedField(ToRegister(result()), object, offset()); |
| } else { |
| __ LoadTaggedFieldWithoutDecompressing(ToRegister(result()), object, |
| offset()); |
| } |
| } |
| |
| void LoadTaggedFieldByFieldIndex::SetValueLocationConstraints() { |
| UseRegister(object_input()); |
| UseAndClobberRegister(index_input()); |
| DefineAsRegister(this); |
| set_temporaries_needed(1); |
| set_double_temporaries_needed(1); |
| } |
| void LoadTaggedFieldByFieldIndex::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(object_input()); |
| Register index = ToRegister(index_input()); |
| Register result_reg = ToRegister(result()); |
| __ AssertNotSmi(object); |
| __ AssertSmi(index); |
| |
| ZoneLabelRef done(masm); |
| |
| // For in-object properties, the index is encoded as: |
| // |
| // index = actual_index | is_double_bit | smi_tag_bit |
| // = actual_index << 2 | is_double_bit << 1 |
| // |
| // The value we want is at the field offset: |
| // |
| // (actual_index << kTaggedSizeLog2) + JSObject::kHeaderSize |
| // |
| // We could get index from actual_index by shifting away the double and smi |
| // bits. But, note that `kTaggedSizeLog2 == 2` and `index` encodes |
| // `actual_index` with a two bit shift. So, we can do some rearranging |
| // to get the offset without shifting: |
| // |
| // ((index >> 2) << kTaggedSizeLog2) + JSObject::kHeaderSize |
| // |
| // [Expand definitions of index and kTaggedSizeLog2] |
| // = (((actual_index << 2 | is_double_bit << 1) >> 2) << 2) |
| // + JSObject::kHeaderSize |
| // |
| // [Cancel out shift down and shift up, clear is_double bit by subtracting] |
| // = (actual_index << 2 | is_double_bit << 1) - (is_double_bit << 1) |
| // + JSObject::kHeaderSize |
| // |
| // [Fold together the constants, and collapse definition of index] |
| // = index + (JSObject::kHeaderSize - (is_double_bit << 1)) |
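| // |
| // [Worked example: actual_index == 3, is_double_bit set] |
| // index = (3 << 2) | (1 << 1) = 14, so the offset is |
| // 14 + (JSObject::kHeaderSize - 2) = 12 + JSObject::kHeaderSize |
| // = (3 << kTaggedSizeLog2) + JSObject::kHeaderSize, as expected. |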
| // |
| // |
| // For out-of-object properties, the encoding is: |
| // |
| // index = (-1 - actual_index) | is_double_bit | smi_tag_bit |
| // = (-1 - actual_index) << 2 | is_double_bit << 1 |
| // = (-1 - actual_index) * 4 + (is_double_bit ? 2 : 0) |
| // = -(actual_index * 4) + (is_double_bit ? 2 : 0) - 4 |
| // = -(actual_index << 2) + (is_double_bit ? 2 : 0) - 4 |
| // |
| // The value we want is in the property array at offset: |
| // |
| // (actual_index << kTaggedSizeLog2) + FixedArray::kHeaderSize |
| // |
| // [Expand definition of kTaggedSizeLog2] |
| // = (actual_index << 2) + FixedArray::kHeaderSize |
| // |
| // [Substitute in index] |
| // = (-index + (is_double_bit ? 2 : 0) - 4) + FixedArray::kHeaderSize |
| // |
| // [Fold together the constants] |
| // = -index + (FixedArray::kHeaderSize + (is_double_bit ? 2 : 0) - 4)) |
| // |
| // This allows us to simply negate the index register and do a load with |
| // otherwise constant offset. |
| |
| // Check if field is a mutable double field. |
| static constexpr int32_t kIsDoubleBitMask = 1 << kSmiTagSize; |
| __ TestInt32AndJumpIfAnySet( |
| index, kIsDoubleBitMask, |
| __ MakeDeferredCode( |
| [](MaglevAssembler* masm, Register object, Register index, |
| Register result_reg, RegisterSnapshot register_snapshot, |
| ZoneLabelRef done) { |
| // The field is a Double field, a.k.a. a mutable HeapNumber. |
| static const int kIsDoubleBit = 1; |
| |
| // Check if field is in-object or out-of-object. The is_double bit |
| // value doesn't matter, since negative values will stay negative. |
| Label if_outofobject, loaded_field; |
| __ CompareInt32AndJumpIf(index, 0, kLessThan, &if_outofobject); |
| |
| // The field is located in the {object} itself. |
| { |
| // See giant comment above. |
| static_assert(kTaggedSizeLog2 == 2); |
| static_assert(kSmiTagSize == 1); |
| // We haven't untagged, so we need to sign extend. |
| __ SignExtend32To64Bits(index, index); |
| __ LoadTaggedFieldByIndex( |
| result_reg, object, index, 1, |
| JSObject::kHeaderSize - (kIsDoubleBit << kSmiTagSize)); |
| __ Jump(&loaded_field); |
| } |
| |
| __ bind(&if_outofobject); |
| { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register property_array = temps.Acquire(); |
| // Load the property array. |
| __ LoadTaggedField( |
| property_array, |
| FieldMemOperand(object, JSObject::kPropertiesOrHashOffset)); |
| |
| // See giant comment above. |
| static_assert(kSmiTagSize == 1); |
| __ NegateInt32(index); |
| __ LoadTaggedFieldByIndex( |
| result_reg, property_array, index, 1, |
| FixedArray::kHeaderSize + (kIsDoubleBit << kSmiTagSize) - 4); |
| __ Jump(&loaded_field); |
| } |
| |
| __ bind(&loaded_field); |
| // We may have transitioned in-place away from double, so check that |
| // this is a HeapNumber -- otherwise the load is fine and we don't |
| // need to copy anything anyway. |
| __ JumpIfSmi(result_reg, *done); |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register map = temps.Acquire(); |
| // Hack: The temporary allocated for `map` might alias the result |
| // register. If it does, use the index register as a temporary |
| // instead (since it's clobbered anyway). |
| // TODO(leszeks): Extend the result register's lifetime to overlap |
| // the temporaries, so that this alias isn't possible. |
| if (map == result_reg) { |
| DCHECK_NE(map, index); |
| map = index; |
| } |
| __ LoadMap(map, result_reg); |
| __ JumpIfNotRoot(map, RootIndex::kHeapNumberMap, *done); |
| DoubleRegister double_value = temps.AcquireDouble(); |
| __ LoadHeapNumberValue(double_value, result_reg); |
| __ AllocateHeapNumber(register_snapshot, result_reg, double_value); |
| __ Jump(*done); |
| }, |
| object, index, result_reg, register_snapshot(), done)); |
| |
| // The field is a proper Tagged field on {object}. The {index} is shifted |
| // to the left by one in the code below. |
| { |
| static const int kIsDoubleBit = 0; |
| |
| // Check if field is in-object or out-of-object. The is_double bit value |
| // doesn't matter, since negative values will stay negative. |
| Label if_outofobject; |
| __ CompareInt32AndJumpIf(index, 0, kLessThan, &if_outofobject); |
| |
| // The field is located in the {object} itself. |
| { |
| // See giant comment above. |
| static_assert(kTaggedSizeLog2 == 2); |
| static_assert(kSmiTagSize == 1); |
| // We haven't untagged, so we need to sign extend. |
| __ SignExtend32To64Bits(index, index); |
| __ LoadTaggedFieldByIndex( |
| result_reg, object, index, 1, |
| JSObject::kHeaderSize - (kIsDoubleBit << kSmiTagSize)); |
| __ Jump(*done); |
| } |
| |
| __ bind(&if_outofobject); |
| { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register property_array = temps.Acquire(); |
| // Load the property array. |
| __ LoadTaggedField( |
| property_array, |
| FieldMemOperand(object, JSObject::kPropertiesOrHashOffset)); |
| |
| // See giant comment above. |
| static_assert(kSmiTagSize == 1); |
| __ NegateInt32(index); |
| __ LoadTaggedFieldByIndex( |
| result_reg, property_array, index, 1, |
| FixedArray::kHeaderSize + (kIsDoubleBit << kSmiTagSize) - 4); |
| // Fallthrough to `done`. |
| } |
| } |
| |
| __ bind(*done); |
| } |
| |
| void LoadFixedArrayElement::SetValueLocationConstraints() { |
| UseRegister(elements_input()); |
| UseRegister(index_input()); |
| DefineAsRegister(this); |
| } |
| void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register elements = ToRegister(elements_input()); |
| Register index = ToRegister(index_input()); |
| Register result_reg = ToRegister(result()); |
| if (this->decompresses_tagged_result()) { |
| __ LoadFixedArrayElement(result_reg, elements, index); |
| } else { |
| __ LoadFixedArrayElementWithoutDecompressing(result_reg, elements, index); |
| } |
| } |
| |
| void LoadFixedDoubleArrayElement::SetValueLocationConstraints() { |
| UseRegister(elements_input()); |
| UseRegister(index_input()); |
| DefineAsRegister(this); |
| } |
| void LoadFixedDoubleArrayElement::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register elements = ToRegister(elements_input()); |
| Register index = ToRegister(index_input()); |
| DoubleRegister result_reg = ToDoubleRegister(result()); |
| __ LoadFixedDoubleArrayElement(result_reg, elements, index); |
| } |
| |
| void LoadHoleyFixedDoubleArrayElement::SetValueLocationConstraints() { |
| UseRegister(elements_input()); |
| UseRegister(index_input()); |
| DefineAsRegister(this); |
| } |
| void LoadHoleyFixedDoubleArrayElement::GenerateCode( |
| MaglevAssembler* masm, const ProcessingState& state) { |
| Register elements = ToRegister(elements_input()); |
| Register index = ToRegister(index_input()); |
| DoubleRegister result_reg = ToDoubleRegister(result()); |
| __ LoadFixedDoubleArrayElement(result_reg, elements, index); |
| } |
| |
| int StoreMap::MaxCallStackArgs() const { |
| return WriteBarrierDescriptor::GetStackParameterCount(); |
| } |
| void StoreMap::SetValueLocationConstraints() { |
| UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister()); |
| set_temporaries_needed(1); |
| } |
| void StoreMap::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| // TODO(leszeks): Consider making this an arbitrary register and push/popping |
| // in the deferred path. |
| Register object = WriteBarrierDescriptor::ObjectRegister(); |
| DCHECK_EQ(object, ToRegister(object_input())); |
| Register value = temps.Acquire(); |
| __ Move(value, map_.object()); |
| |
| __ StoreTaggedFieldWithWriteBarrier(object, HeapObject::kMapOffset, value, |
| register_snapshot(), |
| MaglevAssembler::kValueIsDecompressed, |
| MaglevAssembler::kValueCannotBeSmi); |
| } |
| |
| int StoreTaggedFieldWithWriteBarrier::MaxCallStackArgs() const { |
| return WriteBarrierDescriptor::GetStackParameterCount(); |
| } |
| void StoreTaggedFieldWithWriteBarrier::SetValueLocationConstraints() { |
| UseFixed(object_input(), WriteBarrierDescriptor::ObjectRegister()); |
| UseRegister(value_input()); |
| } |
| void StoreTaggedFieldWithWriteBarrier::GenerateCode( |
| MaglevAssembler* masm, const ProcessingState& state) { |
| // TODO(leszeks): Consider making this an arbitrary register and push/popping |
| // in the deferred path. |
| Register object = WriteBarrierDescriptor::ObjectRegister(); |
| DCHECK_EQ(object, ToRegister(object_input())); |
| Register value = ToRegister(value_input()); |
| |
| __ StoreTaggedFieldWithWriteBarrier( |
| object, offset(), value, register_snapshot(), |
| value_input().node()->decompresses_tagged_result() |
| ? MaglevAssembler::kValueIsDecompressed |
| : MaglevAssembler::kValueIsCompressed, |
| MaglevAssembler::kValueCanBeSmi); |
| } |
| |
| namespace { |
| |
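| // Emits a polymorphic map dispatch: Smis branch to the HeapNumber case (if |
| // any access info covers it); other objects have their map compared against |
| // each access info's maps. {f} is emitted for the matching case, and a |
| // WrongMap eager deopt is emitted if nothing matches. |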
| template <typename NodeT, typename Function, typename... Args> |
| void EmitPolymorphicAccesses(MaglevAssembler* masm, NodeT* node, |
| Register object, Function&& f, Args&&... args) { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register object_map = temps.Acquire(); |
| |
| Label done; |
| Label is_number; |
| |
| Condition is_smi = __ CheckSmi(object); |
| __ JumpIf(is_smi, &is_number); |
| __ LoadMap(object_map, object); |
| |
| for (const PolymorphicAccessInfo& access_info : node->access_infos()) { |
| Label next; |
| Label map_found; |
| auto& maps = access_info.maps(); |
| |
| bool has_number_map = false; |
| if (HasOnlyStringMaps(base::VectorOf(maps))) { |
| __ CompareInstanceTypeRange(object_map, FIRST_STRING_TYPE, |
| LAST_STRING_TYPE); |
| __ JumpIf(kUnsignedGreaterThan, &next); |
| // Fallthrough... to map_found. |
| } else { |
| for (auto it = maps.begin(); it != maps.end(); ++it) { |
| if (it->object()->IsHeapNumberMap()) { |
| __ CompareRoot(object_map, RootIndex::kHeapNumberMap); |
| has_number_map = true; |
| } else { |
| __ CompareTagged(object_map, it->object()); |
| } |
| if (it == maps.end() - 1) { |
| __ JumpIf(kNotEqual, &next); |
| // Fallthrough... to map_found. |
| } else { |
| __ JumpIf(kEqual, &map_found); |
| } |
| } |
| } |
| |
| if (has_number_map) { |
| DCHECK(!is_number.is_bound()); |
| __ bind(&is_number); |
| } |
| __ bind(&map_found); |
| f(masm, node, access_info, object, object_map, std::forward<Args>(args)...); |
| __ Jump(&done); |
| |
| __ bind(&next); |
| } |
| |
| // No HeapNumberMap was found among the access infos, so a number (Smi or |
| // HeapNumber) must eager-deopt here. |
| if (!is_number.is_bound()) { |
| __ bind(&is_number); |
| } |
| |
| // No map matched! |
| __ EmitEagerDeopt(node, DeoptimizeReason::kWrongMap); |
| __ bind(&done); |
| } |
| |
| } // namespace |
| |
| void LoadPolymorphicTaggedField::SetValueLocationConstraints() { |
| UseRegister(object_input()); |
| DefineAsRegister(this); |
| set_temporaries_needed(1); |
| set_double_temporaries_needed(1); |
| } |
| void LoadPolymorphicTaggedField::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(object_input()); |
| EmitPolymorphicAccesses( |
| masm, this, object, |
| [](MaglevAssembler* masm, LoadPolymorphicTaggedField* node, |
| const PolymorphicAccessInfo& access_info, Register object, |
| Register map, Register result) { |
| switch (access_info.kind()) { |
| case PolymorphicAccessInfo::kNotFound: |
| __ LoadRoot(result, RootIndex::kUndefinedValue); |
| break; |
| case PolymorphicAccessInfo::kConstant: { |
| Handle<Object> constant = access_info.constant(); |
| if (constant->IsSmi()) { |
| __ Move(result, Smi::cast(*constant)); |
| } else { |
| DCHECK(access_info.constant()->IsHeapObject()); |
| __ Move(result, Handle<HeapObject>::cast(constant)); |
| } |
| break; |
| } |
| case PolymorphicAccessInfo::kModuleExport: { |
| Register cell = map; // Reuse scratch. |
| __ Move(cell, access_info.cell()); |
| __ AssertNotSmi(cell); |
| __ DecompressTagged(result, |
| FieldMemOperand(cell, Cell::kValueOffset)); |
| break; |
| } |
| case PolymorphicAccessInfo::kDataLoad: { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| DoubleRegister double_scratch = temps.AcquireDouble(); |
| __ LoadDataField(access_info, result, object, map); |
| if (access_info.field_index().is_double()) { |
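| // Double fields are stored as heap numbers that can be mutated in |
| // place, so load the value and box a fresh HeapNumber rather than |
| // returning the field's storage object. |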
| __ LoadHeapNumberValue(double_scratch, result); |
| __ AllocateHeapNumber(node->register_snapshot(), result, |
| double_scratch); |
| } |
| break; |
| } |
| case PolymorphicAccessInfo::kStringLength: |
| __ StringLength(result, object); |
| __ SmiTag(result); |
| break; |
| } |
| }, |
| ToRegister(result())); |
| } |
| |
| void LoadPolymorphicDoubleField::SetValueLocationConstraints() { |
| UseRegister(object_input()); |
| DefineAsRegister(this); |
| set_temporaries_needed(1); |
| } |
| void LoadPolymorphicDoubleField::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(object_input()); |
| EmitPolymorphicAccesses( |
| masm, this, object, |
| [](MaglevAssembler* masm, LoadPolymorphicDoubleField* node, |
| const PolymorphicAccessInfo& access_info, Register object, |
| Register map, DoubleRegister result) { |
| Register scratch = map; |
| switch (access_info.kind()) { |
| case PolymorphicAccessInfo::kDataLoad: |
| __ LoadDataField(access_info, scratch, object, map); |
| switch (access_info.field_representation().kind()) { |
| case Representation::kSmi: |
| __ SmiToDouble(result, scratch); |
| break; |
| case Representation::kDouble: |
| __ LoadHeapNumberValue(result, scratch); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| break; |
| case PolymorphicAccessInfo::kConstant: { |
| Handle<Object> constant = access_info.constant(); |
| if (constant->IsSmi()) { |
| __ Move(scratch, Smi::cast(*constant)); |
| __ SmiToDouble(result, scratch); |
| } else { |
| DCHECK(constant->IsHeapNumber()); |
| __ Move(result, Handle<HeapNumber>::cast(constant)->value()); |
| } |
| break; |
| } |
| case PolymorphicAccessInfo::kStringLength: |
| __ StringLength(scratch, object); |
| __ Int32ToDouble(result, scratch); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| }, |
| ToDoubleRegister(result())); |
| } |
| |
| void LoadEnumCacheLength::SetValueLocationConstraints() { |
| UseRegister(map_input()); |
| DefineAsRegister(this); |
| } |
| void LoadEnumCacheLength::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register map = ToRegister(map_input()); |
| Register result_reg = ToRegister(result()); |
| __ AssertMap(map); |
| __ LoadBitField<Map::Bits3::EnumLengthBits>( |
| result_reg, FieldMemOperand(map, Map::kBitField3Offset)); |
| } |
| |
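| // Global loads inside a typeof expression use a separate IC: reading an |
| // undeclared global inside typeof must yield undefined instead of throwing |
| // a ReferenceError. |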
| int LoadGlobal::MaxCallStackArgs() const { |
| if (typeof_mode() == TypeofMode::kNotInside) { |
| using D = CallInterfaceDescriptorFor<Builtin::kLoadGlobalIC>::type; |
| return D::GetStackParameterCount(); |
| } else { |
| using D = |
| CallInterfaceDescriptorFor<Builtin::kLoadGlobalICInsideTypeof>::type; |
| return D::GetStackParameterCount(); |
| } |
| } |
| void LoadGlobal::SetValueLocationConstraints() { |
| UseFixed(context(), kContextRegister); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void LoadGlobal::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| // TODO(leszeks): Port the nice Sparkplug CallBuiltin helper. |
| if (typeof_mode() == TypeofMode::kNotInside) { |
| using D = CallInterfaceDescriptorFor<Builtin::kLoadGlobalIC>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| __ Move(D::GetRegisterParameter(D::kName), name().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kVector), feedback().vector); |
| |
| __ CallBuiltin(Builtin::kLoadGlobalIC); |
| } else { |
| DCHECK_EQ(typeof_mode(), TypeofMode::kInside); |
| using D = |
| CallInterfaceDescriptorFor<Builtin::kLoadGlobalICInsideTypeof>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| __ Move(D::GetRegisterParameter(D::kName), name().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kVector), feedback().vector); |
| |
| __ CallBuiltin(Builtin::kLoadGlobalICInsideTypeof); |
| } |
| |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int StoreGlobal::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kStoreGlobalIC>::type; |
| return D::GetStackParameterCount(); |
| } |
| void StoreGlobal::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kStoreGlobalIC>::type; |
| UseFixed(context(), kContextRegister); |
| UseFixed(value(), D::GetRegisterParameter(D::kValue)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void StoreGlobal::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kStoreGlobalIC>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(value()), D::GetRegisterParameter(D::kValue)); |
| __ Move(D::GetRegisterParameter(D::kName), name().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kVector), feedback().vector); |
| |
| __ CallBuiltin(Builtin::kStoreGlobalIC); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| void CheckValue::SetValueLocationConstraints() { UseRegister(target_input()); } |
| void CheckValue::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register target = ToRegister(target_input()); |
| __ CompareTagged(target, value().object()); |
| __ EmitEagerDeoptIfNotEqual(DeoptimizeReason::kWrongValue, this); |
| } |
| |
| void CheckValueEqualsInt32::SetValueLocationConstraints() { |
| UseRegister(target_input()); |
| } |
| void CheckValueEqualsInt32::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register target = ToRegister(target_input()); |
| __ CompareInt32(target, value()); |
| __ EmitEagerDeoptIfNotEqual(DeoptimizeReason::kWrongValue, this); |
| } |
| |
| void CheckValueEqualsFloat64::SetValueLocationConstraints() { |
| UseRegister(target_input()); |
| set_double_temporaries_needed(1); |
| } |
| void CheckValueEqualsFloat64::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| DoubleRegister scratch = temps.AcquireDouble(); |
| DoubleRegister target = ToDoubleRegister(target_input()); |
| __ Move(scratch, value()); |
| __ CompareFloat64(scratch, target); |
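| // NaN compares unordered against everything (including itself), so catch |
| // it explicitly before the equality check below. |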
| __ EmitEagerDeoptIf(ConditionForNaN(), DeoptimizeReason::kWrongValue, this); |
| __ EmitEagerDeoptIfNotEqual(DeoptimizeReason::kWrongValue, this); |
| } |
| |
| void CheckValueEqualsString::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kStringEqual>::type; |
| UseFixed(target_input(), D::GetRegisterParameter(D::kLeft)); |
| RequireSpecificTemporary(D::GetRegisterParameter(D::kLength)); |
| } |
| void CheckValueEqualsString::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kStringEqual>::type; |
| |
| ZoneLabelRef end(masm); |
| DCHECK_EQ(D::GetRegisterParameter(D::kLeft), ToRegister(target_input())); |
| Register target = D::GetRegisterParameter(D::kLeft); |
| // The string may already be internalized; do a fast reference check first. |
| __ CompareTagged(target, value().object()); |
| __ JumpIf(kEqual, *end, Label::kNear); |
| |
| __ EmitEagerDeoptIf(__ CheckSmi(target), DeoptimizeReason::kWrongValue, this); |
| __ CompareObjectTypeRange(target, FIRST_STRING_TYPE, LAST_STRING_TYPE); |
| |
| __ JumpToDeferredIf( |
| kUnsignedLessThanEqual, |
| [](MaglevAssembler* masm, CheckValueEqualsString* node, |
| ZoneLabelRef end) { |
| Register target = D::GetRegisterParameter(D::kLeft); |
| Register string_length = D::GetRegisterParameter(D::kLength); |
| __ StringLength(string_length, target); |
| __ CompareInt32(string_length, node->value().length()); |
| __ EmitEagerDeoptIf(kNotEqual, DeoptimizeReason::kWrongValue, node); |
| |
| RegisterSnapshot snapshot = node->register_snapshot(); |
| AddDeoptRegistersToSnapshot(&snapshot, node->eager_deopt_info()); |
| { |
| SaveRegisterStateForCall save_register_state(masm, snapshot); |
| __ Move(kContextRegister, masm->native_context().object()); |
| __ Move(D::GetRegisterParameter(D::kRight), node->value().object()); |
| __ CallBuiltin(Builtin::kStringEqual); |
| save_register_state.DefineSafepoint(); |
| // Compare before restoring registers, so that the deopt below has the |
| // correct register set. |
| __ CompareRoot(kReturnRegister0, RootIndex::kTrueValue); |
| } |
| __ JumpIf(kEqual, *end); |
| __ EmitEagerDeopt(node, DeoptimizeReason::kWrongValue); |
| }, |
| this, end); |
| |
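| // Fall through: the value is not a string at all, so it cannot be equal to |
| // the expected string. |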
| __ EmitEagerDeopt(this, DeoptimizeReason::kWrongValue); |
| |
| __ bind(*end); |
| } |
| |
| void CheckDynamicValue::SetValueLocationConstraints() { |
| UseRegister(first_input()); |
| UseRegister(second_input()); |
| } |
| void CheckDynamicValue::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register first = ToRegister(first_input()); |
| Register second = ToRegister(second_input()); |
| __ CompareTagged(first, second); |
| __ EmitEagerDeoptIfNotEqual(DeoptimizeReason::kWrongValue, this); |
| } |
| |
| void CheckSmi::SetValueLocationConstraints() { UseRegister(receiver_input()); } |
| void CheckSmi::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(receiver_input()); |
| Condition is_smi = __ CheckSmi(object); |
| __ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi, |
| this); |
| } |
| |
| void CheckHeapObject::SetValueLocationConstraints() { |
| UseRegister(receiver_input()); |
| } |
| void CheckHeapObject::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(receiver_input()); |
| Condition is_smi = __ CheckSmi(object); |
| __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kSmi, this); |
| } |
| |
| void CheckSymbol::SetValueLocationConstraints() { |
| UseRegister(receiver_input()); |
| } |
| void CheckSymbol::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(receiver_input()); |
| if (check_type() == CheckType::kOmitHeapObjectCheck) { |
| __ AssertNotSmi(object); |
| } else { |
| Condition is_smi = __ CheckSmi(object); |
| __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kNotASymbol, this); |
| } |
| __ IsObjectType(object, SYMBOL_TYPE); |
| __ EmitEagerDeoptIf(kNotEqual, DeoptimizeReason::kNotASymbol, this); |
| } |
| |
| void CheckInstanceType::SetValueLocationConstraints() { |
| UseRegister(receiver_input()); |
| if (first_instance_type_ != last_instance_type_) { |
| set_temporaries_needed(1); |
| } |
| } |
| void CheckInstanceType::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(receiver_input()); |
| if (check_type() == CheckType::kOmitHeapObjectCheck) { |
| __ AssertNotSmi(object); |
| } else { |
| Condition is_smi = __ CheckSmi(object); |
| __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongInstanceType, this); |
| } |
| if (first_instance_type_ == last_instance_type_) { |
| __ IsObjectType(object, first_instance_type_); |
| __ EmitEagerDeoptIf(kNotEqual, DeoptimizeReason::kWrongInstanceType, this); |
| } else { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register map = temps.Acquire(); |
| __ LoadMap(map, object); |
| __ CompareInstanceTypeRange(map, first_instance_type_, last_instance_type_); |
| __ EmitEagerDeoptIf(kUnsignedGreaterThan, |
| DeoptimizeReason::kWrongInstanceType, this); |
| } |
| } |
| |
| void CheckFixedArrayNonEmpty::SetValueLocationConstraints() { |
| UseRegister(receiver_input()); |
| set_temporaries_needed(1); |
| } |
| void CheckFixedArrayNonEmpty::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(receiver_input()); |
| __ AssertNotSmi(object); |
| |
| if (v8_flags.debug_code) { |
| Label ok; |
| __ IsObjectType(object, FIXED_ARRAY_TYPE); |
| __ JumpIf(kEqual, &ok); |
| __ IsObjectType(object, FIXED_DOUBLE_ARRAY_TYPE); |
| __ Assert(kEqual, AbortReason::kOperandIsNotAFixedArray); |
| __ bind(&ok); |
| } |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register length = temps.Acquire(); |
| __ LoadTaggedSignedField(length, object, FixedArrayBase::kLengthOffset); |
| __ CompareSmiAndJumpIf( |
| length, Smi::zero(), kEqual, |
| __ GetDeoptLabel(this, DeoptimizeReason::kWrongEnumIndices)); |
| } |
| |
| void CheckInt32Condition::SetValueLocationConstraints() { |
| UseRegister(left_input()); |
| UseRegister(right_input()); |
| } |
| void CheckInt32Condition::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| __ CompareInt32(ToRegister(left_input()), ToRegister(right_input())); |
| __ EmitEagerDeoptIf(NegateCondition(ToCondition(condition_)), reason_, this); |
| } |
| |
| void CheckString::SetValueLocationConstraints() { |
| UseRegister(receiver_input()); |
| } |
| void CheckString::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(receiver_input()); |
| if (check_type() == CheckType::kOmitHeapObjectCheck) { |
| __ AssertNotSmi(object); |
| } else { |
| Condition is_smi = __ CheckSmi(object); |
| __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kNotAString, this); |
| } |
| __ CompareObjectTypeRange(object, FIRST_STRING_TYPE, LAST_STRING_TYPE); |
| __ EmitEagerDeoptIf(kUnsignedGreaterThan, DeoptimizeReason::kNotAString, |
| this); |
| } |
| |
| void ConvertHoleToUndefined::SetValueLocationConstraints() { |
| UseRegister(object_input()); |
| DefineSameAsFirst(this); |
| } |
| void ConvertHoleToUndefined::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Label done; |
| DCHECK_EQ(ToRegister(object_input()), ToRegister(result())); |
| __ JumpIfNotRoot(ToRegister(object_input()), RootIndex::kTheHoleValue, &done); |
| __ LoadRoot(ToRegister(result()), RootIndex::kUndefinedValue); |
| __ bind(&done); |
| } |
| |
| int ConvertReceiver::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type; |
| return D::GetStackParameterCount(); |
| } |
| void ConvertReceiver::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kToObject>::type; |
| UseFixed(receiver_input(), D::GetRegisterParameter(D::kInput)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void ConvertReceiver::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Label convert_to_object, done; |
| Register receiver = ToRegister(receiver_input()); |
| __ JumpIfSmi(receiver, &convert_to_object, Label::Distance::kNear); |
| __ JumpIfJSAnyIsNotPrimitive(receiver, &done); |
| |
| compiler::JSHeapBroker* broker = masm->compilation_info()->broker(); |
| if (mode_ != ConvertReceiverMode::kNotNullOrUndefined) { |
| Label convert_global_proxy; |
| __ JumpIfRoot(receiver, RootIndex::kUndefinedValue, &convert_global_proxy, |
| Label::Distance::kNear); |
| __ JumpIfNotRoot(receiver, RootIndex::kNullValue, &convert_to_object, |
| Label::Distance::kNear); |
| __ bind(&convert_global_proxy); |
| // Patch receiver to global proxy. |
| __ Move(ToRegister(result()), |
| native_context_.global_proxy_object(broker).object()); |
| __ Jump(&done); |
| } |
| |
| __ bind(&convert_to_object); |
| // ToObject needs to be run with the target context installed. |
| __ Move(kContextRegister, native_context_.object()); |
| __ CallBuiltin(Builtin::kToObject); |
| __ bind(&done); |
| } |
| |
| int CheckConstructResult::MaxCallStackArgs() const { return 0; } |
| void CheckConstructResult::SetValueLocationConstraints() { |
| UseRegister(construct_result_input()); |
| UseRegister(implicit_receiver_input()); |
| DefineSameAsFirst(this); |
| } |
| void CheckConstructResult::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register construct_result = ToRegister(construct_result_input()); |
| Register implicit_receiver = ToRegister(implicit_receiver_input()); |
| Register result_reg = ToRegister(result()); |
| DCHECK_EQ(construct_result, result_reg); |
| |
| // If the result is an object (in the ECMA sense), we should get rid |
| // of the receiver and use the result; see ECMA-262 section 13.2.2-7 |
| // on page 74. |
| Label done, use_receiver; |
| |
| // If the result is undefined, we'll use the implicit receiver. |
| __ JumpIfRoot(construct_result, RootIndex::kUndefinedValue, &use_receiver, |
| Label::Distance::kNear); |
| |
| // If the result is a smi, it is *not* an object in the ECMA sense. |
| __ JumpIfSmi(construct_result, &use_receiver, Label::Distance::kNear); |
| |
| // If the result is an object in the ECMA sense, use it as the result. |
| __ JumpIfJSAnyIsNotPrimitive(construct_result, &done, Label::Distance::kNear); |
| |
| // Throw away the result of the constructor invocation and use the |
| // implicit receiver as the result. |
| __ bind(&use_receiver); |
| __ JumpIfRoot( |
| implicit_receiver, RootIndex::kTheHoleValue, |
| __ MakeDeferredCode( |
| [](MaglevAssembler* masm, CheckConstructResult* node) { |
| __ Move(kContextRegister, masm->native_context().object()); |
| __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(node); |
| __ Abort(AbortReason::kUnexpectedReturnFromThrow); |
| }, |
| this)); |
| __ Move(result_reg, implicit_receiver); |
| |
| __ bind(&done); |
| } |
| |
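| // CreateObjectLiteral and CreateArrayLiteral below call into the runtime; |
| // the Shallow* variants use the faster builtins for boilerplates that can |
| // be copied shallowly. |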
| int CreateObjectLiteral::MaxCallStackArgs() const { |
| DCHECK_EQ(Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->nargs, 4); |
| return 4; |
| } |
| void CreateObjectLiteral::SetValueLocationConstraints() { |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void CreateObjectLiteral::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| __ Move(kContextRegister, masm->native_context().object()); |
| __ Push(feedback().vector, TaggedIndex::FromIntptr(feedback().index()), |
| boilerplate_descriptor().object(), Smi::FromInt(flags())); |
| __ CallRuntime(Runtime::kCreateObjectLiteral, 4); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int CreateShallowArrayLiteral::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kCreateEmptyArrayLiteral>::type; |
| return D::GetStackParameterCount(); |
| } |
| void CreateShallowArrayLiteral::SetValueLocationConstraints() { |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void CreateShallowArrayLiteral::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CreateShallowArrayLiteralDescriptor; |
| __ Move(D::ContextRegister(), masm->native_context().object()); |
| __ Move(D::GetRegisterParameter(D::kMaybeFeedbackVector), feedback().vector); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kConstantElements), |
| constant_elements().object()); |
| __ Move(D::GetRegisterParameter(D::kFlags), Smi::FromInt(flags())); |
| __ CallBuiltin(Builtin::kCreateShallowArrayLiteral); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int CreateArrayLiteral::MaxCallStackArgs() const { |
| DCHECK_EQ(Runtime::FunctionForId(Runtime::kCreateArrayLiteral)->nargs, 4); |
| return 4; |
| } |
| void CreateArrayLiteral::SetValueLocationConstraints() { |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void CreateArrayLiteral::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| __ Move(kContextRegister, masm->native_context().object()); |
| __ Push(feedback().vector, TaggedIndex::FromIntptr(feedback().index()), |
| constant_elements().object(), Smi::FromInt(flags())); |
| __ CallRuntime(Runtime::kCreateArrayLiteral, 4); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int CreateShallowObjectLiteral::MaxCallStackArgs() const { |
| using D = |
| CallInterfaceDescriptorFor<Builtin::kCreateShallowObjectLiteral>::type; |
| return D::GetStackParameterCount(); |
| } |
| void CreateShallowObjectLiteral::SetValueLocationConstraints() { |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void CreateShallowObjectLiteral::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CreateShallowObjectLiteralDescriptor; |
| __ Move(D::ContextRegister(), masm->native_context().object()); |
| __ Move(D::GetRegisterParameter(D::kMaybeFeedbackVector), feedback().vector); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kDesc), boilerplate_descriptor().object()); |
| __ Move(D::GetRegisterParameter(D::kFlags), Smi::FromInt(flags())); |
| __ CallBuiltin(Builtin::kCreateShallowObjectLiteral); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| void AllocateRaw::SetValueLocationConstraints() { DefineAsRegister(this); } |
| |
| void AllocateRaw::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| __ Allocate(register_snapshot(), ToRegister(result()), size(), |
| allocation_type()); |
| } |
| |
| int CreateClosure::MaxCallStackArgs() const { |
| DCHECK_EQ(Runtime::FunctionForId(pretenured() ? Runtime::kNewClosure_Tenured |
| : Runtime::kNewClosure) |
| ->nargs, |
| 2); |
| return 2; |
| } |
| void CreateClosure::SetValueLocationConstraints() { |
| UseFixed(context(), kContextRegister); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void CreateClosure::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Runtime::FunctionId function_id = |
| pretenured() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure; |
| __ Push(shared_function_info().object(), feedback_cell().object()); |
| __ CallRuntime(function_id); |
| } |
| |
| int FastCreateClosure::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type; |
| return D::GetStackParameterCount(); |
| } |
| void FastCreateClosure::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type; |
| static_assert(D::HasContextParameter()); |
| UseFixed(context(), D::ContextRegister()); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void FastCreateClosure::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kFastNewClosure>::type; |
| |
| DCHECK_EQ(ToRegister(context()), D::ContextRegister()); |
| __ Move(D::GetRegisterParameter(D::kSharedFunctionInfo), |
| shared_function_info().object()); |
| __ Move(D::GetRegisterParameter(D::kFeedbackCell), feedback_cell().object()); |
| __ CallBuiltin(Builtin::kFastNewClosure); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int CreateFunctionContext::MaxCallStackArgs() const { |
| if (scope_type() == FUNCTION_SCOPE) { |
| using D = CallInterfaceDescriptorFor< |
| Builtin::kFastNewFunctionContextFunction>::type; |
| return D::GetStackParameterCount(); |
| } else { |
| using D = |
| CallInterfaceDescriptorFor<Builtin::kFastNewFunctionContextEval>::type; |
| return D::GetStackParameterCount(); |
| } |
| } |
| void CreateFunctionContext::SetValueLocationConstraints() { |
| DCHECK_LE(slot_count(), |
| static_cast<uint32_t>( |
| ConstructorBuiltins::MaximumFunctionContextSlots())); |
| if (scope_type() == FUNCTION_SCOPE) { |
| using D = CallInterfaceDescriptorFor< |
| Builtin::kFastNewFunctionContextFunction>::type; |
| static_assert(D::HasContextParameter()); |
| UseFixed(context(), D::ContextRegister()); |
| } else { |
| DCHECK_EQ(scope_type(), ScopeType::EVAL_SCOPE); |
| using D = |
| CallInterfaceDescriptorFor<Builtin::kFastNewFunctionContextEval>::type; |
| static_assert(D::HasContextParameter()); |
| UseFixed(context(), D::ContextRegister()); |
| } |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void CreateFunctionContext::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| if (scope_type() == FUNCTION_SCOPE) { |
| using D = CallInterfaceDescriptorFor< |
| Builtin::kFastNewFunctionContextFunction>::type; |
| DCHECK_EQ(ToRegister(context()), D::ContextRegister()); |
| __ Move(D::GetRegisterParameter(D::kScopeInfo), scope_info().object()); |
| __ Move(D::GetRegisterParameter(D::kSlots), slot_count()); |
| // TODO(leszeks): Consider inlining this allocation. |
| __ CallBuiltin(Builtin::kFastNewFunctionContextFunction); |
| } else { |
| DCHECK_EQ(scope_type(), ScopeType::EVAL_SCOPE); |
| using D = |
| CallInterfaceDescriptorFor<Builtin::kFastNewFunctionContextEval>::type; |
| DCHECK_EQ(ToRegister(context()), D::ContextRegister()); |
| __ Move(D::GetRegisterParameter(D::kScopeInfo), scope_info().object()); |
| __ Move(D::GetRegisterParameter(D::kSlots), slot_count()); |
| __ CallBuiltin(Builtin::kFastNewFunctionContextEval); |
| } |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int CreateRegExpLiteral::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kCreateRegExpLiteral>::type; |
| return D::GetStackParameterCount(); |
| } |
| void CreateRegExpLiteral::SetValueLocationConstraints() { |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void CreateRegExpLiteral::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CreateRegExpLiteralDescriptor; |
| __ Move(D::ContextRegister(), masm->native_context().object()); |
| __ Move(D::GetRegisterParameter(D::kMaybeFeedbackVector), feedback().vector); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kPattern), pattern().object()); |
| __ Move(D::GetRegisterParameter(D::kFlags), Smi::FromInt(flags())); |
| __ CallBuiltin(Builtin::kCreateRegExpLiteral); |
| } |
| |
| int GetTemplateObject::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kGetTemplateObject>::type; |
| return D::GetStackParameterCount(); |
| } |
| void GetTemplateObject::SetValueLocationConstraints() { |
| using D = GetTemplateObjectDescriptor; |
| UseFixed(description(), D::GetRegisterParameter(D::kDescription)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void GetTemplateObject::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = GetTemplateObjectDescriptor; |
| __ Move(D::ContextRegister(), masm->native_context().object()); |
| __ Move(D::GetRegisterParameter(D::kMaybeFeedbackVector), feedback().vector); |
| __ Move(D::GetRegisterParameter(D::kSlot), feedback().slot.ToInt()); |
| __ Move(D::GetRegisterParameter(D::kShared), shared_function_info_.object()); |
| __ CallBuiltin(Builtin::kGetTemplateObject); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
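| // Walks the prototype chain of {object}, producing true if {prototype} is |
| // found and false if the chain ends at null. Special receivers (e.g. |
| // proxies, or maps with interceptors or access checks) are handled in a |
| // deferred runtime call. |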
| int HasInPrototypeChain::MaxCallStackArgs() const { |
| DCHECK_EQ(2, Runtime::FunctionForId(Runtime::kHasInPrototypeChain)->nargs); |
| return 2; |
| } |
| void HasInPrototypeChain::SetValueLocationConstraints() { |
| UseRegister(object()); |
| DefineAsRegister(this); |
| set_temporaries_needed(2); |
| } |
| void HasInPrototypeChain::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register object_reg = ToRegister(object()); |
| Register result_reg = ToRegister(result()); |
| |
| Label return_false, return_true; |
| ZoneLabelRef done(masm); |
| |
| __ JumpIfSmi(object_reg, &return_false, Label::kNear); |
| |
| // Loop through the prototype chain looking for the {prototype}. |
| Register map = temps.Acquire(); |
| __ LoadMap(map, object_reg); |
| Label loop; |
| { |
| __ bind(&loop); |
| Register scratch = temps.Acquire(); |
| // Check if we can determine the prototype directly from the {object_map}. |
| ZoneLabelRef if_objectisdirect(masm); |
| Register instance_type = scratch; |
| __ CompareInstanceTypeRange(map, instance_type, FIRST_TYPE, |
| LAST_SPECIAL_RECEIVER_TYPE); |
| __ JumpToDeferredIf( |
| kUnsignedLessThanEqual, |
| [](MaglevAssembler* masm, RegisterSnapshot snapshot, |
| Register object_reg, Register map, Register instance_type, |
| Register result_reg, HasInPrototypeChain* node, |
| ZoneLabelRef if_objectisdirect, ZoneLabelRef done) { |
| Label return_runtime; |
| // The {object_map} is a special receiver map or a primitive map; check |
| // whether we need to take the if_objectisspecial path in the runtime. |
| __ JumpIfEqual(instance_type, JS_PROXY_TYPE, &return_runtime); |
| |
| Register object_bitfield = instance_type; |
| __ LoadByte(object_bitfield, |
| FieldMemOperand(map, Map::kBitFieldOffset)); |
| int mask = Map::Bits1::HasNamedInterceptorBit::kMask | |
| Map::Bits1::IsAccessCheckNeededBit::kMask; |
| __ TestInt32AndJumpIfAllClear(object_bitfield, mask, |
| *if_objectisdirect); |
| |
| __ bind(&return_runtime); |
| { |
| snapshot.live_registers.clear(result_reg); |
| SaveRegisterStateForCall save_register_state(masm, snapshot); |
| __ Move(kContextRegister, masm->native_context().object()); |
| __ Push(object_reg, node->prototype().object()); |
| __ CallRuntime(Runtime::kHasInPrototypeChain, 2); |
| masm->DefineExceptionHandlerPoint(node); |
| save_register_state.DefineSafepointWithLazyDeopt( |
| node->lazy_deopt_info()); |
| __ Move(result_reg, kReturnRegister0); |
| } |
| __ Jump(*done); |
| }, |
| register_snapshot(), object_reg, map, instance_type, result_reg, this, |
| if_objectisdirect, done); |
| instance_type = Register::no_reg(); |
| |
| __ bind(*if_objectisdirect); |
| // Check the current {object} prototype. |
| Register object_prototype = scratch; |
| __ LoadTaggedField(object_prototype, map, Map::kPrototypeOffset); |
| __ JumpIfRoot(object_prototype, RootIndex::kNullValue, &return_false, |
| Label::kNear); |
| __ CompareTagged(object_prototype, prototype().object()); |
| __ JumpIf(kEqual, &return_true, Label::kNear); |
| |
| // Continue with the prototype. |
| __ AssertNotSmi(object_prototype); |
| __ LoadMap(map, object_prototype); |
| __ Jump(&loop); |
| } |
| |
| __ bind(&return_true); |
| __ LoadRoot(result_reg, RootIndex::kTrueValue); |
| __ Jump(*done, Label::kNear); |
| |
| __ bind(&return_false); |
| __ LoadRoot(result_reg, RootIndex::kFalseValue); |
| __ bind(*done); |
| } |
| |
| void DebugBreak::SetValueLocationConstraints() {} |
| void DebugBreak::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| __ DebugBreak(); |
| } |
| |
| int Abort::MaxCallStackArgs() const { |
| DCHECK_EQ(Runtime::FunctionForId(Runtime::kAbort)->nargs, 1); |
| return 1; |
| } |
| void Abort::SetValueLocationConstraints() {} |
| void Abort::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { |
| __ Push(Smi::FromInt(static_cast<int>(reason()))); |
| __ CallRuntime(Runtime::kAbort, 1); |
| __ Trap(); |
| } |
| |
| void LogicalNot::SetValueLocationConstraints() { |
| UseAny(value()); |
| DefineAsRegister(this); |
| } |
| void LogicalNot::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| if (v8_flags.debug_code) { |
| // LogicalNot expects either TrueValue or FalseValue. |
| Label next; |
| __ JumpIf(__ IsRootConstant(value(), RootIndex::kFalseValue), &next); |
| __ JumpIf(__ IsRootConstant(value(), RootIndex::kTrueValue), &next); |
| __ Abort(AbortReason::kUnexpectedValue); |
| __ bind(&next); |
| } |
| |
| Label return_false, done; |
| __ JumpIf(__ IsRootConstant(value(), RootIndex::kTrueValue), &return_false); |
| __ LoadRoot(ToRegister(result()), RootIndex::kTrueValue); |
| __ Jump(&done); |
| |
| __ bind(&return_false); |
| __ LoadRoot(ToRegister(result()), RootIndex::kFalseValue); |
| |
| __ bind(&done); |
| } |
| |
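| // The *Generic nodes below do no speculation: they marshal their inputs |
| // into the calling convention of the corresponding IC builtin and call it. |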
| int LoadNamedGeneric::MaxCallStackArgs() const { |
| return LoadWithVectorDescriptor::GetStackParameterCount(); |
| } |
| void LoadNamedGeneric::SetValueLocationConstraints() { |
| using D = LoadWithVectorDescriptor; |
| UseFixed(context(), kContextRegister); |
| UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void LoadNamedGeneric::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = LoadWithVectorDescriptor; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver)); |
| __ Move(D::GetRegisterParameter(D::kName), name().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| Smi::FromInt(feedback().slot.ToInt())); |
| __ Move(D::GetRegisterParameter(D::kVector), feedback().vector); |
| __ CallBuiltin(Builtin::kLoadIC); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int LoadNamedFromSuperGeneric::MaxCallStackArgs() const { |
| return LoadWithReceiverAndVectorDescriptor::GetStackParameterCount(); |
| } |
| void LoadNamedFromSuperGeneric::SetValueLocationConstraints() { |
| using D = LoadWithReceiverAndVectorDescriptor; |
| UseFixed(context(), kContextRegister); |
| UseFixed(receiver(), D::GetRegisterParameter(D::kReceiver)); |
| UseFixed(lookup_start_object(), |
| D::GetRegisterParameter(D::kLookupStartObject)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void LoadNamedFromSuperGeneric::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = LoadWithReceiverAndVectorDescriptor; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(receiver()), D::GetRegisterParameter(D::kReceiver)); |
| DCHECK_EQ(ToRegister(lookup_start_object()), |
| D::GetRegisterParameter(D::kLookupStartObject)); |
| __ Move(D::GetRegisterParameter(D::kName), name().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| Smi::FromInt(feedback().slot.ToInt())); |
| __ Move(D::GetRegisterParameter(D::kVector), feedback().vector); |
| __ CallBuiltin(Builtin::kLoadSuperIC); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int SetNamedGeneric::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kStoreIC>::type; |
| return D::GetStackParameterCount(); |
| } |
| void SetNamedGeneric::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kStoreIC>::type; |
| UseFixed(context(), kContextRegister); |
| UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver)); |
| UseFixed(value_input(), D::GetRegisterParameter(D::kValue)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void SetNamedGeneric::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kStoreIC>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver)); |
| DCHECK_EQ(ToRegister(value_input()), D::GetRegisterParameter(D::kValue)); |
| __ Move(D::GetRegisterParameter(D::kName), name().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kVector), feedback().vector); |
| __ CallBuiltin(Builtin::kStoreIC); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| int DefineNamedOwnGeneric::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kDefineNamedOwnIC>::type; |
| return D::GetStackParameterCount(); |
| } |
| void DefineNamedOwnGeneric::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kDefineNamedOwnIC>::type; |
| UseFixed(context(), kContextRegister); |
| UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver)); |
| UseFixed(value_input(), D::GetRegisterParameter(D::kValue)); |
| DefineAsFixed(this, kReturnRegister0); |
| } |
| void DefineNamedOwnGeneric::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| using D = CallInterfaceDescriptorFor<Builtin::kDefineNamedOwnIC>::type; |
| DCHECK_EQ(ToRegister(context()), kContextRegister); |
| DCHECK_EQ(ToRegister(object_input()), D::GetRegisterParameter(D::kReceiver)); |
| DCHECK_EQ(ToRegister(value_input()), D::GetRegisterParameter(D::kValue)); |
| __ Move(D::GetRegisterParameter(D::kName), name().object()); |
| __ Move(D::GetRegisterParameter(D::kSlot), |
| TaggedIndex::FromIntptr(feedback().index())); |
| __ Move(D::GetRegisterParameter(D::kVector), feedback().vector); |
| __ CallBuiltin(Builtin::kDefineNamedOwnIC); |
| masm->DefineExceptionHandlerAndLazyDeoptPoint(this); |
| } |
| |
| void UpdateJSArrayLength::SetValueLocationConstraints() { |
| UseRegister(object_input()); |
| UseAndClobberRegister(index_input()); |
| UseRegister(length_input()); |
| } |
| void UpdateJSArrayLength::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(object_input()); |
| Register index = ToRegister(index_input()); |
| Register length = ToRegister(length_input()); |
| Label done; |
| if (v8_flags.debug_code) { |
| __ IsObjectType(object, JS_ARRAY_TYPE); |
| __ Assert(kEqual, AbortReason::kUnexpectedValue); |
| static_assert(Internals::IsValidSmi(FixedArray::kMaxLength), |
| "MaxLength not a Smi"); |
| __ CompareInt32(index, FixedArray::kMaxLength); |
| __ Assert(kUnsignedLessThan, AbortReason::kUnexpectedValue); |
| } |
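| // If the store index is below the current length, the length is unchanged; |
| // otherwise the new length is index + 1, stored back as a Smi. |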
| __ CompareInt32(index, length); |
| __ JumpIf(kUnsignedLessThan, &done); |
| __ IncrementInt32(index); // This cannot overflow. |
| __ SmiTag(index); |
| __ StoreTaggedField(FieldMemOperand(object, JSArray::kLengthOffset), index); |
| __ bind(&done); |
| } |
| |
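| // Fast elements backing stores can be shared copy-on-write arrays; this |
| // node copies the backing store before a write if needed. |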
| void EnsureWritableFastElements::SetValueLocationConstraints() { |
| UseRegister(elements_input()); |
| UseRegister(object_input()); |
| set_temporaries_needed(1); |
| DefineSameAsFirst(this); |
| } |
| void EnsureWritableFastElements::GenerateCode(MaglevAssembler* masm, |
| const ProcessingState& state) { |
| Register object = ToRegister(object_input()); |
| Register elements = ToRegister(elements_input()); |
| DCHECK_EQ(elements, ToRegister(result())); |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register scratch = temps.Acquire(); |
| __ EnsureWritableFastElements(register_snapshot(), elements, object, scratch); |
| } |
| |
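| // As above, but additionally grows the backing store in a deferred path |
| // (via the GrowFastElements builtins) when the index is at or beyond the |
| // current capacity, eager-deopting if the elements could not be grown. |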
| void MaybeGrowAndEnsureWritableFastElements::SetValueLocationConstraints() { |
| UseRegister(elements_input()); |
| UseRegister(object_input()); |
| UseRegister(index_input()); |
| UseRegister(elements_length_input()); |
| if (IsSmiOrObjectElementsKind(elements_kind())) { |
| set_temporaries_needed(1); |
| } |
| DefineSameAsFirst(this); |
| } |
| void MaybeGrowAndEnsureWritableFastElements::GenerateCode( |
| MaglevAssembler* masm, const ProcessingState& state) { |
| Register elements = ToRegister(elements_input()); |
| Register object = ToRegister(object_input()); |
| Register index = ToRegister(index_input()); |
| Register elements_length = ToRegister(elements_length_input()); |
| DCHECK_EQ(elements, ToRegister(result())); |
| |
| ZoneLabelRef done(masm); |
| __ CompareInt32(index, elements_length); |
| __ JumpToDeferredIf( |
| kUnsignedGreaterThanEqual, |
| [](MaglevAssembler* masm, ZoneLabelRef done, Register object, |
| Register index, Register result_reg, |
| MaybeGrowAndEnsureWritableFastElements* node) { |
| { |
| RegisterSnapshot snapshot = node->register_snapshot(); |
| AddDeoptRegistersToSnapshot(&snapshot, node->eager_deopt_info()); |
| snapshot.live_registers.clear(result_reg); |
| snapshot.live_tagged_registers.clear(result_reg); |
| SaveRegisterStateForCall save_register_state(masm, snapshot); |
| using D = GrowArrayElementsDescriptor; |
| if (index == D::GetRegisterParameter(D::kObject)) { |
| // The first parameter move would clobber the index value, so use the |
| // result register as a temporary instead. |
| __ SmiTag(result_reg, index); |
| index = result_reg; |
| } else { |
| __ SmiTag(index); |
| } |
| __ Move(D::GetRegisterParameter(D::kObject), object); |
| __ Move(D::GetRegisterParameter(D::kKey), index); |
| if (IsDoubleElementsKind(node->elements_kind())) { |
| __ CallBuiltin(Builtin::kGrowFastDoubleElements); |
| } else { |
| __ CallBuiltin(Builtin::kGrowFastSmiOrObjectElements); |
| } |
| save_register_state.DefineSafepoint(); |
| __ Move(result_reg, kReturnRegister0); |
| } |
| Condition is_smi = __ CheckSmi(result_reg); |
| __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kCouldNotGrowElements, |
| node); |
| __ Jump(*done); |
| }, |
| done, object, index, elements, this); |
| |
| if (IsSmiOrObjectElementsKind(elements_kind())) { |
| MaglevAssembler::ScratchRegisterScope temps(masm); |
| Register scratch = temps.Acquire(); |
| __ EnsureWritableFastElements(register_snapshot(), elements, object, |
| scratch); |
| } |
| |
| __ bind(*done); |
| } |
| |
| int SetKeyedGeneric::MaxCallStackArgs() const { |
| using D = CallInterfaceDescriptorFor<Builtin::kKeyedStoreIC>::type; |
| return D::GetStackParameterCount(); |
| } |
| void SetKeyedGeneric::SetValueLocationConstraints() { |
| using D = CallInterfaceDescriptorFor<Builtin::kKeyedStoreIC>::type; |
| UseFixed(context(), kContextRegister); |
| UseFixed(object_input(), D::GetRegisterParameter(D::kReceiver)); |
| UseFixed(key_input(), |