| // Copyright 2021 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #ifndef V8_MAGLEV_MAGLEV_IR_H_ |
| #define V8_MAGLEV_MAGLEV_IR_H_ |
| |
| #include "src/base/bit-field.h" |
| #include "src/base/discriminated-union.h" |
| #include "src/base/enum-set.h" |
| #include "src/base/logging.h" |
| #include "src/base/macros.h" |
| #include "src/base/small-vector.h" |
| #include "src/base/threaded-list.h" |
| #include "src/codegen/label.h" |
| #include "src/codegen/machine-type.h" |
| #include "src/codegen/reglist.h" |
| #include "src/codegen/source-position.h" |
| #include "src/common/globals.h" |
| #include "src/common/operation.h" |
| #include "src/compiler/access-info.h" |
| #include "src/compiler/backend/instruction.h" |
| #include "src/compiler/feedback-source.h" |
| #include "src/compiler/heap-refs.h" |
| // TODO(dmercadier): move the Turboshaft utils functions to shared code (in |
| // particular, any_of, which is the reason we're including this Turboshaft |
| // header) |
| #include "src/compiler/turboshaft/utils.h" |
| #include "src/deoptimizer/deoptimize-reason.h" |
| #include "src/interpreter/bytecode-flags.h" |
| #include "src/interpreter/bytecode-register.h" |
| #include "src/maglev/maglev-compilation-unit.h" |
| #include "src/objects/smi.h" |
| #include "src/roots/roots.h" |
| #include "src/utils/utils.h" |
| #include "src/zone/zone.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| enum Condition : uint8_t; |
| |
| namespace maglev { |
| |
| class BasicBlock; |
| class ProcessingState; |
| class MaglevAssembler; |
| class MaglevCodeGenState; |
| class MaglevCompilationUnit; |
| class MaglevGraphLabeller; |
| class MaglevVregAllocationState; |
| class CompactInterpreterFrameState; |
| class MergePointInterpreterFrameState; |
| |
| // Nodes are either |
| // 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or |
| // 2. control nodes that store the control flow at the end of basic blocks, and |
| //    form a separate node hierarchy from non-control nodes. |
| // |
| // The macro lists below must match the node class hierarchy. |
| |
| #define GENERIC_OPERATIONS_NODE_LIST(V) \ |
| V(GenericAdd) \ |
| V(GenericSubtract) \ |
| V(GenericMultiply) \ |
| V(GenericDivide) \ |
| V(GenericModulus) \ |
| V(GenericExponentiate) \ |
| V(GenericBitwiseAnd) \ |
| V(GenericBitwiseOr) \ |
| V(GenericBitwiseXor) \ |
| V(GenericShiftLeft) \ |
| V(GenericShiftRight) \ |
| V(GenericShiftRightLogical) \ |
| V(GenericBitwiseNot) \ |
| V(GenericNegate) \ |
| V(GenericIncrement) \ |
| V(GenericDecrement) \ |
| V(GenericEqual) \ |
| V(GenericStrictEqual) \ |
| V(GenericLessThan) \ |
| V(GenericLessThanOrEqual) \ |
| V(GenericGreaterThan) \ |
| V(GenericGreaterThanOrEqual) |
| |
| #define INT32_OPERATIONS_NODE_LIST(V) \ |
| V(Int32AddWithOverflow) \ |
| V(Int32SubtractWithOverflow) \ |
| V(Int32MultiplyWithOverflow) \ |
| V(Int32DivideWithOverflow) \ |
| V(Int32ModulusWithOverflow) \ |
| V(Int32BitwiseAnd) \ |
| V(Int32BitwiseOr) \ |
| V(Int32BitwiseXor) \ |
| V(Int32ShiftLeft) \ |
| V(Int32ShiftRight) \ |
| V(Int32ShiftRightLogical) \ |
| V(Int32BitwiseNot) \ |
| V(Int32NegateWithOverflow) \ |
| V(Int32IncrementWithOverflow) \ |
| V(Int32DecrementWithOverflow) \ |
| V(Int32Equal) \ |
| V(Int32StrictEqual) \ |
| V(Int32LessThan) \ |
| V(Int32LessThanOrEqual) \ |
| V(Int32GreaterThan) \ |
| V(Int32GreaterThanOrEqual) |
| |
| #define FLOAT64_OPERATIONS_NODE_LIST(V) \ |
| V(Float64Add) \ |
| V(Float64Subtract) \ |
| V(Float64Multiply) \ |
| V(Float64Divide) \ |
| V(Float64Exponentiate) \ |
| V(Float64Modulus) \ |
| V(Float64Negate) \ |
| V(Float64Round) \ |
| V(Float64Equal) \ |
| V(Float64StrictEqual) \ |
| V(Float64LessThan) \ |
| V(Float64LessThanOrEqual) \ |
| V(Float64GreaterThan) \ |
| V(Float64GreaterThanOrEqual) \ |
| V(Float64Ieee754Unary) |
| |
| #define CONSTANT_VALUE_NODE_LIST(V) \ |
| V(Constant) \ |
| V(ExternalConstant) \ |
| V(Float64Constant) \ |
| V(Int32Constant) \ |
| V(RootConstant) \ |
| V(SmiConstant) |
| |
| #define INLINE_BUILTIN_NODE_LIST(V) \ |
| V(BuiltinStringFromCharCode) \ |
| V(BuiltinStringPrototypeCharCodeOrCodePointAt) |
| |
| #define VALUE_NODE_LIST(V) \ |
| V(Identity) \ |
| V(AllocateRaw) \ |
| V(Call) \ |
| V(CallBuiltin) \ |
| V(CallRuntime) \ |
| V(CallWithArrayLike) \ |
| V(CallWithSpread) \ |
| V(CallKnownJSFunction) \ |
| V(CallSelf) \ |
| V(Construct) \ |
| V(CheckConstructResult) \ |
| V(ConstructWithSpread) \ |
| V(ConvertReceiver) \ |
| V(ConvertHoleToUndefined) \ |
| V(CreateArrayLiteral) \ |
| V(CreateShallowArrayLiteral) \ |
| V(CreateObjectLiteral) \ |
| V(CreateShallowObjectLiteral) \ |
| V(CreateFunctionContext) \ |
| V(CreateClosure) \ |
| V(FastCreateClosure) \ |
| V(CreateRegExpLiteral) \ |
| V(DeleteProperty) \ |
| V(EnsureWritableFastElements) \ |
| V(FoldedAllocation) \ |
| V(ForInPrepare) \ |
| V(ForInNext) \ |
| V(GeneratorRestoreRegister) \ |
| V(GetIterator) \ |
| V(GetSecondReturnedValue) \ |
| V(GetTemplateObject) \ |
| V(HasInPrototypeChain) \ |
| V(InitialValue) \ |
| V(LoadPolymorphicDoubleField) \ |
| V(LoadPolymorphicTaggedField) \ |
| V(LoadTaggedField) \ |
| V(LoadDoubleField) \ |
| V(LoadTaggedFieldByFieldIndex) \ |
| V(LoadFixedArrayElement) \ |
| V(LoadFixedDoubleArrayElement) \ |
| V(LoadHoleyFixedDoubleArrayElement) \ |
| V(LoadSignedIntDataViewElement) \ |
| V(LoadDoubleDataViewElement) \ |
| V(LoadSignedIntTypedArrayElement) \ |
| V(LoadSignedIntTypedArrayElementNoDeopt) \ |
| V(LoadUnsignedIntTypedArrayElement) \ |
| V(LoadUnsignedIntTypedArrayElementNoDeopt) \ |
| V(LoadDoubleTypedArrayElement) \ |
| V(LoadDoubleTypedArrayElementNoDeopt) \ |
| V(LoadEnumCacheLength) \ |
| V(LoadGlobal) \ |
| V(LoadNamedGeneric) \ |
| V(LoadNamedFromSuperGeneric) \ |
| V(MaybeGrowAndEnsureWritableFastElements) \ |
| V(SetNamedGeneric) \ |
| V(DefineNamedOwnGeneric) \ |
| V(StoreInArrayLiteralGeneric) \ |
| V(StoreGlobal) \ |
| V(GetKeyedGeneric) \ |
| V(SetKeyedGeneric) \ |
| V(DefineKeyedOwnGeneric) \ |
| V(Phi) \ |
| V(RegisterInput) \ |
| V(CheckedSmiTagInt32) \ |
| V(CheckedSmiTagUint32) \ |
| V(UnsafeSmiTag) \ |
| V(CheckedSmiUntag) \ |
| V(UnsafeSmiUntag) \ |
| V(CheckedInternalizedString) \ |
| V(CheckedObjectToIndex) \ |
| V(CheckedTruncateNumberOrOddballToInt32) \ |
| V(CheckedInt32ToUint32) \ |
| V(CheckedUint32ToInt32) \ |
| V(ChangeInt32ToFloat64) \ |
| V(ChangeUint32ToFloat64) \ |
| V(CheckedTruncateFloat64ToInt32) \ |
| V(CheckedTruncateFloat64ToUint32) \ |
| V(TruncateNumberOrOddballToInt32) \ |
| V(TruncateUint32ToInt32) \ |
| V(TruncateFloat64ToInt32) \ |
| V(UnsafeTruncateUint32ToInt32) \ |
| V(UnsafeTruncateFloat64ToInt32) \ |
| V(Int32ToUint8Clamped) \ |
| V(Uint32ToUint8Clamped) \ |
| V(Float64ToUint8Clamped) \ |
| V(CheckedNumberToUint8Clamped) \ |
| V(Int32ToNumber) \ |
| V(Uint32ToNumber) \ |
| V(Float64ToTagged) \ |
| V(HoleyFloat64ToTagged) \ |
| V(CheckedSmiTagFloat64) \ |
| V(CheckedNumberOrOddballToFloat64) \ |
| V(UncheckedNumberOrOddballToFloat64) \ |
| V(CheckedHoleyFloat64ToFloat64) \ |
| V(HoleyFloat64ToMaybeNanFloat64) \ |
| V(LogicalNot) \ |
| V(SetPendingMessage) \ |
| V(StringAt) \ |
| V(StringEqual) \ |
| V(StringLength) \ |
| V(StringConcat) \ |
| V(ToBoolean) \ |
| V(ToBooleanLogicalNot) \ |
| V(TaggedEqual) \ |
| V(TaggedNotEqual) \ |
| V(TestInstanceOf) \ |
| V(TestUndetectable) \ |
| V(TestTypeOf) \ |
| V(ToName) \ |
| V(ToNumberOrNumeric) \ |
| V(ToObject) \ |
| V(ToString) \ |
| V(NumberToString) \ |
| CONSTANT_VALUE_NODE_LIST(V) \ |
| INT32_OPERATIONS_NODE_LIST(V) \ |
| FLOAT64_OPERATIONS_NODE_LIST(V) \ |
| GENERIC_OPERATIONS_NODE_LIST(V) \ |
| INLINE_BUILTIN_NODE_LIST(V) |
| |
| #define GAP_MOVE_NODE_LIST(V) \ |
| V(ConstantGapMove) \ |
| V(GapMove) |
| |
| #define NODE_LIST(V) \ |
| V(AssertInt32) \ |
| V(CheckDynamicValue) \ |
| V(CheckInt32IsSmi) \ |
| V(CheckUint32IsSmi) \ |
| V(CheckHoleyFloat64IsSmi) \ |
| V(CheckHeapObject) \ |
| V(CheckInt32Condition) \ |
| V(CheckFixedArrayNonEmpty) \ |
| V(CheckBounds) \ |
| V(CheckJSDataViewBounds) \ |
| V(CheckJSTypedArrayBounds) \ |
| V(CheckMaps) \ |
| V(CheckMapsWithMigration) \ |
| V(CheckNumber) \ |
| V(CheckSmi) \ |
| V(CheckString) \ |
| V(CheckSymbol) \ |
| V(CheckValue) \ |
| V(CheckValueEqualsInt32) \ |
| V(CheckValueEqualsFloat64) \ |
| V(CheckValueEqualsString) \ |
| V(CheckInstanceType) \ |
| V(DebugBreak) \ |
| V(FunctionEntryStackCheck) \ |
| V(GeneratorStore) \ |
| V(TryOnStackReplacement) \ |
| V(StoreMap) \ |
| V(StoreDoubleField) \ |
| V(StoreFixedArrayElementWithWriteBarrier) \ |
| V(StoreFixedArrayElementNoWriteBarrier) \ |
| V(StoreFixedDoubleArrayElement) \ |
| V(StoreFloat64) \ |
| V(StoreIntTypedArrayElement) \ |
| V(StoreIntTypedArrayElementNoDeopt) \ |
| V(StoreDoubleTypedArrayElement) \ |
| V(StoreDoubleTypedArrayElementNoDeopt) \ |
| V(StoreSignedIntDataViewElement) \ |
| V(StoreDoubleDataViewElement) \ |
| V(StoreTaggedFieldNoWriteBarrier) \ |
| V(StoreTaggedFieldWithWriteBarrier) \ |
| V(ReduceInterruptBudgetForLoop) \ |
| V(ReduceInterruptBudgetForReturn) \ |
| V(ThrowReferenceErrorIfHole) \ |
| V(ThrowSuperNotCalledIfHole) \ |
| V(ThrowSuperAlreadyCalledIfNotHole) \ |
| V(ThrowIfNotSuperConstructor) \ |
| V(TransitionElementsKind) \ |
| V(UpdateJSArrayLength) \ |
| GAP_MOVE_NODE_LIST(V) \ |
| VALUE_NODE_LIST(V) |
| |
| #define BRANCH_CONTROL_NODE_LIST(V) \ |
| V(BranchIfRootConstant) \ |
| V(BranchIfToBooleanTrue) \ |
| V(BranchIfInt32ToBooleanTrue) \ |
| V(BranchIfFloat64ToBooleanTrue) \ |
| V(BranchIfReferenceCompare) \ |
| V(BranchIfInt32Compare) \ |
| V(BranchIfFloat64Compare) \ |
| V(BranchIfUndefinedOrNull) \ |
| V(BranchIfUndetectable) \ |
| V(BranchIfJSReceiver) \ |
| V(BranchIfTypeOf) |
| |
| #define CONDITIONAL_CONTROL_NODE_LIST(V) \ |
| V(Switch) \ |
| BRANCH_CONTROL_NODE_LIST(V) |
| |
| #define UNCONDITIONAL_CONTROL_NODE_LIST(V) \ |
| V(Jump) \ |
| V(JumpLoop) \ |
| V(JumpToInlined) \ |
| V(JumpFromInlined) |
| |
| #define TERMINAL_CONTROL_NODE_LIST(V) \ |
| V(Abort) \ |
| V(Return) \ |
| V(Deopt) |
| |
| #define CONTROL_NODE_LIST(V) \ |
| TERMINAL_CONTROL_NODE_LIST(V) \ |
| CONDITIONAL_CONTROL_NODE_LIST(V) \ |
| UNCONDITIONAL_CONTROL_NODE_LIST(V) |
| |
| #define NODE_BASE_LIST(V) \ |
| NODE_LIST(V) \ |
| CONTROL_NODE_LIST(V) |
| |
| // Define the opcode enum. |
| #define DEF_OPCODES(type) k##type, |
| enum class Opcode : uint16_t { NODE_BASE_LIST(DEF_OPCODES) }; |
| #undef DEF_OPCODES |
| #define PLUS_ONE(type) +1 |
| static constexpr int kOpcodeCount = NODE_BASE_LIST(PLUS_ONE); |
| static constexpr Opcode kFirstOpcode = static_cast<Opcode>(0); |
| static constexpr Opcode kLastOpcode = static_cast<Opcode>(kOpcodeCount - 1); |
| #undef PLUS_ONE |
| |
| const char* OpcodeToString(Opcode opcode); |
| inline std::ostream& operator<<(std::ostream& os, Opcode opcode) { |
| return os << OpcodeToString(opcode); |
| } |
| |
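| // Compute the [first, last] opcode range of each node sub-list below. The |
| // trailing kLastOpcode (for std::min) and kFirstOpcode (for std::max) terms |
| // absorb the trailing comma emitted by the list macros and never affect the |
| // result. |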
| #define V(Name) Opcode::k##Name, |
| static constexpr Opcode kFirstValueNodeOpcode = |
| std::min({VALUE_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastValueNodeOpcode = |
| std::max({VALUE_NODE_LIST(V) kFirstOpcode}); |
| static constexpr Opcode kFirstConstantNodeOpcode = |
| std::min({CONSTANT_VALUE_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastConstantNodeOpcode = |
| std::max({CONSTANT_VALUE_NODE_LIST(V) kFirstOpcode}); |
| static constexpr Opcode kFirstGapMoveNodeOpcode = |
| std::min({GAP_MOVE_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastGapMoveNodeOpcode = |
| std::max({GAP_MOVE_NODE_LIST(V) kFirstOpcode}); |
| |
| static constexpr Opcode kFirstNodeOpcode = std::min({NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastNodeOpcode = std::max({NODE_LIST(V) kFirstOpcode}); |
| |
| static constexpr Opcode kFirstBranchControlNodeOpcode = |
| std::min({BRANCH_CONTROL_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastBranchControlNodeOpcode = |
| std::max({BRANCH_CONTROL_NODE_LIST(V) kFirstOpcode}); |
| |
| static constexpr Opcode kFirstConditionalControlNodeOpcode = |
| std::min({CONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastConditionalControlNodeOpcode = |
| std::max({CONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode}); |
| |
| static constexpr Opcode kLastUnconditionalControlNodeOpcode = |
| std::max({UNCONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode}); |
| static constexpr Opcode kFirstUnconditionalControlNodeOpcode = |
| std::min({UNCONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode}); |
| |
| static constexpr Opcode kLastTerminalControlNodeOpcode = |
| std::max({TERMINAL_CONTROL_NODE_LIST(V) kFirstOpcode}); |
| static constexpr Opcode kFirstTerminalControlNodeOpcode = |
| std::min({TERMINAL_CONTROL_NODE_LIST(V) kLastOpcode}); |
| |
| static constexpr Opcode kFirstControlNodeOpcode = |
| std::min({CONTROL_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastControlNodeOpcode = |
| std::max({CONTROL_NODE_LIST(V) kFirstOpcode}); |
| #undef V |
| |
| constexpr bool IsValueNode(Opcode opcode) { |
| return kFirstValueNodeOpcode <= opcode && opcode <= kLastValueNodeOpcode; |
| } |
| constexpr bool IsConstantNode(Opcode opcode) { |
| return kFirstConstantNodeOpcode <= opcode && |
| opcode <= kLastConstantNodeOpcode; |
| } |
| constexpr bool IsGapMoveNode(Opcode opcode) { |
| return kFirstGapMoveNodeOpcode <= opcode && opcode <= kLastGapMoveNodeOpcode; |
| } |
| constexpr bool IsControlNode(Opcode opcode) { |
| return kFirstControlNodeOpcode <= opcode && opcode <= kLastControlNodeOpcode; |
| } |
| constexpr bool IsBranchControlNode(Opcode opcode) { |
| return kFirstBranchControlNodeOpcode <= opcode && |
| opcode <= kLastBranchControlNodeOpcode; |
| } |
| constexpr bool IsConditionalControlNode(Opcode opcode) { |
| return kFirstConditionalControlNodeOpcode <= opcode && |
| opcode <= kLastConditionalControlNodeOpcode; |
| } |
| constexpr bool IsUnconditionalControlNode(Opcode opcode) { |
| return kFirstUnconditionalControlNodeOpcode <= opcode && |
| opcode <= kLastUnconditionalControlNodeOpcode; |
| } |
| constexpr bool IsTerminalControlNode(Opcode opcode) { |
| return kFirstTerminalControlNodeOpcode <= opcode && |
| opcode <= kLastTerminalControlNodeOpcode; |
| } |
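| |
| // Illustrative use (hypothetical call site): since these checks are plain |
| // integer range comparisons on the opcode, they are cheap enough for hot |
| // paths, e.g. |
| // |
| //   if (IsTerminalControlNode(node->opcode())) { /* block ends execution */ } |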
| |
| // Forward-declare NodeBase sub-hierarchies. |
| class Node; |
| class ControlNode; |
| class ConditionalControlNode; |
| class BranchControlNode; |
| class UnconditionalControlNode; |
| class TerminalControlNode; |
| class ValueNode; |
| |
| enum class ValueRepresentation : uint8_t { |
| kTagged, |
| kInt32, |
| kUint32, |
| kFloat64, |
| kHoleyFloat64, |
| kWord64 |
| }; |
| |
| inline constexpr bool IsDoubleRepresentation(ValueRepresentation repr) { |
| return repr == ValueRepresentation::kFloat64 || |
| repr == ValueRepresentation::kHoleyFloat64; |
| } |
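| |
| // For example, IsDoubleRepresentation(ValueRepresentation::kHoleyFloat64) is |
| // true, while IsDoubleRepresentation(ValueRepresentation::kTagged) is false. |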
| |
| enum class TaggedToFloat64ConversionType : uint8_t { |
| kOnlyNumber, |
| kNumberOrOddball, |
| }; |
| |
| constexpr Condition ConditionFor(Operation cond); |
| constexpr Condition ConditionForNaN(); |
| |
| bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node); |
| bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node); |
| |
| inline int ExternalArrayElementSize(const ExternalArrayType element_type) { |
| switch (element_type) { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ |
| case kExternal##Type##Array: \ |
| DCHECK_LE(sizeof(ctype), 8); \ |
| return sizeof(ctype); |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| default: |
| UNREACHABLE(); |
| #undef TYPED_ARRAY_CASE |
| } |
| } |
| |
| inline int ElementsKindSize(ElementsKind element_kind) { |
| switch (element_kind) { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ |
| case TYPE##_ELEMENTS: \ |
| DCHECK_LE(sizeof(ctype), 8); \ |
| return sizeof(ctype); |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| default: |
| UNREACHABLE(); |
| #undef TYPED_ARRAY_CASE |
| } |
| } |
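| |
| // For example, ElementsKindSize(UINT8_ELEMENTS) == 1 and |
| // ElementsKindSize(FLOAT64_ELEMENTS) == 8. |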
| |
| inline std::ostream& operator<<(std::ostream& os, |
| const ValueRepresentation& repr) { |
| switch (repr) { |
| case ValueRepresentation::kTagged: |
| return os << "Tagged"; |
| case ValueRepresentation::kInt32: |
| return os << "Int32"; |
| case ValueRepresentation::kUint32: |
| return os << "Uint32"; |
| case ValueRepresentation::kFloat64: |
| return os << "Float64"; |
| case ValueRepresentation::kHoleyFloat64: |
| return os << "HoleyFloat64"; |
| case ValueRepresentation::kWord64: |
| return os << "Word64"; |
| } |
| } |
| |
| inline std::ostream& operator<<( |
| std::ostream& os, const TaggedToFloat64ConversionType& conversion_type) { |
| switch (conversion_type) { |
| case TaggedToFloat64ConversionType::kOnlyNumber: |
| return os << "Number"; |
| case TaggedToFloat64ConversionType::kNumberOrOddball: |
| return os << "NumberOrOddball"; |
| } |
| } |
| |
| inline bool HasOnlyJSTypedArrayMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (!map.IsJSTypedArrayMap()) return false; |
| } |
| return true; |
| } |
| |
| inline bool HasOnlyJSArrayMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (!map.IsJSArrayMap()) return false; |
| } |
| return true; |
| } |
| |
| inline bool HasOnlyJSObjectMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (!map.IsJSObjectMap()) return false; |
| } |
| return true; |
| } |
| |
| inline bool HasOnlyStringMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (!map.IsStringMap()) return false; |
| } |
| return true; |
| } |
| |
| inline bool HasOnlyNumberMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (map.instance_type() != HEAP_NUMBER_TYPE) return false; |
| } |
| return true; |
| } |
| |
| #define DEF_FORWARD_DECLARATION(type, ...) class type; |
| NODE_BASE_LIST(DEF_FORWARD_DECLARATION) |
| #undef DEF_FORWARD_DECLARATION |
| |
| using NodeIdT = uint32_t; |
| static constexpr uint32_t kInvalidNodeId = 0; |
| static constexpr uint32_t kFirstValidNodeId = 1; |
| |
| // Represents either a direct BasicBlock pointer, or an entry in a list of |
| // unresolved BasicBlockRefs which will be mutated (in place) at some point into |
| // direct BasicBlock pointers. |
| class BasicBlockRef { |
| struct BasicBlockRefBuilder; |
| |
| public: |
| BasicBlockRef() : next_ref_(nullptr) { |
| #ifdef DEBUG |
| state_ = kRefList; |
| #endif |
| } |
| explicit BasicBlockRef(BasicBlock* block) : block_ptr_(block) { |
| #ifdef DEBUG |
| state_ = kBlockPointer; |
| #endif |
| } |
| |
|   // Refs can't be copied or moved, since they are referenced by their |
|   // `this` pointer in the ref list. |
| BasicBlockRef(const BasicBlockRef&) = delete; |
| BasicBlockRef(BasicBlockRef&&) = delete; |
| BasicBlockRef& operator=(const BasicBlockRef&) = delete; |
| BasicBlockRef& operator=(BasicBlockRef&&) = delete; |
| |
| // Construct a new ref-list mode BasicBlockRef and add it to the given ref |
| // list. |
| explicit BasicBlockRef(BasicBlockRef* ref_list_head) : BasicBlockRef() { |
| BasicBlockRef* old_next_ptr = MoveToRefList(ref_list_head); |
| USE(old_next_ptr); |
| DCHECK_NULL(old_next_ptr); |
| } |
| |
| // Change this ref to a direct basic block pointer, returning the old "next" |
| // pointer of the current ref. |
| BasicBlockRef* SetToBlockAndReturnNext(BasicBlock* block) { |
| DCHECK_EQ(state_, kRefList); |
| |
| BasicBlockRef* old_next_ptr = next_ref_; |
| block_ptr_ = block; |
| #ifdef DEBUG |
| state_ = kBlockPointer; |
| #endif |
| return old_next_ptr; |
| } |
| |
| // Reset this ref list to null, returning the old ref list (i.e. the old |
| // "next" pointer). |
| BasicBlockRef* Reset() { |
| DCHECK_EQ(state_, kRefList); |
| |
| BasicBlockRef* old_next_ptr = next_ref_; |
| next_ref_ = nullptr; |
| return old_next_ptr; |
| } |
| |
| // Move this ref to the given ref list, returning the old "next" pointer of |
| // the current ref. |
| BasicBlockRef* MoveToRefList(BasicBlockRef* ref_list_head) { |
| DCHECK_EQ(state_, kRefList); |
| DCHECK_EQ(ref_list_head->state_, kRefList); |
| |
| BasicBlockRef* old_next_ptr = next_ref_; |
| next_ref_ = ref_list_head->next_ref_; |
| ref_list_head->next_ref_ = this; |
| return old_next_ptr; |
| } |
| |
| BasicBlock* block_ptr() const { |
| DCHECK_EQ(state_, kBlockPointer); |
| return block_ptr_; |
| } |
| |
| BasicBlockRef* next_ref() const { |
| DCHECK_EQ(state_, kRefList); |
| return next_ref_; |
| } |
| |
| bool has_ref() const { |
| DCHECK_EQ(state_, kRefList); |
| return next_ref_ != nullptr; |
| } |
| |
| private: |
| union { |
| BasicBlock* block_ptr_; |
| BasicBlockRef* next_ref_; |
| }; |
| #ifdef DEBUG |
| enum { kBlockPointer, kRefList } state_; |
| #endif // DEBUG |
| }; |
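| |
| // Illustrative sketch of the ref-list protocol (names are hypothetical): |
| // forward jumps enqueue refs while the target block is still unknown, and |
| // creating the target later patches every pending ref in place: |
| // |
| //   BasicBlockRef list_head;                // empty ref list |
| //   BasicBlockRef jump_target(&list_head);  // enqueue an unresolved ref |
| //   ... |
| //   BasicBlockRef* ref = list_head.Reset();  // detach the pending refs |
| //   while (ref != nullptr) { |
| //     ref = ref->SetToBlockAndReturnNext(block);  // patch and advance |
| //   } |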
| |
| class OpProperties { |
| public: |
| constexpr bool is_call() const { |
| // Only returns true for non-deferred calls. Use `is_any_call` to check |
| // deferred calls as well. |
| return kIsCallBit::decode(bitfield_); |
| } |
| constexpr bool can_eager_deopt() const { |
| return kCanEagerDeoptBit::decode(bitfield_); |
| } |
| constexpr bool can_lazy_deopt() const { |
| return kCanLazyDeoptBit::decode(bitfield_); |
| } |
| constexpr bool can_deopt() const { |
| return can_eager_deopt() || can_lazy_deopt(); |
| } |
| constexpr bool can_throw() const { |
| return kCanThrowBit::decode(bitfield_) && can_lazy_deopt(); |
| } |
| constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); } |
| constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); } |
| constexpr bool non_memory_side_effects() const { |
| return kNonMemorySideEffectsBit::decode(bitfield_); |
| } |
| constexpr ValueRepresentation value_representation() const { |
| return kValueRepresentationBits::decode(bitfield_); |
| } |
| constexpr bool is_tagged() const { |
| return value_representation() == ValueRepresentation::kTagged; |
| } |
| constexpr bool is_conversion() const { |
| return kIsConversionBit::decode(bitfield_); |
| } |
| constexpr bool needs_register_snapshot() const { |
| return kNeedsRegisterSnapshotBit::decode(bitfield_); |
| } |
| constexpr bool is_pure() const { |
| return (bitfield_ & kPureMask) == kPureValue; |
| } |
| constexpr bool has_any_side_effects() const { |
| return can_write() || non_memory_side_effects(); |
| } |
|   constexpr bool is_required_when_unused() const { |
|     if (is_conversion()) { |
|       // Calls in conversions are not counted as a side effect as far as |
|       // is_required_when_unused is concerned, since they should always be |
|       // calls to the Allocate builtin. |
| return has_any_side_effects() || can_throw() || can_deopt(); |
| } else { |
| return has_any_side_effects() || can_throw() || can_deopt() || |
| is_any_call(); |
| } |
| } |
| |
|   constexpr OpProperties operator|(const OpProperties& that) const { |
| return OpProperties(bitfield_ | that.bitfield_); |
| } |
| |
| static constexpr OpProperties Pure() { return OpProperties(kPureValue); } |
| static constexpr OpProperties Call() { |
| return OpProperties(kIsCallBit::encode(true)); |
| } |
| static constexpr OpProperties EagerDeopt() { |
| return OpProperties(kCanEagerDeoptBit::encode(true)); |
| } |
| static constexpr OpProperties LazyDeopt() { |
| return OpProperties(kCanLazyDeoptBit::encode(true)); |
| } |
| static constexpr OpProperties Throw() { |
| return OpProperties(kCanThrowBit::encode(true)) | LazyDeopt(); |
| } |
| static constexpr OpProperties Reading() { |
| return OpProperties(kCanReadBit::encode(true)); |
| } |
| static constexpr OpProperties Writing() { |
| return OpProperties(kCanWriteBit::encode(true)); |
| } |
| static constexpr OpProperties NonMemorySideEffects() { |
| return OpProperties(kNonMemorySideEffectsBit::encode(true)); |
| } |
| static constexpr OpProperties TaggedValue() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kTagged)); |
| } |
| static constexpr OpProperties ExternalReference() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kWord64)); |
| } |
| static constexpr OpProperties Int32() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kInt32)); |
| } |
| static constexpr OpProperties Uint32() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kUint32)); |
| } |
| static constexpr OpProperties Float64() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kFloat64)); |
| } |
| static constexpr OpProperties HoleyFloat64() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kHoleyFloat64)); |
| } |
| static constexpr OpProperties ConversionNode() { |
| return OpProperties(kIsConversionBit::encode(true)); |
| } |
| static constexpr OpProperties CanCallUserCode() { |
| return NonMemorySideEffects() | LazyDeopt() | Throw(); |
| } |
| // Without auditing the call target, we must assume it can cause a lazy deopt |
|   // and throw. Use this when codegen calls the runtime or a builtin, unless |
|   // it is certain that the target either doesn't throw or cannot deopt. |
| // TODO(jgruber): Go through all nodes marked with this property and decide |
| // whether to keep it (or remove either the lazy-deopt or throw flag). |
| static constexpr OpProperties GenericRuntimeOrBuiltinCall() { |
| return Call() | CanCallUserCode(); |
| } |
| static constexpr OpProperties JSCall() { return Call() | CanCallUserCode(); } |
| static constexpr OpProperties AnySideEffects() { |
| return Reading() | Writing() | NonMemorySideEffects(); |
| } |
| static constexpr OpProperties DeferredCall() { |
| // Operations with a deferred call need a snapshot of register state, |
| // because they need to be able to push registers to save them, and annotate |
| // the safepoint with information about which registers are tagged. |
| return NeedsRegisterSnapshot(); |
| } |
| |
| constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {} |
| operator uint32_t() const { return bitfield_; } |
| |
| OpProperties WithNewValueRepresentation(ValueRepresentation new_repr) const { |
| return OpProperties(kValueRepresentationBits::update(bitfield_, new_repr)); |
| } |
| |
| OpProperties WithoutDeopt() const { |
| return OpProperties(kCanLazyDeoptBit::update( |
| kCanEagerDeoptBit::update(bitfield_, false), false)); |
| } |
| |
| private: |
| using kIsCallBit = base::BitField<bool, 0, 1>; |
| using kCanEagerDeoptBit = kIsCallBit::Next<bool, 1>; |
| using kCanLazyDeoptBit = kCanEagerDeoptBit::Next<bool, 1>; |
| using kCanThrowBit = kCanLazyDeoptBit::Next<bool, 1>; |
| using kCanReadBit = kCanThrowBit::Next<bool, 1>; |
| using kCanWriteBit = kCanReadBit::Next<bool, 1>; |
| using kNonMemorySideEffectsBit = kCanWriteBit::Next<bool, 1>; |
| using kValueRepresentationBits = |
| kNonMemorySideEffectsBit::Next<ValueRepresentation, 3>; |
| using kIsConversionBit = kValueRepresentationBits::Next<bool, 1>; |
| using kNeedsRegisterSnapshotBit = kIsConversionBit::Next<bool, 1>; |
| |
| static const uint32_t kPureMask = kCanReadBit::kMask | kCanWriteBit::kMask | |
| kNonMemorySideEffectsBit::kMask; |
| static const uint32_t kPureValue = kCanReadBit::encode(false) | |
| kCanWriteBit::encode(false) | |
| kNonMemorySideEffectsBit::encode(false); |
| |
| // NeedsRegisterSnapshot is only used for DeferredCall, and we rely on this in |
| // `is_any_call` to detect deferred calls. If you need to use |
|   // NeedsRegisterSnapshot for something other than DeferredCall, then you'll |
| // have to update `is_any_call`. |
| static constexpr OpProperties NeedsRegisterSnapshot() { |
| return OpProperties(kNeedsRegisterSnapshotBit::encode(true)); |
| } |
| |
| const uint32_t bitfield_; |
| |
| public: |
| static const size_t kSize = kNeedsRegisterSnapshotBit::kLastUsedBit + 1; |
| |
| constexpr bool is_any_call() const { |
| // Currently, there is no kDeferredCall bit, but DeferredCall only sets a |
|     // single bit: kNeedsRegisterSnapshot. If this static assert breaks, it |
| // means that you added additional properties to DeferredCall, and you |
| // should update this function accordingly. |
| static_assert(DeferredCall().bitfield_ == |
| kNeedsRegisterSnapshotBit::encode(true)); |
| return is_call() || needs_register_snapshot(); |
| } |
| }; |
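| |
| // Illustrative sketch (hypothetical node): subclasses declare their static |
| // properties by combining the factories above, e.g. |
| // |
| //   static constexpr OpProperties kProperties = |
| //       OpProperties::EagerDeopt() | OpProperties::Int32(); |
| // |
| // which describes a node that may eager-deopt and produces an untagged |
| // int32 value. |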
| |
| constexpr inline OpProperties StaticPropertiesForOpcode(Opcode opcode); |
| |
| class ValueLocation { |
| public: |
| ValueLocation() = default; |
| |
| template <typename... Args> |
| void SetUnallocated(Args&&... args) { |
| DCHECK(operand_.IsInvalid()); |
| operand_ = compiler::UnallocatedOperand(args...); |
| } |
| |
| template <typename... Args> |
| void SetAllocated(Args&&... args) { |
| DCHECK(operand_.IsUnallocated()); |
| operand_ = compiler::AllocatedOperand(args...); |
| } |
| |
| // Only to be used on inputs that inherit allocation. |
| void InjectLocation(compiler::InstructionOperand location) { |
| operand_ = location; |
| } |
| |
| // We use USED_AT_START to indicate that the input will be clobbered. |
|   bool Clobbered() { |
| DCHECK(operand_.IsUnallocated()); |
| return compiler::UnallocatedOperand::cast(operand_).IsUsedAtStart(); |
| } |
| |
| template <typename... Args> |
| void SetConstant(Args&&... args) { |
| DCHECK(operand_.IsUnallocated()); |
| operand_ = compiler::ConstantOperand(args...); |
| } |
| |
| Register AssignedGeneralRegister() const { |
| DCHECK(!IsDoubleRegister()); |
| return compiler::AllocatedOperand::cast(operand_).GetRegister(); |
| } |
| |
| DoubleRegister AssignedDoubleRegister() const { |
| DCHECK(IsDoubleRegister()); |
| return compiler::AllocatedOperand::cast(operand_).GetDoubleRegister(); |
| } |
| |
| bool IsAnyRegister() const { return operand_.IsAnyRegister(); } |
| bool IsGeneralRegister() const { return operand_.IsRegister(); } |
| bool IsDoubleRegister() const { return operand_.IsDoubleRegister(); } |
| |
|   const compiler::InstructionOperand& operand() const { return operand_; } |
| |
| private: |
| compiler::InstructionOperand operand_; |
| }; |
| |
| class InputLocation : public ValueLocation { |
| public: |
| NodeIdT next_use_id() const { return next_use_id_; } |
| // Used in ValueNode::mark_use |
| NodeIdT* get_next_use_id_address() { return &next_use_id_; } |
| |
| private: |
| NodeIdT next_use_id_ = kInvalidNodeId; |
| }; |
| |
| class Input : public InputLocation { |
| public: |
| explicit Input(ValueNode* node) : node_(node) {} |
| ValueNode* node() const { return node_; } |
| |
| private: |
| ValueNode* node_; |
| }; |
| |
| class InterpretedDeoptFrame; |
| class InlinedArgumentsDeoptFrame; |
| class ConstructStubDeoptFrame; |
| class BuiltinContinuationDeoptFrame; |
| class DeoptFrame { |
| public: |
| enum class FrameType { |
| kInterpretedFrame, |
| kInlinedArgumentsFrame, |
| kConstructStubFrame, |
| kBuiltinContinuationFrame, |
| }; |
| |
| struct InterpretedFrameData { |
| const MaglevCompilationUnit& unit; |
| const CompactInterpreterFrameState* frame_state; |
| ValueNode* closure; |
| const BytecodeOffset bytecode_position; |
| const SourcePosition source_position; |
| }; |
| |
| struct InlinedArgumentsFrameData { |
| const MaglevCompilationUnit& unit; |
| const BytecodeOffset bytecode_position; |
| ValueNode* closure; |
| const base::Vector<ValueNode*> arguments; |
| }; |
| |
| struct ConstructStubFrameData { |
| const MaglevCompilationUnit& unit; |
| const BytecodeOffset bytecode_position; |
| const SourcePosition source_position; |
| ValueNode* closure; |
| ValueNode* receiver; |
| const base::Vector<ValueNode*> arguments_without_receiver; |
| ValueNode* context; |
| }; |
| |
| struct BuiltinContinuationFrameData { |
| const Builtin builtin_id; |
| const base::Vector<ValueNode*> parameters; |
| ValueNode* context; |
| }; |
| |
| using FrameData = base::DiscriminatedUnion< |
| FrameType, InterpretedFrameData, InlinedArgumentsFrameData, |
| ConstructStubFrameData, BuiltinContinuationFrameData>; |
| |
| DeoptFrame(FrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| |
| FrameType type() const { return data_.tag(); } |
| DeoptFrame* parent() { return parent_; } |
| const DeoptFrame* parent() const { return parent_; } |
| |
| inline const InterpretedDeoptFrame& as_interpreted() const; |
| inline const InlinedArgumentsDeoptFrame& as_inlined_arguments() const; |
| inline const ConstructStubDeoptFrame& as_construct_stub() const; |
| inline const BuiltinContinuationDeoptFrame& as_builtin_continuation() const; |
| inline InterpretedDeoptFrame& as_interpreted(); |
| inline InlinedArgumentsDeoptFrame& as_inlined_arguments(); |
| inline ConstructStubDeoptFrame& as_construct_stub(); |
| inline BuiltinContinuationDeoptFrame& as_builtin_continuation(); |
| |
| protected: |
| DeoptFrame(InterpretedFrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| DeoptFrame(InlinedArgumentsFrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| DeoptFrame(ConstructStubFrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| DeoptFrame(BuiltinContinuationFrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| |
| FrameData data_; |
| DeoptFrame* const parent_; |
| }; |
| |
| class InterpretedDeoptFrame : public DeoptFrame { |
| public: |
| InterpretedDeoptFrame(const MaglevCompilationUnit& unit, |
| const CompactInterpreterFrameState* frame_state, |
| ValueNode* closure, BytecodeOffset bytecode_position, |
| SourcePosition source_position, DeoptFrame* parent) |
| : DeoptFrame(InterpretedFrameData{unit, frame_state, closure, |
| bytecode_position, source_position}, |
| parent) {} |
| |
| const MaglevCompilationUnit& unit() const { return data().unit; } |
| const CompactInterpreterFrameState* frame_state() const { |
| return data().frame_state; |
| } |
| ValueNode*& closure() { return data().closure; } |
| ValueNode* closure() const { return data().closure; } |
| BytecodeOffset bytecode_position() const { return data().bytecode_position; } |
| SourcePosition source_position() const { return data().source_position; } |
| |
| private: |
| InterpretedFrameData& data() { return data_.get<InterpretedFrameData>(); } |
| const InterpretedFrameData& data() const { |
| return data_.get<InterpretedFrameData>(); |
| } |
| }; |
| |
| // Make sure storing/passing deopt frames by value doesn't truncate them. |
| static_assert(sizeof(InterpretedDeoptFrame) == sizeof(DeoptFrame)); |
| |
| inline const InterpretedDeoptFrame& DeoptFrame::as_interpreted() const { |
| DCHECK_EQ(type(), FrameType::kInterpretedFrame); |
| return static_cast<const InterpretedDeoptFrame&>(*this); |
| } |
| inline InterpretedDeoptFrame& DeoptFrame::as_interpreted() { |
| DCHECK_EQ(type(), FrameType::kInterpretedFrame); |
| return static_cast<InterpretedDeoptFrame&>(*this); |
| } |
| |
| class InlinedArgumentsDeoptFrame : public DeoptFrame { |
| public: |
| InlinedArgumentsDeoptFrame(const MaglevCompilationUnit& unit, |
| BytecodeOffset bytecode_position, |
| ValueNode* closure, |
| base::Vector<ValueNode*> arguments, |
| DeoptFrame* parent) |
| : DeoptFrame(InlinedArgumentsFrameData{unit, bytecode_position, closure, |
| arguments}, |
| parent) {} |
| |
| const MaglevCompilationUnit& unit() const { return data().unit; } |
| BytecodeOffset bytecode_position() const { return data().bytecode_position; } |
| ValueNode*& closure() { return data().closure; } |
| ValueNode* closure() const { return data().closure; } |
| base::Vector<ValueNode*> arguments() const { return data().arguments; } |
| |
| private: |
| InlinedArgumentsFrameData& data() { |
| return data_.get<InlinedArgumentsFrameData>(); |
| } |
| const InlinedArgumentsFrameData& data() const { |
| return data_.get<InlinedArgumentsFrameData>(); |
| } |
| }; |
| |
| // Make sure storing/passing deopt frames by value doesn't truncate them. |
| static_assert(sizeof(InlinedArgumentsDeoptFrame) == sizeof(DeoptFrame)); |
| |
| inline const InlinedArgumentsDeoptFrame& DeoptFrame::as_inlined_arguments() |
| const { |
| DCHECK_EQ(type(), FrameType::kInlinedArgumentsFrame); |
| return static_cast<const InlinedArgumentsDeoptFrame&>(*this); |
| } |
| inline InlinedArgumentsDeoptFrame& DeoptFrame::as_inlined_arguments() { |
| DCHECK_EQ(type(), FrameType::kInlinedArgumentsFrame); |
| return static_cast<InlinedArgumentsDeoptFrame&>(*this); |
| } |
| |
| class ConstructStubDeoptFrame : public DeoptFrame { |
| public: |
| ConstructStubDeoptFrame(const MaglevCompilationUnit& unit, |
| BytecodeOffset bytecode_position, |
| SourcePosition source_position, ValueNode* closure, |
| ValueNode* receiver, |
| base::Vector<ValueNode*> arguments_without_receiver, |
| ValueNode* context, DeoptFrame* parent) |
| : DeoptFrame(ConstructStubFrameData{unit, bytecode_position, |
| source_position, closure, receiver, |
| arguments_without_receiver, context}, |
| parent) {} |
| |
| const MaglevCompilationUnit& unit() const { return data().unit; } |
| BytecodeOffset bytecode_position() const { return data().bytecode_position; } |
| ValueNode*& closure() { return data().closure; } |
| ValueNode* closure() const { return data().closure; } |
| ValueNode*& receiver() { return data().receiver; } |
| ValueNode* receiver() const { return data().receiver; } |
| base::Vector<ValueNode*> arguments_without_receiver() const { |
| return data().arguments_without_receiver; |
| } |
| ValueNode*& context() { return data().context; } |
| ValueNode* context() const { return data().context; } |
| SourcePosition source_position() const { return data().source_position; } |
| |
| private: |
| ConstructStubFrameData& data() { return data_.get<ConstructStubFrameData>(); } |
| const ConstructStubFrameData& data() const { |
| return data_.get<ConstructStubFrameData>(); |
| } |
| }; |
| |
| // Make sure storing/passing deopt frames by value doesn't truncate them. |
| static_assert(sizeof(ConstructStubDeoptFrame) == sizeof(DeoptFrame)); |
| |
| inline const ConstructStubDeoptFrame& DeoptFrame::as_construct_stub() const { |
| DCHECK_EQ(type(), FrameType::kConstructStubFrame); |
| return static_cast<const ConstructStubDeoptFrame&>(*this); |
| } |
| |
| inline ConstructStubDeoptFrame& DeoptFrame::as_construct_stub() { |
| DCHECK_EQ(type(), FrameType::kConstructStubFrame); |
| return static_cast<ConstructStubDeoptFrame&>(*this); |
| } |
| |
| class BuiltinContinuationDeoptFrame : public DeoptFrame { |
| public: |
| BuiltinContinuationDeoptFrame(Builtin builtin_id, |
| base::Vector<ValueNode*> parameters, |
| ValueNode* context, DeoptFrame* parent) |
| : DeoptFrame( |
| BuiltinContinuationFrameData{builtin_id, parameters, context}, |
| parent) {} |
| |
| const Builtin& builtin_id() const { return data().builtin_id; } |
| base::Vector<ValueNode*> parameters() const { return data().parameters; } |
| ValueNode*& context() { return data().context; } |
| ValueNode* context() const { return data().context; } |
| |
| private: |
| BuiltinContinuationFrameData& data() { |
| return data_.get<BuiltinContinuationFrameData>(); |
| } |
| const BuiltinContinuationFrameData& data() const { |
| return data_.get<BuiltinContinuationFrameData>(); |
| } |
| }; |
| |
| // Make sure storing/passing deopt frames by value doesn't truncate them. |
| static_assert(sizeof(BuiltinContinuationDeoptFrame) == sizeof(DeoptFrame)); |
| |
| inline const BuiltinContinuationDeoptFrame& |
| DeoptFrame::as_builtin_continuation() const { |
| DCHECK_EQ(type(), FrameType::kBuiltinContinuationFrame); |
| return static_cast<const BuiltinContinuationDeoptFrame&>(*this); |
| } |
| inline BuiltinContinuationDeoptFrame& DeoptFrame::as_builtin_continuation() { |
| DCHECK_EQ(type(), FrameType::kBuiltinContinuationFrame); |
| return static_cast<BuiltinContinuationDeoptFrame&>(*this); |
| } |
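| |
| // Illustrative sketch (hypothetical locals): deopt frames form a |
| // parent-linked chain from the innermost (top) frame outwards, so consumers |
| // typically walk it as |
| // |
| //   for (const DeoptFrame* frame = &top_frame; frame != nullptr; |
| //        frame = frame->parent()) { |
| //     if (frame->type() == DeoptFrame::FrameType::kInterpretedFrame) { |
| //       USE(frame->as_interpreted().bytecode_position()); |
| //     } |
| //   } |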
| |
| class DeoptInfo { |
| protected: |
| DeoptInfo(Zone* zone, const DeoptFrame top_frame, |
| compiler::FeedbackSource feedback_to_update); |
| |
| public: |
| DeoptFrame& top_frame() { return top_frame_; } |
| const DeoptFrame& top_frame() const { return top_frame_; } |
| const compiler::FeedbackSource& feedback_to_update() { |
| return feedback_to_update_; |
| } |
| |
| InputLocation* input_locations() const { return input_locations_; } |
| Label* deopt_entry_label() { return &deopt_entry_label_; } |
| |
| int translation_index() const { return translation_index_; } |
| void set_translation_index(int index) { translation_index_ = index; } |
| |
| private: |
| DeoptFrame top_frame_; |
| const compiler::FeedbackSource feedback_to_update_; |
| InputLocation* const input_locations_; |
| Label deopt_entry_label_; |
| int translation_index_ = -1; |
| }; |
| |
| struct RegisterSnapshot { |
| RegList live_registers; |
| RegList live_tagged_registers; |
| DoubleRegList live_double_registers; |
| }; |
| |
| class EagerDeoptInfo : public DeoptInfo { |
| public: |
| EagerDeoptInfo(Zone* zone, const DeoptFrame top_frame, |
| compiler::FeedbackSource feedback_to_update) |
| : DeoptInfo(zone, top_frame, feedback_to_update) {} |
| |
| DeoptimizeReason reason() const { return reason_; } |
| void set_reason(DeoptimizeReason reason) { reason_ = reason; } |
| |
| private: |
| DeoptimizeReason reason_ = DeoptimizeReason::kUnknown; |
| }; |
| |
| class LazyDeoptInfo : public DeoptInfo { |
| public: |
| LazyDeoptInfo(Zone* zone, const DeoptFrame top_frame, |
| interpreter::Register result_location, int result_size, |
| compiler::FeedbackSource feedback_to_update) |
| : DeoptInfo(zone, top_frame, feedback_to_update), |
| result_location_(result_location), |
| bitfield_( |
| DeoptingCallReturnPcField::encode(kUninitializedCallReturnPc) | |
| ResultSizeField::encode(result_size)) {} |
| |
| interpreter::Register result_location() const { |
|     // We should only be checking this for interpreted frames; other kinds of |
| // frames shouldn't be considered for result locations. |
| DCHECK_EQ(top_frame().type(), DeoptFrame::FrameType::kInterpretedFrame); |
| return result_location_; |
| } |
| int result_size() const { |
|     // We should only be checking this for interpreted frames; other kinds of |
| // frames shouldn't be considered for result locations. |
| DCHECK_EQ(top_frame().type(), DeoptFrame::FrameType::kInterpretedFrame); |
| return ResultSizeField::decode(bitfield_); |
| } |
| |
| bool IsResultRegister(interpreter::Register reg) const; |
| void UpdateResultLocation(interpreter::Register result_location, |
| int result_size) { |
| // We should only update to a subset of the existing result location. |
| DCHECK_GE(result_location.index(), result_location_.index()); |
| DCHECK_LE(result_location.index() + result_size, |
| result_location_.index() + this->result_size()); |
| result_location_ = result_location; |
| bitfield_ = ResultSizeField::update(bitfield_, result_size); |
| } |
| bool HasResultLocation() const { |
|     // We should only be checking this for interpreted frames; other kinds of |
| // frames shouldn't be considered for result locations. |
| DCHECK_EQ(top_frame().type(), DeoptFrame::FrameType::kInterpretedFrame); |
| return result_location_.is_valid(); |
| } |
| |
| int deopting_call_return_pc() const { |
| DCHECK_NE(DeoptingCallReturnPcField::decode(bitfield_), |
| kUninitializedCallReturnPc); |
| return DeoptingCallReturnPcField::decode(bitfield_); |
| } |
| void set_deopting_call_return_pc(int pc) { |
| DCHECK_EQ(DeoptingCallReturnPcField::decode(bitfield_), |
| kUninitializedCallReturnPc); |
| bitfield_ = DeoptingCallReturnPcField::update(bitfield_, pc); |
| } |
| |
| private: |
| using DeoptingCallReturnPcField = base::BitField<unsigned int, 0, 30>; |
| using ResultSizeField = DeoptingCallReturnPcField::Next<unsigned int, 2>; |
| |
| // The max code size is enforced by the various assemblers, but it's not |
| // visible here, so static assert against the magic constant that we happen |
| // to know is correct. |
| static constexpr int kMaxCodeSize = 512 * MB; |
| static constexpr unsigned int kUninitializedCallReturnPc = |
| DeoptingCallReturnPcField::kMax; |
| static_assert(DeoptingCallReturnPcField::is_valid(kMaxCodeSize)); |
| static_assert(kMaxCodeSize != kUninitializedCallReturnPc); |
| |
| // Lazy deopts can have at most two result registers -- temporarily three for |
| // ForInPrepare. |
| static_assert(ResultSizeField::kMax >= 3); |
| |
| interpreter::Register result_location_; |
| uint32_t bitfield_; |
| }; |
| |
| class ExceptionHandlerInfo { |
| public: |
|   static constexpr int kNoExceptionHandlerPCOffsetMarker = 0xdeadbeef; |
| |
| ExceptionHandlerInfo() |
| : catch_block(), pc_offset(kNoExceptionHandlerPCOffsetMarker) {} |
| |
| explicit ExceptionHandlerInfo(BasicBlockRef* catch_block_ref) |
| : catch_block(catch_block_ref), pc_offset(-1) {} |
| |
|   bool HasExceptionHandler() const { |
| return pc_offset != kNoExceptionHandlerPCOffsetMarker; |
| } |
| |
| BasicBlockRef catch_block; |
| Label trampoline_entry; |
| int pc_offset; |
| }; |
| |
| // Dummy type for the initial raw allocation. |
| struct NodeWithInlineInputs {}; |
| |
| namespace detail { |
| // Helper for getting the static opcode of a Node subclass. This is in a |
| // "detail" namespace rather than in NodeBase because we can't template |
| // specialize outside of namespace scopes before C++17. |
| template <class T> |
| struct opcode_of_helper; |
| |
| #define DEF_OPCODE_OF(Name) \ |
| template <> \ |
| struct opcode_of_helper<Name> { \ |
| static constexpr Opcode value = Opcode::k##Name; \ |
| }; |
| NODE_BASE_LIST(DEF_OPCODE_OF) |
| #undef DEF_OPCODE_OF |
| |
| template <typename T> |
| constexpr T* ObjectPtrBeforeAddress(void* address) { |
| char* address_as_char_ptr = reinterpret_cast<char*>(address); |
| char* object_ptr_as_char_ptr = address_as_char_ptr - sizeof(T); |
| return reinterpret_cast<T*>(object_ptr_as_char_ptr); |
| } |
| |
| template <typename T> |
| constexpr const T* ObjectPtrBeforeAddress(const void* address) { |
| const char* address_as_char_ptr = reinterpret_cast<const char*>(address); |
| const char* object_ptr_as_char_ptr = address_as_char_ptr - sizeof(T); |
| return reinterpret_cast<const T*>(object_ptr_as_char_ptr); |
| } |
| |
| } // namespace detail |
| |
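| // A node and its variable-size data live in one zone allocation: the inputs |
| // (and optional exception handler info, register snapshot, and deopt info) |
| // are laid out *before* the node object itself, which is why the accessors |
| // above subtract sizeof(T) from the node's address. Layout sketch, from low |
| // to high addresses (all parts except the node itself are optional): |
| // |
| //   [ExceptionHandlerInfo] [RegisterSnapshot] [Eager-/LazyDeoptInfo] |
| //   [Input(n-1)] ... [Input(1)] [Input(0)] [NodeBase subclass] |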
| class NodeBase : public ZoneObject { |
| private: |
| // Bitfield specification. |
| using OpcodeField = base::BitField64<Opcode, 0, 16>; |
| static_assert(OpcodeField::is_valid(kLastOpcode)); |
| using OpPropertiesField = |
| OpcodeField::Next<OpProperties, OpProperties::kSize>; |
| using NumTemporariesNeededField = OpPropertiesField::Next<uint8_t, 2>; |
| using NumDoubleTemporariesNeededField = |
| NumTemporariesNeededField::Next<uint8_t, 1>; |
| // Align input count to 32-bit. |
| using UnusedField = NumDoubleTemporariesNeededField::Next<uint8_t, 1>; |
| using InputCountField = UnusedField::Next<size_t, 17>; |
| static_assert(InputCountField::kShift == 32); |
| |
| protected: |
| // Subclasses may use the remaining bitfield bits. |
| template <class T, int size> |
| using NextBitField = InputCountField::Next<T, size>; |
| |
| static constexpr int kMaxInputs = InputCountField::kMax; |
| |
| public: |
| template <class T> |
| static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value; |
| |
| template <class Derived, typename... Args> |
| static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs, |
| Args&&... args) { |
| Derived* node = |
| Allocate<Derived>(zone, inputs.size(), std::forward<Args>(args)...); |
| |
| int i = 0; |
| for (ValueNode* input : inputs) { |
| DCHECK_NOT_NULL(input); |
| node->set_input(i++, input); |
| } |
| |
| return node; |
| } |
| |
| // Inputs must be initialized manually. |
| template <class Derived, typename... Args> |
| static Derived* New(Zone* zone, size_t input_count, Args&&... args) { |
| Derived* node = |
| Allocate<Derived>(zone, input_count, std::forward<Args>(args)...); |
| return node; |
| } |
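| |
|   // Illustrative sketch (hypothetical call site; `zone`, `left` and `right` |
|   // are assumed locals): a two-input value node with no extra constructor |
|   // arguments can be allocated as |
|   // |
|   //   Int32AddWithOverflow* add = |
|   //       NodeBase::New<Int32AddWithOverflow>(zone, {left, right}); |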
| |
| // Overwritten by subclasses. |
| static constexpr OpProperties kProperties = |
| OpProperties::Pure() | OpProperties::TaggedValue(); |
| |
| constexpr Opcode opcode() const { return OpcodeField::decode(bitfield_); } |
| constexpr OpProperties properties() const { |
| return OpPropertiesField::decode(bitfield_); |
| } |
| void set_properties(OpProperties properties) { |
| bitfield_ = OpPropertiesField::update(bitfield_, properties); |
| } |
| |
| template <class T> |
| constexpr bool Is() const; |
| |
| template <class T> |
| constexpr T* Cast() { |
| DCHECK(Is<T>()); |
| return static_cast<T*>(this); |
| } |
| template <class T> |
| constexpr const T* Cast() const { |
| DCHECK(Is<T>()); |
| return static_cast<const T*>(this); |
| } |
| template <class T> |
| constexpr T* TryCast() { |
| return Is<T>() ? static_cast<T*>(this) : nullptr; |
| } |
| |
| constexpr bool has_inputs() const { return input_count() > 0; } |
| constexpr int input_count() const { |
| static_assert(InputCountField::kMax <= kMaxInt); |
| return static_cast<int>(InputCountField::decode(bitfield_)); |
| } |
| |
| constexpr Input& input(int index) { |
| DCHECK_LT(index, input_count()); |
| return *(input_base() - index); |
| } |
| constexpr const Input& input(int index) const { |
| DCHECK_LT(index, input_count()); |
| return *(input_base() - index); |
| } |
| |
| // Input iterators, use like: |
| // |
| // for (Input& input : *node) { ... } |
| constexpr auto begin() { return std::make_reverse_iterator(&input(-1)); } |
| constexpr auto end() { |
| return std::make_reverse_iterator(&input(input_count() - 1)); |
| } |
| |
| constexpr bool has_id() const { return id_ != kInvalidNodeId; } |
| constexpr NodeIdT id() const { |
| DCHECK_NE(id_, kInvalidNodeId); |
| return id_; |
| } |
| void set_id(NodeIdT id) { |
| DCHECK_EQ(id_, kInvalidNodeId); |
| DCHECK_NE(id, kInvalidNodeId); |
| id_ = id; |
| } |
| |
| template <typename RegisterT> |
| uint8_t num_temporaries_needed() const { |
| if constexpr (std::is_same_v<RegisterT, Register>) { |
| return NumTemporariesNeededField::decode(bitfield_); |
| } else { |
| return NumDoubleTemporariesNeededField::decode(bitfield_); |
| } |
| } |
| |
| template <typename RegisterT> |
| RegListBase<RegisterT>& temporaries() { |
| if constexpr (std::is_same_v<RegisterT, Register>) { |
| return temporaries_; |
| } else { |
| return double_temporaries_; |
| } |
| } |
| |
| RegList& general_temporaries() { return temporaries_; } |
| DoubleRegList& double_temporaries() { return double_temporaries_; } |
| |
| template <typename RegisterT> |
| void assign_temporaries(RegListBase<RegisterT> list) { |
| if constexpr (std::is_same_v<RegisterT, Register>) { |
| temporaries_ = list; |
| } else { |
| double_temporaries_ = list; |
| } |
| } |
| |
| enum class InputAllocationPolicy { kFixedRegister, kArbitraryRegister, kAny }; |
| |
| // Some parts of Maglev require a specific iteration order of the inputs (such |
| // as UseMarkingProcessor::MarkInputUses or |
| // StraightForwardRegisterAllocator::AssignInputs). For such cases, |
| // `ForAllInputsInRegallocAssignmentOrder` can be called with a callback `f` |
| // that will be called for each input in the "correct" order. |
| template <typename Function> |
| void ForAllInputsInRegallocAssignmentOrder(Function&& f); |
| |
| void Print(std::ostream& os, MaglevGraphLabeller*, |
| bool skip_targets = false) const; |
| |
| // For GDB: Print any Node with `print node->Print()`. |
| void Print() const; |
| |
| EagerDeoptInfo* eager_deopt_info() { |
| DCHECK(properties().can_eager_deopt()); |
| DCHECK(!properties().can_lazy_deopt()); |
| return reinterpret_cast<EagerDeoptInfo*>(deopt_info_address()); |
| } |
| |
| LazyDeoptInfo* lazy_deopt_info() { |
| DCHECK(properties().can_lazy_deopt()); |
| DCHECK(!properties().can_eager_deopt()); |
| return reinterpret_cast<LazyDeoptInfo*>(deopt_info_address()); |
| } |
| |
| const RegisterSnapshot& register_snapshot() const { |
| DCHECK(properties().needs_register_snapshot()); |
| return *reinterpret_cast<RegisterSnapshot*>(register_snapshot_address()); |
| } |
| |
| ExceptionHandlerInfo* exception_handler_info() { |
| DCHECK(properties().can_throw()); |
| return reinterpret_cast<ExceptionHandlerInfo*>(exception_handler_address()); |
| } |
| |
| void set_register_snapshot(RegisterSnapshot snapshot) { |
| DCHECK(properties().needs_register_snapshot()); |
| *reinterpret_cast<RegisterSnapshot*>(register_snapshot_address()) = |
| snapshot; |
| } |
| |
| void change_input(int index, ValueNode* node) { set_input(index, node); } |
| |
| void change_representation(ValueRepresentation new_repr) { |
| DCHECK_EQ(opcode(), Opcode::kPhi); |
| bitfield_ = OpPropertiesField::update( |
| bitfield_, properties().WithNewValueRepresentation(new_repr)); |
| } |
| |
| void set_opcode(Opcode new_opcode) { |
| bitfield_ = OpcodeField::update(bitfield_, new_opcode); |
| } |
| |
| void CopyEagerDeoptInfoOf(NodeBase* other, Zone* zone) { |
| new (eager_deopt_info()) |
| EagerDeoptInfo(zone, other->eager_deopt_info()->top_frame(), |
| other->eager_deopt_info()->feedback_to_update()); |
| } |
| |
| void SetEagerDeoptInfo(Zone* zone, DeoptFrame deopt_frame, |
| compiler::FeedbackSource feedback_to_update = |
| compiler::FeedbackSource()) { |
| DCHECK(properties().can_eager_deopt()); |
| new (eager_deopt_info()) |
| EagerDeoptInfo(zone, deopt_frame, feedback_to_update); |
| } |
| |
| template <typename NodeT> |
| void OverwriteWith() { |
| OverwriteWith(NodeBase::opcode_of<NodeT>, NodeT::kProperties); |
| } |
| |
| void OverwriteWith( |
| Opcode new_opcode, |
| base::Optional<OpProperties> maybe_new_properties = base::nullopt) { |
| OpProperties new_properties = maybe_new_properties.has_value() |
| ? maybe_new_properties.value() |
| : StaticPropertiesForOpcode(new_opcode); |
| #ifdef DEBUG |
| CheckCanOverwriteWith(new_opcode, new_properties); |
| #endif |
| set_opcode(new_opcode); |
| set_properties(new_properties); |
| } |
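| |
|   // Illustrative sketch (hypothetical condition): a pass that has proven a |
|   // check redundant can rewrite a node in place, e.g. replacing a checked |
|   // untag with its unchecked equivalent: |
|   // |
|   //   if (input_is_known_smi) node->OverwriteWith<UnsafeSmiUntag>(); |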
| |
| protected: |
| explicit NodeBase(uint64_t bitfield) : bitfield_(bitfield) {} |
| |
| // Allow updating bits above NextBitField from subclasses |
| constexpr uint64_t bitfield() const { return bitfield_; } |
| void set_bitfield(uint64_t new_bitfield) { |
| #ifdef DEBUG |
| // Make sure that all the base bitfield bits (all bits before the next |
|     // bitfield start) are equal in the new value. |
| const uint64_t base_bitfield_mask = |
| (uint64_t{1} << NextBitField<bool, 1>::kShift) - 1; |
| DCHECK_EQ(bitfield_ & base_bitfield_mask, |
| new_bitfield & base_bitfield_mask); |
| #endif |
| bitfield_ = new_bitfield; |
| } |
| |
| constexpr Input* input_base() { |
| return detail::ObjectPtrBeforeAddress<Input>(this); |
| } |
| constexpr const Input* input_base() const { |
| return detail::ObjectPtrBeforeAddress<Input>(this); |
| } |
| Input* last_input() { return &input(input_count() - 1); } |
| const Input* last_input() const { return &input(input_count() - 1); } |
| |
| Address last_input_address() const { |
| return reinterpret_cast<Address>(last_input()); |
| } |
| |
| void set_input(int index, ValueNode* node) { |
| new (&input(index)) Input(node); |
| } |
| |
|   // For nodes that don't have data past the inputs, allow trimming the input |
| // count. This is used by Phis to reduce inputs when merging in dead control |
| // flow. |
| void reduce_input_count() { |
| DCHECK_EQ(opcode(), Opcode::kPhi); |
| DCHECK(!properties().can_lazy_deopt()); |
| DCHECK(!properties().can_eager_deopt()); |
| bitfield_ = InputCountField::update(bitfield_, input_count() - 1); |
| } |
| |
|   // Specify that a certain number of registers must be free (i.e. usable as |
|   // scratch registers) on entry into this node. |
| // |
| // Does not include any registers requested by RequireSpecificTemporary. |
| void set_temporaries_needed(uint8_t value) { |
| DCHECK_EQ(num_temporaries_needed<Register>(), 0); |
| bitfield_ = NumTemporariesNeededField::update(bitfield_, value); |
| } |
| |
| void set_double_temporaries_needed(uint8_t value) { |
| DCHECK_EQ(num_temporaries_needed<DoubleRegister>(), 0); |
| bitfield_ = NumDoubleTemporariesNeededField::update(bitfield_, value); |
| } |
| |
|   // Require that a specific register is free (and therefore clobberable) on |
| // entry into this node. |
| void RequireSpecificTemporary(Register reg) { temporaries_.set(reg); } |
| |
| void RequireSpecificDoubleTemporary(DoubleRegister reg) { |
| double_temporaries_.set(reg); |
| } |
| |
| private: |
| template <class Derived, typename... Args> |
| static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) { |
| static_assert( |
| !Derived::kProperties.can_eager_deopt() || |
| !Derived::kProperties.can_lazy_deopt(), |
| "The current deopt info representation, at the end of inputs, requires " |
| "that we cannot have both lazy and eager deopts on a node. If we ever " |
| "need this, we have to update accessors to check node->properties() " |
| "for which deopts are active."); |
| constexpr size_t size_before_inputs = RoundUp<alignof(Input)>( |
| (Derived::kProperties.can_throw() ? sizeof(ExceptionHandlerInfo) : 0) + |
| (Derived::kProperties.needs_register_snapshot() |
| ? sizeof(RegisterSnapshot) |
| : 0) + |
| (Derived::kProperties.can_eager_deopt() ? sizeof(EagerDeoptInfo) : 0) + |
| (Derived::kProperties.can_lazy_deopt() ? sizeof(LazyDeoptInfo) : 0)); |
| |
| static_assert(IsAligned(size_before_inputs, alignof(Input))); |
| const size_t size_before_node = |
| size_before_inputs + input_count * sizeof(Input); |
| |
| DCHECK(IsAligned(size_before_inputs, alignof(Derived))); |
| const size_t size = size_before_node + sizeof(Derived); |
| intptr_t raw_buffer = |
| reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size)); |
| void* node_buffer = reinterpret_cast<void*>(raw_buffer + size_before_node); |
| uint64_t bitfield = OpcodeField::encode(opcode_of<Derived>) | |
| OpPropertiesField::encode(Derived::kProperties) | |
| InputCountField::encode(input_count); |
| Derived* node = |
| new (node_buffer) Derived(bitfield, std::forward<Args>(args)...); |
| return node; |
| } |
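| |
| // The resulting allocation is laid out back-to-front, with the node itself |
| // at the highest address (from low to high addresses): |
| // |
| //   [ExceptionHandlerInfo]     if kProperties.can_throw() |
| //   [RegisterSnapshot]         if kProperties.needs_register_snapshot() |
| //   [Eager-/LazyDeoptInfo]     if the respective deopt property is set |
| //   [Input(input_count - 1)] |
| //   ... |
| //   [Input(0)] |
| //   [Derived node]             <- the pointer returned by Allocate |
| // |
| // This is why input_base() looks *before* `this`, and why the *_address() |
| // helpers below subtract sizes from last_input_address(). |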
| |
| // Returns the address of the deopt info if it exists; otherwise, returns |
| // the address it would have if DeoptInfo's size were zero. |
| Address deopt_info_address() const { |
| DCHECK(!properties().can_eager_deopt() || !properties().can_lazy_deopt()); |
| size_t extra = RoundUp<alignof(Input)>( |
| (properties().can_eager_deopt() ? sizeof(EagerDeoptInfo) : 0) + |
| (properties().can_lazy_deopt() ? sizeof(LazyDeoptInfo) : 0)); |
| return last_input_address() - extra; |
| } |
| |
| // Returns the address of the register snapshot if it exists; otherwise, |
| // returns the address it would have if RegisterSnapshot's size were zero. |
| Address register_snapshot_address() const { |
| size_t extra = RoundUp<alignof(Input)>(( |
| properties().needs_register_snapshot() ? sizeof(RegisterSnapshot) : 0)); |
| return deopt_info_address() - extra; |
| } |
| |
| // Returns the address of the exception handler info if it exists; otherwise, |
| // returns the address it would have if ExceptionHandlerInfo's size were zero. |
| Address exception_handler_address() const { |
| size_t extra = RoundUp<alignof(Input)>( |
| (properties().can_throw() ? sizeof(ExceptionHandlerInfo) : 0)); |
| return register_snapshot_address() - extra; |
| } |
| |
| void CheckCanOverwriteWith(Opcode new_opcode, OpProperties new_properties); |
| |
| uint64_t bitfield_; |
| NodeIdT id_ = kInvalidNodeId; |
| RegList temporaries_; |
| DoubleRegList double_temporaries_; |
| |
| NodeBase() = delete; |
| NodeBase(const NodeBase&) = delete; |
| NodeBase(NodeBase&&) = delete; |
| NodeBase& operator=(const NodeBase&) = delete; |
| NodeBase& operator=(NodeBase&&) = delete; |
| }; |
| |
| template <class T> |
| constexpr bool NodeBase::Is() const { |
| return opcode() == opcode_of<T>; |
| } |
| |
| // Specialized sub-hierarchy type checks. |
| template <> |
| constexpr bool NodeBase::Is<ValueNode>() const { |
| return IsValueNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<ControlNode>() const { |
| return IsControlNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<BranchControlNode>() const { |
| return IsBranchControlNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<ConditionalControlNode>() const { |
| return IsConditionalControlNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<UnconditionalControlNode>() const { |
| return IsUnconditionalControlNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<TerminalControlNode>() const { |
| return IsTerminalControlNode(opcode()); |
| } |
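| |
| // Example (illustrative): these specializations let Is<T> test abstract |
| // sub-hierarchies as well as concrete opcodes, so checked downcasts can be |
| // written uniformly: |
| // |
| //   if (node->Is<ValueNode>()) { |
| //     ValueNode* value = node->Cast<ValueNode>(); |
| //     // ... use value ... |
| //   } |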
| |
| void CheckValueInputIs(const NodeBase* node, int i, |
| ValueRepresentation expected, |
| MaglevGraphLabeller* graph_labeller); |
| |
| // The Node class hierarchy contains all non-control nodes. |
| class Node : public NodeBase { |
| public: |
| using List = base::ThreadedListWithUnsafeInsertions<Node>; |
| |
| inline ValueLocation& result(); |
| |
| Node* NextNode() const { return next_; } |
| |
| protected: |
| using NodeBase::NodeBase; |
| |
| private: |
| Node** next() { return &next_; } |
| Node* next_ = nullptr; |
| |
| friend List; |
| friend base::ThreadedListTraits<Node>; |
| }; |
| |
| // All non-control nodes with a result. |
| class ValueNode : public Node { |
| private: |
| using TaggedResultNeedsDecompressField = NextBitField<bool, 1>; |
| |
| protected: |
| // Subclasses may use the remaining bitfield bits. |
| template <class T, int size> |
| using NextBitField = TaggedResultNeedsDecompressField::Next<T, size>; |
| |
| public: |
| ValueLocation& result() { return result_; } |
| const ValueLocation& result() const { return result_; } |
| |
| void SetHint(compiler::InstructionOperand hint); |
| |
| void ClearHint() { hint_ = compiler::InstructionOperand(); } |
| |
| bool has_hint() { return !hint_.IsInvalid(); } |
| |
| template <typename RegisterT> |
| RegisterT GetRegisterHint() { |
| if (hint_.IsInvalid()) return RegisterT::no_reg(); |
| return RegisterT::from_code( |
| compiler::UnallocatedOperand::cast(hint_).fixed_register_index()); |
| } |
| |
| const compiler::InstructionOperand& hint() const { |
| DCHECK(hint_.IsInvalid() || hint_.IsUnallocated()); |
| return hint_; |
| } |
| |
| bool is_loadable() const { |
| DCHECK_EQ(state_, kSpill); |
| return spill_.IsConstant() || spill_.IsAnyStackSlot(); |
| } |
| |
| bool is_spilled() const { |
| DCHECK_EQ(state_, kSpill); |
| return spill_.IsAnyStackSlot(); |
| } |
| |
| void SetNoSpill(); |
| void SetConstantLocation(); |
| |
| // For constants only. |
| void LoadToRegister(MaglevAssembler*, Register); |
| void LoadToRegister(MaglevAssembler*, DoubleRegister); |
| void DoLoadToRegister(MaglevAssembler*, Register); |
| void DoLoadToRegister(MaglevAssembler*, DoubleRegister); |
| Handle<Object> Reify(LocalIsolate* isolate) const; |
| |
| void Spill(compiler::AllocatedOperand operand) { |
| #ifdef DEBUG |
| if (state_ == kLastUse) { |
| state_ = kSpill; |
| } else { |
| DCHECK(!is_loadable()); |
| } |
| #endif // DEBUG |
| DCHECK(!IsConstantNode(opcode())); |
| DCHECK(operand.IsAnyStackSlot()); |
| spill_ = operand; |
| DCHECK(spill_.IsAnyStackSlot()); |
| } |
| |
| compiler::AllocatedOperand spill_slot() const { |
| DCHECK(is_spilled()); |
| return compiler::AllocatedOperand::cast(loadable_slot()); |
| } |
| |
| compiler::InstructionOperand loadable_slot() const { |
| DCHECK_EQ(state_, kSpill); |
| DCHECK(is_loadable()); |
| return spill_; |
| } |
| |
| void mark_use(NodeIdT id, InputLocation* input_location) { |
| DCHECK_EQ(state_, kLastUse); |
| DCHECK_NE(id, kInvalidNodeId); |
| DCHECK_LT(start_id(), id); |
| DCHECK_IMPLIES(has_valid_live_range(), id >= end_id_); |
| end_id_ = id; |
| *last_uses_next_use_id_ = id; |
| last_uses_next_use_id_ = input_location->get_next_use_id_address(); |
| DCHECK_EQ(*last_uses_next_use_id_, kInvalidNodeId); |
| } |
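| |
| // Uses thus form a singly-linked chain: this node's next_use_ holds the id |
| // of its first use, each use's InputLocation then stores the id of the |
| // following use, and last_uses_next_use_id_ always points at the chain's |
| // terminating slot (initially &next_use_), so mark_use can append the next |
| // use in O(1). |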
| |
| struct LiveRange { |
| NodeIdT start = kInvalidNodeId; |
| NodeIdT end = kInvalidNodeId; // Inclusive. |
| }; |
| |
| bool has_valid_live_range() const { return end_id_ != 0; } |
| LiveRange live_range() const { return {start_id(), end_id_}; } |
| NodeIdT next_use() const { return next_use_; } |
| |
| // The following method should only be used during register allocation, to |
| // mark the _current_ state of this Node according to the register allocator. |
| void set_next_use(NodeIdT use) { next_use_ = use; } |
| |
| // A node is dead once it has no more upcoming uses. |
| bool is_dead() const { return next_use_ == kInvalidNodeId; } |
| |
| constexpr bool use_double_register() const { |
| return IsDoubleRepresentation(properties().value_representation()); |
| } |
| |
| constexpr bool is_tagged() const { |
| return (properties().value_representation() == |
| ValueRepresentation::kTagged); |
| } |
| |
| constexpr bool decompresses_tagged_result() const { |
| return TaggedResultNeedsDecompressField::decode(bitfield()); |
| } |
| void SetTaggedResultNeedsDecompress() { |
| DCHECK_IMPLIES(!Is<Identity>(), is_tagged()); |
| DCHECK_IMPLIES(Is<Identity>(), input(0).node()->is_tagged()); |
| set_bitfield(TaggedResultNeedsDecompressField::update(bitfield(), true)); |
| if (Is<Phi>()) { |
| for (Input& input : *this) { |
| // Avoid endless recursion by terminating on values already marked. |
| if (input.node()->decompresses_tagged_result()) continue; |
| input.node()->SetTaggedResultNeedsDecompress(); |
| } |
| } else if (Is<Identity>()) { |
| DCHECK_EQ(input_count(), 1); |
| input(0).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| |
| constexpr ValueRepresentation value_representation() const { |
| return properties().value_representation(); |
| } |
| |
| constexpr MachineRepresentation GetMachineRepresentation() const { |
| switch (properties().value_representation()) { |
| case ValueRepresentation::kTagged: |
| return MachineRepresentation::kTagged; |
| case ValueRepresentation::kInt32: |
| case ValueRepresentation::kUint32: |
| return MachineRepresentation::kWord32; |
| case ValueRepresentation::kWord64: |
| return MachineRepresentation::kWord64; |
| case ValueRepresentation::kFloat64: |
| return MachineRepresentation::kFloat64; |
| case ValueRepresentation::kHoleyFloat64: |
| return MachineRepresentation::kFloat64; |
| } |
| } |
| |
| void AddRegister(Register reg) { |
| DCHECK(!use_double_register()); |
| registers_with_result_.set(reg); |
| } |
| void AddRegister(DoubleRegister reg) { |
| DCHECK(use_double_register()); |
| double_registers_with_result_.set(reg); |
| } |
| |
| void RemoveRegister(Register reg) { |
| DCHECK(!use_double_register()); |
| registers_with_result_.clear(reg); |
| } |
| void RemoveRegister(DoubleRegister reg) { |
| DCHECK(use_double_register()); |
| double_registers_with_result_.clear(reg); |
| } |
| |
| template <typename T> |
| inline RegListBase<T> ClearRegisters(); |
| |
| int num_registers() const { |
| if (use_double_register()) { |
| return double_registers_with_result_.Count(); |
| } |
| return registers_with_result_.Count(); |
| } |
| bool has_register() const { |
| if (use_double_register()) { |
| return double_registers_with_result_ != kEmptyDoubleRegList; |
| } |
| return registers_with_result_ != kEmptyRegList; |
| } |
| bool is_in_register(Register reg) const { |
| DCHECK(!use_double_register()); |
| return registers_with_result_.has(reg); |
| } |
| bool is_in_register(DoubleRegister reg) const { |
| DCHECK(use_double_register()); |
| return double_registers_with_result_.has(reg); |
| } |
| |
| template <typename T> |
| RegListBase<T> result_registers() { |
| if constexpr (std::is_same<T, DoubleRegister>::value) { |
| DCHECK(use_double_register()); |
| return double_registers_with_result_; |
| } else { |
| DCHECK(!use_double_register()); |
| return registers_with_result_; |
| } |
| } |
| |
| compiler::InstructionOperand allocation() const { |
| if (has_register()) { |
| return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER, |
| GetMachineRepresentation(), |
| FirstRegisterCode()); |
| } |
| DCHECK(is_loadable()); |
| return spill_; |
| } |
| |
| protected: |
| explicit ValueNode(uint64_t bitfield) |
| : Node(bitfield), |
| last_uses_next_use_id_(&next_use_), |
| hint_(compiler::InstructionOperand()) |
| #ifdef DEBUG |
| , |
| state_(kLastUse) |
| #endif // DEBUG |
| { |
| if (use_double_register()) { |
| double_registers_with_result_ = kEmptyDoubleRegList; |
| } else { |
| registers_with_result_ = kEmptyRegList; |
| } |
| } |
| |
| int FirstRegisterCode() const { |
| if (use_double_register()) { |
| return double_registers_with_result_.first().code(); |
| } |
| return registers_with_result_.first().code(); |
| } |
| |
| // Rename for better pairing with `end_id`. |
| NodeIdT start_id() const { return id(); } |
| |
| NodeIdT end_id_ = kInvalidNodeId; |
| NodeIdT next_use_ = kInvalidNodeId; |
| ValueLocation result_; |
| union { |
| RegList registers_with_result_; |
| DoubleRegList double_registers_with_result_; |
| }; |
| union { |
| // Pointer to the current last use's next_use_id field. Most of the time |
| // this will be a pointer to an InputLocation's next_use_id_ field, but it's |
| // initialized to this node's next_use_ to track the first use. |
| NodeIdT* last_uses_next_use_id_; |
| compiler::InstructionOperand spill_; |
| }; |
| compiler::InstructionOperand hint_; |
| #ifdef DEBUG |
| enum { kLastUse, kSpill } state_; |
| #endif // DEBUG |
| }; |
| |
| template <> |
| inline RegList ValueNode::ClearRegisters() { |
| DCHECK(!use_double_register()); |
| return std::exchange(registers_with_result_, kEmptyRegList); |
| } |
| |
| template <> |
| inline DoubleRegList ValueNode::ClearRegisters() { |
| DCHECK(use_double_register()); |
| return std::exchange(double_registers_with_result_, kEmptyDoubleRegList); |
| } |
| |
| ValueLocation& Node::result() { |
| DCHECK(Is<ValueNode>()); |
| return Cast<ValueNode>()->result(); |
| } |
| |
| // Mixin for a node with known class (and therefore known opcode and static |
| // properties), but a possibly unknown number of inputs. |
| template <typename Base, typename Derived> |
| class NodeTMixin : public Base { |
| public: |
| // Shadowing for static knowledge. |
| constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; } |
| constexpr const OpProperties& properties() const { |
| return Derived::kProperties; |
| } |
| |
| template <typename... Args> |
| static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs, |
| Args&&... args) { |
| return NodeBase::New<Derived>(zone, inputs, std::forward<Args>(args)...); |
| } |
| template <typename... Args> |
| static Derived* New(Zone* zone, size_t input_count, Args&&... args) { |
| return NodeBase::New<Derived>(zone, input_count, std::forward<Args>(args)...); |
| } |
| |
| protected: |
| template <typename... Args> |
| explicit NodeTMixin(uint64_t bitfield, Args&&... args) |
| : Base(bitfield, std::forward<Args>(args)...) { |
| DCHECK_EQ(NodeBase::opcode(), NodeBase::opcode_of<Derived>); |
| DCHECK_EQ(NodeBase::properties(), Derived::kProperties); |
| } |
| }; |
| |
| namespace detail { |
| // Helper class for defining input types as a std::array, but without |
| // accidental initialization with a wrong-sized initializer_list. |
| template <size_t Size> |
| class ArrayWrapper : public std::array<ValueRepresentation, Size> { |
| public: |
| template <typename... Args> |
| explicit constexpr ArrayWrapper(Args&&... args) |
| : std::array<ValueRepresentation, Size>({args...}) { |
| static_assert(sizeof...(args) == Size); |
| } |
| }; |
| struct YouNeedToDefineAnInputTypesArrayInYourDerivedClass {}; |
| } // namespace detail |
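| |
| // Example (illustrative): for a two-input node, |
| // |
| //   static constexpr typename Base::InputTypes kInputTypes{ |
| //       ValueRepresentation::kTagged, ValueRepresentation::kTagged}; |
| // |
| // compiles, while passing any other number of arguments trips ArrayWrapper's |
| // static_assert instead of silently value-initializing the array. |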
| |
| // Mixin for a node with known class (and therefore known opcode and static |
| // properties), and a known number of inputs. |
| template <size_t InputCount, typename Base, typename Derived> |
| class FixedInputNodeTMixin : public NodeTMixin<Base, Derived> { |
| static constexpr size_t kInputCount = InputCount; |
| |
| public: |
| // Shadowing for static knowledge. |
| constexpr bool has_inputs() const { return input_count() > 0; } |
| constexpr uint16_t input_count() const { return kInputCount; } |
| constexpr auto end() { |
| return std::make_reverse_iterator(&this->input(input_count() - 1)); |
| } |
| |
| void VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| if constexpr (kInputCount != 0) { |
| static_assert( |
| std::is_same_v<const InputTypes, decltype(Derived::kInputTypes)>); |
| static_assert(kInputCount == Derived::kInputTypes.size()); |
| for (int i = 0; i < static_cast<int>(kInputCount); ++i) { |
| CheckValueInputIs(this, i, Derived::kInputTypes[i], graph_labeller); |
| } |
| } |
| } |
| |
| void MarkTaggedInputsAsDecompressing() const { |
| if constexpr (kInputCount != 0) { |
| static_assert( |
| std::is_same_v<const InputTypes, decltype(Derived::kInputTypes)>); |
| static_assert(kInputCount == Derived::kInputTypes.size()); |
| for (int i = 0; i < static_cast<int>(kInputCount); ++i) { |
| if (Derived::kInputTypes[i] == ValueRepresentation::kTagged) { |
| ValueNode* input_node = this->input(i).node(); |
| input_node->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| } |
| } |
| |
| protected: |
| using InputTypes = detail::ArrayWrapper<kInputCount>; |
| detail::YouNeedToDefineAnInputTypesArrayInYourDerivedClass kInputTypes; |
| |
| template <typename... Args> |
| explicit FixedInputNodeTMixin(uint64_t bitfield, Args&&... args) |
| : NodeTMixin<Base, Derived>(bitfield, std::forward<Args>(args)...) { |
| DCHECK_EQ(NodeBase::input_count(), kInputCount); |
| } |
| }; |
| |
| template <class Derived> |
| using NodeT = NodeTMixin<Node, Derived>; |
| |
| template <class Derived> |
| using ValueNodeT = NodeTMixin<ValueNode, Derived>; |
| |
| template <size_t InputCount, class Derived> |
| using FixedInputNodeT = |
| FixedInputNodeTMixin<InputCount, NodeT<Derived>, Derived>; |
| |
| template <size_t InputCount, class Derived> |
| using FixedInputValueNodeT = |
| FixedInputNodeTMixin<InputCount, ValueNodeT<Derived>, Derived>; |
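| |
| // Example (illustrative): a typical fixed-input value node derives from |
| // FixedInputValueNodeT with itself as the Derived parameter (CRTP) and |
| // provides kProperties, kInputTypes and the codegen hooks. MyUnaryOp below |
| // is hypothetical; the concrete node classes that follow are real instances |
| // of the same pattern. |
| // |
| //   class MyUnaryOp : public FixedInputValueNodeT<1, MyUnaryOp> { |
| //     using Base = FixedInputValueNodeT<1, MyUnaryOp>; |
| // |
| //    public: |
| //     explicit MyUnaryOp(uint64_t bitfield) : Base(bitfield) {} |
| // |
| //     static constexpr OpProperties kProperties = OpProperties::Int32(); |
| //     static constexpr |
| //         typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32}; |
| // |
| //     Input& value_input() { return Node::input(0); } |
| // |
| //     void SetValueLocationConstraints(); |
| //     void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| //     void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| //   }; |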
| |
| class Identity : public FixedInputValueNodeT<1, Identity> { |
| using Base = FixedInputValueNodeT<1, Identity>; |
| |
| public: |
| static constexpr OpProperties kProperties = OpProperties::Pure(); |
| |
| explicit Identity(uint64_t bitfield) : Base(bitfield) {} |
| |
| void VerifyInputs(MaglevGraphLabeller*) const { |
| // Identity is valid for all input types. |
| } |
| void MarkTaggedInputsAsDecompressing() { |
| // Do not mark the input as decompressing here, since we don't yet know |
| // whether this Identity's result needs decompression. Instead, let |
| // ValueNode::SetTaggedResultNeedsDecompress pass through Identity nodes. |
| } |
| void SetValueLocationConstraints() {} |
| void GenerateCode(MaglevAssembler*, const ProcessingState&) {} |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> { |
| using Base = FixedInputValueNodeT<1, Derived>; |
| |
| public: |
| // The implementation currently calls into the runtime. |
| static constexpr OpProperties kProperties = OpProperties::JSCall(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged}; |
| |
| static constexpr int kOperandIndex = 0; |
| Input& operand_input() { return Node::input(kOperandIndex); } |
| compiler::FeedbackSource feedback() const { return feedback_; } |
| |
| protected: |
| explicit UnaryWithFeedbackNode(uint64_t bitfield, |
| const compiler::FeedbackSource& feedback) |
| : Base(bitfield), feedback_(feedback) {} |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| |
| const compiler::FeedbackSource feedback_; |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| // The implementation currently calls into the runtime. |
| static constexpr OpProperties kProperties = OpProperties::JSCall(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kTagged, ValueRepresentation::kTagged}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| compiler::FeedbackSource feedback() const { return feedback_; } |
| |
| protected: |
| BinaryWithFeedbackNode(uint64_t bitfield, |
| const compiler::FeedbackSource& feedback) |
| : Base(bitfield), feedback_(feedback) {} |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| |
| const compiler::FeedbackSource feedback_; |
| }; |
| |
| #define DEF_OPERATION_WITH_FEEDBACK_NODE(Name, Super, OpName) \ |
| class Name : public Super<Name, Operation::k##OpName> { \ |
| using Base = Super<Name, Operation::k##OpName>; \ |
| \ |
| public: \ |
| Name(uint64_t bitfield, const compiler::FeedbackSource& feedback) \ |
| : Base(bitfield, feedback) {} \ |
| int MaxCallStackArgs() const { return 0; } \ |
| void SetValueLocationConstraints(); \ |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); \ |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \ |
| }; |
| |
| #define DEF_UNARY_WITH_FEEDBACK_NODE(Name) \ |
| DEF_OPERATION_WITH_FEEDBACK_NODE(Generic##Name, UnaryWithFeedbackNode, Name) |
| #define DEF_BINARY_WITH_FEEDBACK_NODE(Name) \ |
| DEF_OPERATION_WITH_FEEDBACK_NODE(Generic##Name, BinaryWithFeedbackNode, Name) |
| UNARY_OPERATION_LIST(DEF_UNARY_WITH_FEEDBACK_NODE) |
| ARITHMETIC_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE) |
| COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE) |
| #undef DEF_UNARY_WITH_FEEDBACK_NODE |
| #undef DEF_BINARY_WITH_FEEDBACK_NODE |
| #undef DEF_OPERATION_WITH_FEEDBACK_NODE |
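| |
| // For instance, DEF_BINARY_WITH_FEEDBACK_NODE(Add) above expands (roughly) to |
| // |
| //   class GenericAdd |
| //       : public BinaryWithFeedbackNode<GenericAdd, Operation::kAdd> {...}; |
| // |
| // matching the GenericAdd entry in GENERIC_OPERATIONS_NODE_LIST. |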
| |
| template <class Derived, Operation kOperation> |
| class Int32BinaryWithOverflowNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = |
| OpProperties::EagerDeopt() | OpProperties::Int32(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kInt32, ValueRepresentation::kInt32}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Int32BinaryWithOverflowNode(uint64_t bitfield) : Base(bitfield) {} |
| |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| #define DEF_OPERATION_NODE(Name, Super, OpName) \ |
| class Name : public Super<Name, Operation::k##OpName> { \ |
| using Base = Super<Name, Operation::k##OpName>; \ |
| \ |
| public: \ |
| explicit Name(uint64_t bitfield) : Base(bitfield) {} \ |
| void SetValueLocationConstraints(); \ |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); \ |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \ |
| }; |
| |
| #define DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Name) \ |
| DEF_OPERATION_NODE(Int32##Name##WithOverflow, Int32BinaryWithOverflowNode, \ |
| Name) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Add) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Subtract) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Multiply) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Divide) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Modulus) |
| #undef DEF_INT32_BINARY_WITH_OVERFLOW_NODE |
| |
| template <class Derived, Operation kOperation> |
| class Int32BinaryNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = OpProperties::Int32(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kInt32, ValueRepresentation::kInt32}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Int32BinaryNode(uint64_t bitfield) : Base(bitfield) {} |
| }; |
| |
| #define DEF_INT32_BINARY_NODE(Name) \ |
| DEF_OPERATION_NODE(Int32##Name, Int32BinaryNode, Name) |
| DEF_INT32_BINARY_NODE(BitwiseAnd) |
| DEF_INT32_BINARY_NODE(BitwiseOr) |
| DEF_INT32_BINARY_NODE(BitwiseXor) |
| DEF_INT32_BINARY_NODE(ShiftLeft) |
| DEF_INT32_BINARY_NODE(ShiftRight) |
| #undef DEF_INT32_BINARY_NODE |
| |
| class Int32BitwiseNot : public FixedInputValueNodeT<1, Int32BitwiseNot> { |
| using Base = FixedInputValueNodeT<1, Int32BitwiseNot>; |
| |
| public: |
| explicit Int32BitwiseNot(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::Int32(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32}; |
| |
| static constexpr int kValueIndex = 0; |
| Input& value_input() { return Node::input(kValueIndex); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class Int32UnaryWithOverflowNode : public FixedInputValueNodeT<1, Derived> { |
| using Base = FixedInputValueNodeT<1, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = |
| OpProperties::EagerDeopt() | OpProperties::Int32(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32}; |
| |
| static constexpr int kValueIndex = 0; |
| Input& value_input() { return Node::input(kValueIndex); } |
| |
| protected: |
| explicit Int32UnaryWithOverflowNode(uint64_t bitfield) : Base(bitfield) {} |
| }; |
| |
| #define DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Name) \ |
| DEF_OPERATION_NODE(Int32##Name##WithOverflow, Int32UnaryWithOverflowNode, \ |
| Name) |
| DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Negate) |
| DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Increment) |
| DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Decrement) |
| #undef DEF_INT32_UNARY_WITH_OVERFLOW_NODE |
| |
| class Int32ShiftRightLogical |
| : public FixedInputValueNodeT<2, Int32ShiftRightLogical> { |
| using Base = FixedInputValueNodeT<2, Int32ShiftRightLogical>; |
| |
| public: |
| explicit Int32ShiftRightLogical(uint64_t bitfield) : Base(bitfield) {} |
| |
| // Unlike the other Int32 nodes, logical right shift returns a Uint32. |
| static constexpr OpProperties kProperties = OpProperties::Uint32(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kInt32, ValueRepresentation::kInt32}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class Int32CompareNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kInt32, ValueRepresentation::kInt32}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Int32CompareNode(uint64_t bitfield) : Base(bitfield) {} |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| #define DEF_INT32_COMPARE_NODE(Name) \ |
| DEF_OPERATION_NODE(Int32##Name, Int32CompareNode, Name) |
| DEF_INT32_COMPARE_NODE(Equal) |
| DEF_INT32_COMPARE_NODE(StrictEqual) |
| DEF_INT32_COMPARE_NODE(LessThan) |
| DEF_INT32_COMPARE_NODE(LessThanOrEqual) |
| DEF_INT32_COMPARE_NODE(GreaterThan) |
| DEF_INT32_COMPARE_NODE(GreaterThanOrEqual) |
| #undef DEF_INT32_COMPARE_NODE |
| |
| template <class Derived, Operation kOperation> |
| class Float64BinaryNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = OpProperties::Float64(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kHoleyFloat64, ValueRepresentation::kHoleyFloat64}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Float64BinaryNode(uint64_t bitfield) : Base(bitfield) {} |
| |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| #define DEF_OPERATION_NODE_WITH_CALL(Name, Super, OpName) \ |
| class Name : public Super<Name, Operation::k##OpName> { \ |
| using Base = Super<Name, Operation::k##OpName>; \ |
| \ |
| public: \ |
| explicit Name(uint64_t bitfield) : Base(bitfield) {} \ |
| int MaxCallStackArgs() const; \ |
| void SetValueLocationConstraints(); \ |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); \ |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \ |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class Float64BinaryNodeWithCall : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = |
| OpProperties::Float64() | OpProperties::Call(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kHoleyFloat64, ValueRepresentation::kHoleyFloat64}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Float64BinaryNodeWithCall(uint64_t bitfield) : Base(bitfield) {} |
| |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| #define DEF_FLOAT64_BINARY_NODE(Name) \ |
| DEF_OPERATION_NODE(Float64##Name, Float64BinaryNode, Name) |
| #define DEF_FLOAT64_BINARY_NODE_WITH_CALL(Name) \ |
| DEF_OPERATION_NODE_WITH_CALL(Float64##Name, Float64BinaryNodeWithCall, Name) |
| DEF_FLOAT64_BINARY_NODE(Add) |
| DEF_FLOAT64_BINARY_NODE(Subtract) |
| DEF_FLOAT64_BINARY_NODE(Multiply) |
| DEF_FLOAT64_BINARY_NODE(Divide) |
| #ifdef V8_TARGET_ARCH_ARM64 |
| // On Arm64, floating point modulus is implemented with a call to a C++ |
| // function, while on x64 it is implemented natively, without a call. |
| DEF_FLOAT64_BINARY_NODE_WITH_CALL(Modulus) |
| #else |
| DEF_FLOAT64_BINARY_NODE(Modulus) |
| #endif |
| DEF_FLOAT64_BINARY_NODE_WITH_CALL(Exponentiate) |
| #undef DEF_FLOAT64_BINARY_NODE |
| #undef DEF_FLOAT64_BINARY_NODE_WITH_CALL |
| |
| template <class Derived, Operation kOperation> |
| class Float64CompareNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kFloat64, ValueRepresentation::kFloat64}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Float64CompareNode(uint64_t bitfield) : Base(bitfield) {} |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| #define DEF_FLOAT64_COMPARE_NODE(Name) \ |
| DEF_OPERATION_NODE(Float64##Name, Float64CompareNode, Name) |
| DEF_FLOAT64_COMPARE_NODE(Equal) |
| DEF_FLOAT64_COMPARE_NODE(StrictEqual) |
| DEF_FLOAT64_COMPARE_NODE(LessThan) |
| DEF_FLOAT64_COMPARE_NODE(LessThanOrEqual) |
| DEF_FLOAT64_COMPARE_NODE(GreaterThan) |
| DEF_FLOAT64_COMPARE_NODE(GreaterThanOrEqual) |
| #undef DEF_FLOAT64_COMPARE_NODE |
| |
| #undef DEF_OPERATION_NODE |
| #undef DEF_OPERATION_NODE_WITH_CALL |
| |
| class Float64Negate : public FixedInputValueNodeT<1, Float64Negate> { |
| using Base = FixedInputValueNodeT<1, Float64Negate>; |
| |
| public: |
| explicit Float64Negate(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::Float64(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kHoleyFloat64}; |
| |
| Input& input() { return Node::input(0); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class Float64Ieee754Unary |
| : public FixedInputValueNodeT<1, Float64Ieee754Unary> { |
| using Base = FixedInputValueNodeT<1, Float64Ieee754Unary>; |
| |
| public: |
| explicit Float64Ieee754Unary(uint64_t bitfield, |
| ExternalReference ieee_function) |
| : Base(bitfield), ieee_function_(ieee_function) {} |
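| |
| // `ieee_function` is the C math routine to call; e.g. (illustrative) |
| // ExternalReference::ieee754_sin_function() for Math.sin. |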
| |
| static constexpr OpProperties kProperties = |
| OpProperties::Float64() | OpProperties::Call(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kHoleyFloat64}; |
| |
| Input& input() { return Node::input(0); } |
| |
| int MaxCallStackArgs() const; |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const; |
| |
| private: |
| ExternalReference ieee_function_; |
| }; |
| |
| class CheckInt32IsSmi : public FixedInputNodeT<1, CheckInt32IsSmi> { |
| using Base = FixedInputNodeT<1, CheckInt32IsSmi>; |
| |
| public: |
| explicit CheckInt32IsSmi(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32}; |
| |
| Input& input() { return Node::input(0); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class CheckUint32IsSmi : public FixedInputNodeT<1, CheckUint32IsSmi> { |
| using Base = FixedInputNodeT<1, CheckUint32IsSmi>; |
| |
| public: |
| explicit CheckUint32IsSmi(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kUint32}; |
| |
| Input& input() { return Node::input(0); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class CheckHoleyFloat64IsSmi |
| : public FixedInputNodeT<1, CheckHoleyFloat64IsSmi> { |
| using Base = FixedInputNodeT<1, CheckHoleyFloat64IsSmi>; |
| |
| public: |
| explicit CheckHoleyFloat64IsSmi(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kHoleyFloat64}; |
| |
| Input& input() { return Node::input(0); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class CheckedSmiTagInt32 : public FixedInputValueNodeT<1, CheckedSmiTagInt32> { |
| using Base = FixedInputValueNodeT<1, CheckedSmiTagInt32>; |
| |
| public: |
| explicit CheckedSmiTagInt32(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = |
| OpProperties::EagerDeopt() | OpProperties::ConversionNode(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32}; |
| |
| Input& input() { return Node::input(0); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class CheckedSmiTagUint32 |
| : public FixedInputValueNodeT<1, CheckedSmiTagUint32> { |
| using Base = FixedInputValueNodeT<1, CheckedSmiTagUint32>; |
| |
| public: |
| explicit CheckedSmiTagUint32(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = |
| OpProperties::EagerDeopt() | OpProperties::ConversionNode(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kUint32}; |
| |
| Input& input() { return Node::input(0); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| // The input must be guaranteed to fit in a Smi. |
| class UnsafeSmiTag : public FixedInputValueNodeT<1, UnsafeSmiTag> { |
| using Base = FixedInputValueNodeT<1, UnsafeSmiTag>; |
| |
| public: |
| explicit UnsafeSmiTag(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::ConversionNode(); |
| |
| Input& input() { return Node::input(0); } |
| |
| void VerifyInputs(MaglevGraphLabeller*) const; |
| void MarkTaggedInputsAsDecompressing() { |
| // No tagged inputs. |
| } |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class CheckedSmiUntag : public FixedInputValueNodeT<1, CheckedSmiUntag> { |
| using Base = FixedInputValueNodeT<1, CheckedSmiUntag>; |
| |
| public: |
| explicit CheckedSmiUntag(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::EagerDeopt() | |
| OpProperties::Int32() | |
| OpProperties::ConversionNode(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged}; |
| |
| Input& input() { return Node::input(0); } |
| |
| void MarkTaggedInputsAsDecompressing() { |
| // Don't need to decompress to untag. |
| } |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class UnsafeSmiUntag : public FixedInputValueNodeT<1, UnsafeSmiUntag> { |
| using Base = FixedInputValueNodeT<1, UnsafeSmiUntag>; |
| |
| public: |
| explicit UnsafeSmiUntag(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = |
| OpProperties::Int32() | OpProperties::ConversionNode(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged}; |
| |
| Input& input() { return Node::input(0); } |
| |
| void MarkTaggedInputsAsDecompressing() { |
| // Don't need to decompress to untag. |
| } |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> { |
| using Base = FixedInputValueNodeT<0, Int32Constant>; |
| |
| public: |
| using OutputRegister = Register; |
| |
| explicit Int32Constant(uint64_t bitfield, int32_t value) |
| : Base(bitfield), value_(value) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::Int32(); |
| |
| int32_t value() const { return value_; } |
| |
| bool ToBoolean(LocalIsolate* local_isolate) const { return value_ != 0; } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const; |
| |
| void DoLoadToRegister(MaglevAssembler*, OutputRegister); |
| Handle<Object> DoReify(LocalIsolate* isolate) const; |
| |
| private: |
| const int32_t value_; |
| }; |
| |
| class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> { |
| using Base = FixedInputValueNodeT<0, Float64Constant>; |
| |
| public: |
| using OutputRegister = DoubleRegister; |
| |
| explicit Float64Constant(uint64_t bitfield, Float64 value) |
| : Base(bitfield), value_(value) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::Float64(); |
| |
| Float64 value()
|