| // Copyright 2021 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #ifndef V8_MAGLEV_MAGLEV_IR_H_ |
| #define V8_MAGLEV_MAGLEV_IR_H_ |
| |
| #include "src/base/bit-field.h" |
| #include "src/base/bits.h" |
| #include "src/base/discriminated-union.h" |
| #include "src/base/enum-set.h" |
| #include "src/base/logging.h" |
| #include "src/base/macros.h" |
| #include "src/base/small-vector.h" |
| #include "src/base/threaded-list.h" |
| #include "src/codegen/label.h" |
| #include "src/codegen/machine-type.h" |
| #include "src/codegen/reglist.h" |
| #include "src/codegen/source-position.h" |
| #include "src/common/globals.h" |
| #include "src/common/operation.h" |
| #include "src/compiler/access-info.h" |
| #include "src/compiler/backend/instruction.h" |
| #include "src/compiler/feedback-source.h" |
| #include "src/compiler/heap-refs.h" |
| // TODO(dmercadier): move the Turboshaft utils functions to shared code (in |
| // particular, any_of, which is the reason we're including this Turboshaft |
| // header) |
| #include "src/compiler/turboshaft/snapshot-table.h" |
| #include "src/compiler/turboshaft/utils.h" |
| #include "src/deoptimizer/deoptimize-reason.h" |
| #include "src/interpreter/bytecode-flags.h" |
| #include "src/interpreter/bytecode-register.h" |
| #include "src/maglev/maglev-compilation-unit.h" |
| #include "src/objects/smi.h" |
| #include "src/objects/tagged-index.h" |
| #include "src/roots/roots.h" |
| #include "src/utils/utils.h" |
| #include "src/zone/zone.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| enum Condition : int; |
| |
| namespace maglev { |
| |
| class BasicBlock; |
| class ProcessingState; |
| class MaglevAssembler; |
| class MaglevCodeGenState; |
| class MaglevCompilationUnit; |
| class MaglevGraphLabeller; |
| class MaglevVregAllocationState; |
| class CompactInterpreterFrameState; |
| class MergePointInterpreterFrameState; |
| |
| // Nodes are either |
| // 1. side-effecting or value-holding SSA nodes in the body of basic blocks, or |
| // 2. control nodes that represent the control flow at the end of basic blocks, |
| //    and form a separate node hierarchy from non-control nodes. |
| // |
| // The macro lists below must match the node class hierarchy. |
| |
| #define GENERIC_OPERATIONS_NODE_LIST(V) \ |
| V(GenericAdd) \ |
| V(GenericSubtract) \ |
| V(GenericMultiply) \ |
| V(GenericDivide) \ |
| V(GenericModulus) \ |
| V(GenericExponentiate) \ |
| V(GenericBitwiseAnd) \ |
| V(GenericBitwiseOr) \ |
| V(GenericBitwiseXor) \ |
| V(GenericShiftLeft) \ |
| V(GenericShiftRight) \ |
| V(GenericShiftRightLogical) \ |
| V(GenericBitwiseNot) \ |
| V(GenericNegate) \ |
| V(GenericIncrement) \ |
| V(GenericDecrement) \ |
| V(GenericEqual) \ |
| V(GenericStrictEqual) \ |
| V(GenericLessThan) \ |
| V(GenericLessThanOrEqual) \ |
| V(GenericGreaterThan) \ |
| V(GenericGreaterThanOrEqual) |
| |
| #define INT32_OPERATIONS_NODE_LIST(V) \ |
| V(Int32AddWithOverflow) \ |
| V(Int32SubtractWithOverflow) \ |
| V(Int32MultiplyWithOverflow) \ |
| V(Int32DivideWithOverflow) \ |
| V(Int32ModulusWithOverflow) \ |
| V(Int32BitwiseAnd) \ |
| V(Int32BitwiseOr) \ |
| V(Int32BitwiseXor) \ |
| V(Int32ShiftLeft) \ |
| V(Int32ShiftRight) \ |
| V(Int32ShiftRightLogical) \ |
| V(Int32BitwiseNot) \ |
| V(Int32NegateWithOverflow) \ |
| V(Int32IncrementWithOverflow) \ |
| V(Int32DecrementWithOverflow) \ |
| V(Int32Compare) \ |
| V(Int32ToBoolean) |
| |
| #define FLOAT64_OPERATIONS_NODE_LIST(V) \ |
| V(Float64Add) \ |
| V(Float64Subtract) \ |
| V(Float64Multiply) \ |
| V(Float64Divide) \ |
| V(Float64Exponentiate) \ |
| V(Float64Modulus) \ |
| V(Float64Negate) \ |
| V(Float64Round) \ |
| V(Float64Compare) \ |
| V(Float64ToBoolean) \ |
| V(Float64Ieee754Unary) |
| |
| #define SMI_OPERATIONS_NODE_LIST(V) \ |
| V(CheckedSmiIncrement) \ |
| V(CheckedSmiDecrement) |
| |
| #define CONSTANT_VALUE_NODE_LIST(V) \ |
| V(Constant) \ |
| V(ExternalConstant) \ |
| V(Float64Constant) \ |
| V(Int32Constant) \ |
| V(Uint32Constant) \ |
| V(RootConstant) \ |
| V(SmiConstant) \ |
| V(TaggedIndexConstant) |
| |
| #define INLINE_BUILTIN_NODE_LIST(V) \ |
| V(BuiltinStringFromCharCode) \ |
| V(BuiltinStringPrototypeCharCodeOrCodePointAt) |
| |
| #define VALUE_NODE_LIST(V) \ |
| V(Identity) \ |
| V(AllocateRaw) \ |
| V(Call) \ |
| V(CallBuiltin) \ |
| V(CallCPPBuiltin) \ |
| V(CallRuntime) \ |
| V(CallWithArrayLike) \ |
| V(CallWithSpread) \ |
| V(CallKnownApiFunction) \ |
| V(CallKnownJSFunction) \ |
| V(CallSelf) \ |
| V(Construct) \ |
| V(CheckConstructResult) \ |
| V(CheckDerivedConstructResult) \ |
| V(CheckNotHole) \ |
| V(ConstructWithSpread) \ |
| V(ConvertReceiver) \ |
| V(ConvertHoleToUndefined) \ |
| V(CreateArrayLiteral) \ |
| V(CreateShallowArrayLiteral) \ |
| V(CreateObjectLiteral) \ |
| V(CreateShallowObjectLiteral) \ |
| V(CreateFunctionContext) \ |
| V(CreateClosure) \ |
| V(FastCreateClosure) \ |
| V(CreateRegExpLiteral) \ |
| V(DeleteProperty) \ |
| V(EnsureWritableFastElements) \ |
| V(FoldedAllocation) \ |
| V(ForInPrepare) \ |
| V(ForInNext) \ |
| V(GeneratorRestoreRegister) \ |
| V(GetIterator) \ |
| V(GetSecondReturnedValue) \ |
| V(GetTemplateObject) \ |
| V(HasInPrototypeChain) \ |
| V(InitialValue) \ |
| V(LoadPolymorphicDoubleField) \ |
| V(LoadPolymorphicTaggedField) \ |
| V(LoadTaggedField) \ |
| V(LoadDoubleField) \ |
| V(LoadTaggedFieldByFieldIndex) \ |
| V(LoadFixedArrayElement) \ |
| V(LoadFixedDoubleArrayElement) \ |
| V(LoadHoleyFixedDoubleArrayElement) \ |
| V(LoadHoleyFixedDoubleArrayElementCheckedNotHole) \ |
| V(LoadSignedIntDataViewElement) \ |
| V(LoadDoubleDataViewElement) \ |
| V(LoadTypedArrayLength) \ |
| V(LoadSignedIntTypedArrayElement) \ |
| V(LoadUnsignedIntTypedArrayElement) \ |
| V(LoadDoubleTypedArrayElement) \ |
| V(LoadEnumCacheLength) \ |
| V(LoadGlobal) \ |
| V(LoadNamedGeneric) \ |
| V(LoadNamedFromSuperGeneric) \ |
| V(MaybeGrowAndEnsureWritableFastElements) \ |
| V(SetNamedGeneric) \ |
| V(DefineNamedOwnGeneric) \ |
| V(StoreInArrayLiteralGeneric) \ |
| V(StoreGlobal) \ |
| V(GetKeyedGeneric) \ |
| V(SetKeyedGeneric) \ |
| V(DefineKeyedOwnGeneric) \ |
| V(Phi) \ |
| V(RegisterInput) \ |
| V(CheckedSmiSizedInt32) \ |
| V(CheckedSmiTagInt32) \ |
| V(CheckedSmiTagUint32) \ |
| V(UnsafeSmiTag) \ |
| V(CheckedSmiUntag) \ |
| V(UnsafeSmiUntag) \ |
| V(CheckedInternalizedString) \ |
| V(CheckedObjectToIndex) \ |
| V(CheckedTruncateNumberOrOddballToInt32) \ |
| V(CheckedInt32ToUint32) \ |
| V(CheckedUint32ToInt32) \ |
| V(ChangeInt32ToFloat64) \ |
| V(ChangeUint32ToFloat64) \ |
| V(CheckedTruncateFloat64ToInt32) \ |
| V(CheckedTruncateFloat64ToUint32) \ |
| V(TruncateNumberOrOddballToInt32) \ |
| V(TruncateUint32ToInt32) \ |
| V(TruncateFloat64ToInt32) \ |
| V(UnsafeTruncateUint32ToInt32) \ |
| V(UnsafeTruncateFloat64ToInt32) \ |
| V(Int32ToUint8Clamped) \ |
| V(Uint32ToUint8Clamped) \ |
| V(Float64ToUint8Clamped) \ |
| V(CheckedNumberToUint8Clamped) \ |
| V(Int32ToNumber) \ |
| V(Uint32ToNumber) \ |
| V(Float64ToTagged) \ |
| V(HoleyFloat64ToTagged) \ |
| V(CheckedSmiTagFloat64) \ |
| V(CheckedNumberOrOddballToFloat64) \ |
| V(UncheckedNumberOrOddballToFloat64) \ |
| V(CheckedHoleyFloat64ToFloat64) \ |
| V(HoleyFloat64ToMaybeNanFloat64) \ |
| V(LogicalNot) \ |
| V(SetPendingMessage) \ |
| V(StringAt) \ |
| V(StringEqual) \ |
| V(StringLength) \ |
| V(StringConcat) \ |
| V(ToBoolean) \ |
| V(ToBooleanLogicalNot) \ |
| V(TaggedEqual) \ |
| V(TaggedNotEqual) \ |
| V(TestInstanceOf) \ |
| V(TestUndetectable) \ |
| V(TestTypeOf) \ |
| V(ToName) \ |
| V(ToNumberOrNumeric) \ |
| V(ToObject) \ |
| V(ToString) \ |
| V(NumberToString) \ |
| V(UpdateJSArrayLength) \ |
| CONSTANT_VALUE_NODE_LIST(V) \ |
| INT32_OPERATIONS_NODE_LIST(V) \ |
| FLOAT64_OPERATIONS_NODE_LIST(V) \ |
| SMI_OPERATIONS_NODE_LIST(V) \ |
| GENERIC_OPERATIONS_NODE_LIST(V) \ |
| INLINE_BUILTIN_NODE_LIST(V) |
| |
| #define GAP_MOVE_NODE_LIST(V) \ |
| V(ConstantGapMove) \ |
| V(GapMove) |
| |
| #define NODE_LIST(V) \ |
| V(AssertInt32) \ |
| V(CheckDynamicValue) \ |
| V(CheckInt32IsSmi) \ |
| V(CheckUint32IsSmi) \ |
| V(CheckHoleyFloat64IsSmi) \ |
| V(CheckHeapObject) \ |
| V(CheckInt32Condition) \ |
| V(CheckFixedArrayNonEmpty) \ |
| V(CheckJSDataViewBounds) \ |
| V(CheckTypedArrayBounds) \ |
| V(CheckTypedArrayNotDetached) \ |
| V(CheckMaps) \ |
| V(CheckMapsWithMigration) \ |
| V(CheckNumber) \ |
| V(CheckSmi) \ |
| V(CheckString) \ |
| V(CheckSymbol) \ |
| V(CheckValue) \ |
| V(CheckValueEqualsInt32) \ |
| V(CheckValueEqualsFloat64) \ |
| V(CheckValueEqualsString) \ |
| V(CheckInstanceType) \ |
| V(DebugBreak) \ |
| V(FunctionEntryStackCheck) \ |
| V(GeneratorStore) \ |
| V(TryOnStackReplacement) \ |
| V(StoreMap) \ |
| V(StoreDoubleField) \ |
| V(StoreFixedArrayElementWithWriteBarrier) \ |
| V(StoreFixedArrayElementNoWriteBarrier) \ |
| V(StoreFixedDoubleArrayElement) \ |
| V(StoreFloat64) \ |
| V(StoreIntTypedArrayElement) \ |
| V(StoreDoubleTypedArrayElement) \ |
| V(StoreSignedIntDataViewElement) \ |
| V(StoreDoubleDataViewElement) \ |
| V(StoreTaggedFieldNoWriteBarrier) \ |
| V(StoreTaggedFieldWithWriteBarrier) \ |
| V(HandleNoHeapWritesInterrupt) \ |
| V(ReduceInterruptBudgetForLoop) \ |
| V(ReduceInterruptBudgetForReturn) \ |
| V(ThrowReferenceErrorIfHole) \ |
| V(ThrowSuperNotCalledIfHole) \ |
| V(ThrowSuperAlreadyCalledIfNotHole) \ |
| V(ThrowIfNotCallable) \ |
| V(ThrowIfNotSuperConstructor) \ |
| V(TransitionElementsKind) \ |
| V(TransitionElementsKindOrCheckMap) \ |
| GAP_MOVE_NODE_LIST(V) \ |
| VALUE_NODE_LIST(V) |
| |
| #define BRANCH_CONTROL_NODE_LIST(V) \ |
| V(BranchIfRootConstant) \ |
| V(BranchIfToBooleanTrue) \ |
| V(BranchIfInt32ToBooleanTrue) \ |
| V(BranchIfFloat64ToBooleanTrue) \ |
| V(BranchIfFloat64IsHole) \ |
| V(BranchIfReferenceEqual) \ |
| V(BranchIfInt32Compare) \ |
| V(BranchIfFloat64Compare) \ |
| V(BranchIfUndefinedOrNull) \ |
| V(BranchIfUndetectable) \ |
| V(BranchIfJSReceiver) \ |
| V(BranchIfTypeOf) |
| |
| #define CONDITIONAL_CONTROL_NODE_LIST(V) \ |
| V(Switch) \ |
| BRANCH_CONTROL_NODE_LIST(V) |
| |
| #define UNCONDITIONAL_CONTROL_NODE_LIST(V) \ |
| V(Jump) \ |
| V(CheckpointedJump) \ |
| V(JumpLoop) |
| |
| #define TERMINAL_CONTROL_NODE_LIST(V) \ |
| V(Abort) \ |
| V(Return) \ |
| V(Deopt) |
| |
| #define CONTROL_NODE_LIST(V) \ |
| TERMINAL_CONTROL_NODE_LIST(V) \ |
| CONDITIONAL_CONTROL_NODE_LIST(V) \ |
| UNCONDITIONAL_CONTROL_NODE_LIST(V) |
| |
| #define NODE_BASE_LIST(V) \ |
| NODE_LIST(V) \ |
| CONTROL_NODE_LIST(V) |
| |
| // Define the opcode enum. |
| #define DEF_OPCODES(type) k##type, |
| enum class Opcode : uint16_t { NODE_BASE_LIST(DEF_OPCODES) }; |
| #undef DEF_OPCODES |
| #define PLUS_ONE(type) +1 |
| static constexpr int kOpcodeCount = NODE_BASE_LIST(PLUS_ONE); |
| static constexpr Opcode kFirstOpcode = static_cast<Opcode>(0); |
| static constexpr Opcode kLastOpcode = static_cast<Opcode>(kOpcodeCount - 1); |
| #undef PLUS_ONE |
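| // For illustration: NODE_BASE_LIST(DEF_OPCODES) expands to |
| //   kAssertInt32, kCheckDynamicValue, ..., kJumpLoop, |
| // and NODE_BASE_LIST(PLUS_ONE) expands to +1 +1 ... +1, i.e. the number of |
| // opcodes in the list. |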
| |
| const char* OpcodeToString(Opcode opcode); |
| inline std::ostream& operator<<(std::ostream& os, Opcode opcode) { |
| return os << OpcodeToString(opcode); |
| } |
| |
| #define V(Name) Opcode::k##Name, |
| static constexpr Opcode kFirstValueNodeOpcode = |
| std::min({VALUE_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastValueNodeOpcode = |
| std::max({VALUE_NODE_LIST(V) kFirstOpcode}); |
| static constexpr Opcode kFirstConstantNodeOpcode = |
| std::min({CONSTANT_VALUE_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastConstantNodeOpcode = |
| std::max({CONSTANT_VALUE_NODE_LIST(V) kFirstOpcode}); |
| static constexpr Opcode kFirstGapMoveNodeOpcode = |
| std::min({GAP_MOVE_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastGapMoveNodeOpcode = |
| std::max({GAP_MOVE_NODE_LIST(V) kFirstOpcode}); |
| |
| static constexpr Opcode kFirstNodeOpcode = std::min({NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastNodeOpcode = std::max({NODE_LIST(V) kFirstOpcode}); |
| |
| static constexpr Opcode kFirstBranchControlNodeOpcode = |
| std::min({BRANCH_CONTROL_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastBranchControlNodeOpcode = |
| std::max({BRANCH_CONTROL_NODE_LIST(V) kFirstOpcode}); |
| |
| static constexpr Opcode kFirstConditionalControlNodeOpcode = |
| std::min({CONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastConditionalControlNodeOpcode = |
| std::max({CONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode}); |
| |
| static constexpr Opcode kLastUnconditionalControlNodeOpcode = |
| std::max({UNCONDITIONAL_CONTROL_NODE_LIST(V) kFirstOpcode}); |
| static constexpr Opcode kFirstUnconditionalControlNodeOpcode = |
| std::min({UNCONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode}); |
| |
| static constexpr Opcode kLastTerminalControlNodeOpcode = |
| std::max({TERMINAL_CONTROL_NODE_LIST(V) kFirstOpcode}); |
| static constexpr Opcode kFirstTerminalControlNodeOpcode = |
| std::min({TERMINAL_CONTROL_NODE_LIST(V) kLastOpcode}); |
| |
| static constexpr Opcode kFirstControlNodeOpcode = |
| std::min({CONTROL_NODE_LIST(V) kLastOpcode}); |
| static constexpr Opcode kLastControlNodeOpcode = |
| std::max({CONTROL_NODE_LIST(V) kFirstOpcode}); |
| #undef V |
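| // Note that each V(Name) above expands to "Opcode::kName," with a trailing |
| // comma, so the kFirstOpcode/kLastOpcode sentinels close the initializer |
| // lists. The sentinels never affect the result, since kLastOpcode can't be |
| // less than any list entry and kFirstOpcode can't be greater. |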
| |
| constexpr bool IsValueNode(Opcode opcode) { |
| return kFirstValueNodeOpcode <= opcode && opcode <= kLastValueNodeOpcode; |
| } |
| constexpr bool IsConstantNode(Opcode opcode) { |
| return kFirstConstantNodeOpcode <= opcode && |
| opcode <= kLastConstantNodeOpcode; |
| } |
| constexpr bool IsGapMoveNode(Opcode opcode) { |
| return kFirstGapMoveNodeOpcode <= opcode && opcode <= kLastGapMoveNodeOpcode; |
| } |
| constexpr bool IsControlNode(Opcode opcode) { |
| return kFirstControlNodeOpcode <= opcode && opcode <= kLastControlNodeOpcode; |
| } |
| constexpr bool IsBranchControlNode(Opcode opcode) { |
| return kFirstBranchControlNodeOpcode <= opcode && |
| opcode <= kLastBranchControlNodeOpcode; |
| } |
| constexpr bool IsConditionalControlNode(Opcode opcode) { |
| return kFirstConditionalControlNodeOpcode <= opcode && |
| opcode <= kLastConditionalControlNodeOpcode; |
| } |
| constexpr bool IsUnconditionalControlNode(Opcode opcode) { |
| return kFirstUnconditionalControlNodeOpcode <= opcode && |
| opcode <= kLastUnconditionalControlNodeOpcode; |
| } |
| constexpr bool IsTerminalControlNode(Opcode opcode) { |
| return kFirstTerminalControlNodeOpcode <= opcode && |
| opcode <= kLastTerminalControlNodeOpcode; |
| } |
| |
| // Forward-declare NodeBase sub-hierarchies. |
| class Node; |
| class ControlNode; |
| class ConditionalControlNode; |
| class BranchControlNode; |
| class UnconditionalControlNode; |
| class TerminalControlNode; |
| class ValueNode; |
| |
| enum class ValueRepresentation : uint8_t { |
| kTagged, |
| kInt32, |
| kUint32, |
| kFloat64, |
| kHoleyFloat64, |
| kIntPtr |
| }; |
| |
| inline constexpr bool IsDoubleRepresentation(ValueRepresentation repr) { |
| return repr == ValueRepresentation::kFloat64 || |
| repr == ValueRepresentation::kHoleyFloat64; |
| } |
| |
| /* |
| * The intersection (using `&`) of any two NodeTypes must be a valid NodeType |
| * (possibly "kUnknown", modulo the heap object bit). |
| * |
| * All heap object types include the heap object bit, so that they can be |
| * checked for AnyHeapObject with a single bit check. |
| * |
| * Here is a diagram of the relations between the types, where (*) means that |
| * they have the kAnyHeapObject bit set. |
| * |
| *      NumberOrOddball      JSReceiver*            Name* |
| *        /         \             |                /     \ |
| *   Oddball*     Number      Callable*       String*   Symbol* |
| *      |        /      \                        | |
| *  Boolean*   Smi    HeapNumber*       InternalizedString* |
| * |
| */ |
| |
| #define NODE_TYPE_LIST(V) \ |
| V(Unknown, 0) \ |
| V(NumberOrOddball, (1 << 1)) \ |
| V(Number, (1 << 2) | kNumberOrOddball) \ |
| V(Smi, (1 << 4) | kNumber) \ |
| V(AnyHeapObject, (1 << 5)) \ |
| V(Oddball, (1 << 6) | kAnyHeapObject | kNumberOrOddball) \ |
| V(Boolean, (1 << 7) | kOddball) \ |
| V(Name, (1 << 8) | kAnyHeapObject) \ |
| V(String, (1 << 9) | kName) \ |
| V(InternalizedString, (1 << 10) | kString) \ |
| V(Symbol, (1 << 11) | kName) \ |
| V(JSReceiver, (1 << 12) | kAnyHeapObject) \ |
| V(Callable, (1 << 13) | kJSReceiver | kAnyHeapObject) \ |
| V(HeapNumber, kAnyHeapObject | kNumber) |
| |
| enum class NodeType : uint16_t { |
| #define DEFINE_NODE_TYPE(Name, Value) k##Name = Value, |
| NODE_TYPE_LIST(DEFINE_NODE_TYPE) |
| #undef DEFINE_NODE_TYPE |
| }; |
| |
| inline NodeType CombineType(NodeType left, NodeType right) { |
| return static_cast<NodeType>(static_cast<int>(left) | |
| static_cast<int>(right)); |
| } |
| inline NodeType IntersectType(NodeType left, NodeType right) { |
| return static_cast<NodeType>(static_cast<int>(left) & |
| static_cast<int>(right)); |
| } |
| inline bool NodeTypeIs(NodeType type, NodeType to_check) { |
| int right = static_cast<int>(to_check); |
| return (static_cast<int>(type) & right) == right; |
| } |
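| // For example, with the encoding above: |
| //   kSmi    = (1 << 4) | (1 << 2) | (1 << 1)   (includes all of kNumber's bits) |
| //   kNumber = (1 << 2) | (1 << 1) |
| // so NodeTypeIs(NodeType::kSmi, NodeType::kNumber) is true, while |
| // IntersectType(NodeType::kSmi, NodeType::kString) is NodeType::kUnknown. |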
| |
| inline NodeType StaticTypeForMap(compiler::MapRef map) { |
| if (map.IsHeapNumberMap()) return NodeType::kHeapNumber; |
| if (map.IsInternalizedStringMap()) return NodeType::kInternalizedString; |
| if (map.IsStringMap()) return NodeType::kString; |
| if (map.IsJSReceiverMap()) return NodeType::kJSReceiver; |
| return NodeType::kAnyHeapObject; |
| } |
| |
| inline NodeType StaticTypeForConstant(compiler::JSHeapBroker* broker, |
| compiler::ObjectRef ref) { |
| if (ref.IsSmi()) return NodeType::kSmi; |
| return StaticTypeForMap(ref.AsHeapObject().map(broker)); |
| } |
| |
| inline bool IsInstanceOfNodeType(compiler::MapRef map, NodeType type, |
| compiler::JSHeapBroker* broker) { |
| switch (type) { |
| case NodeType::kUnknown: |
| return true; |
| case NodeType::kNumberOrOddball: |
| return map.IsHeapNumberMap() || map.IsOddballMap(); |
| case NodeType::kSmi: |
| return false; |
| case NodeType::kNumber: |
| case NodeType::kHeapNumber: |
| return map.IsHeapNumberMap(); |
| case NodeType::kAnyHeapObject: |
| return true; |
| case NodeType::kOddball: |
| return map.IsOddballMap(); |
| case NodeType::kBoolean: |
| return map.IsOddballMap() && |
| map.oddball_type(broker) == compiler::OddballType::kBoolean; |
| case NodeType::kName: |
| return map.IsNameMap(); |
| case NodeType::kString: |
| return map.IsStringMap(); |
| case NodeType::kInternalizedString: |
| return map.IsInternalizedStringMap(); |
| case NodeType::kSymbol: |
| return map.IsSymbolMap(); |
| case NodeType::kJSReceiver: |
| return map.IsJSReceiverMap(); |
| case NodeType::kCallable: |
| return map.is_callable(); |
| } |
| |
| // This is some composed type. We could speed this up by exploiting the tree |
| // structure of the types. |
| #define CASE(Name, _) \ |
| if (NodeTypeIs(type, NodeType::k##Name)) { \ |
| if (!IsInstanceOfNodeType(map, NodeType::k##Name, broker)) { \ |
| return false; \ |
| } \ |
| } |
| NODE_TYPE_LIST(CASE) |
| #undef CASE |
| return true; |
| } |
| |
| inline std::ostream& operator<<(std::ostream& out, const NodeType& type) { |
| switch (type) { |
| #define CASE(Name, _) \ |
| case NodeType::k##Name: \ |
| out << #Name; \ |
| break; |
| NODE_TYPE_LIST(CASE) |
| #undef CASE |
| default: |
| #define CASE(Name, _) \ |
| if (NodeTypeIs(type, NodeType::k##Name)) { \ |
| out << #Name ","; \ |
| } |
| NODE_TYPE_LIST(CASE) |
| #undef CASE |
| } |
| return out; |
| } |
| |
| #define DEFINE_NODE_TYPE_CHECK(Type, _) \ |
| inline bool NodeTypeIs##Type(NodeType type) { \ |
| return NodeTypeIs(type, NodeType::k##Type); \ |
| } |
| NODE_TYPE_LIST(DEFINE_NODE_TYPE_CHECK) |
| #undef DEFINE_NODE_TYPE_CHECK |
| |
| enum class TaggedToFloat64ConversionType : uint8_t { |
| kOnlyNumber, |
| kNumberOrOddball, |
| }; |
| |
| constexpr Condition ConditionFor(Operation cond); |
| constexpr Condition ConditionForNaN(); |
| |
| bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node); |
| bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node); |
| |
| inline int ExternalArrayElementSize(const ExternalArrayType element_type) { |
| switch (element_type) { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ |
| case kExternal##Type##Array: \ |
| DCHECK_LE(sizeof(ctype), 8); \ |
| return sizeof(ctype); |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| default: |
| UNREACHABLE(); |
| #undef TYPED_ARRAY_CASE |
| } |
| } |
| |
| inline int ElementsKindSize(ElementsKind element_kind) { |
| switch (element_kind) { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ |
| case TYPE##_ELEMENTS: \ |
| DCHECK_LE(sizeof(ctype), 8); \ |
| return sizeof(ctype); |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| default: |
| UNREACHABLE(); |
| #undef TYPED_ARRAY_CASE |
| } |
| } |
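| // For example, ElementsKindSize(FLOAT64_ELEMENTS) and |
| // ExternalArrayElementSize(kExternalFloat64Array) both return sizeof(double), |
| // i.e. 8. |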
| |
| inline std::ostream& operator<<(std::ostream& os, |
| const ValueRepresentation& repr) { |
| switch (repr) { |
| case ValueRepresentation::kTagged: |
| return os << "Tagged"; |
| case ValueRepresentation::kInt32: |
| return os << "Int32"; |
| case ValueRepresentation::kUint32: |
| return os << "Uint32"; |
| case ValueRepresentation::kFloat64: |
| return os << "Float64"; |
| case ValueRepresentation::kHoleyFloat64: |
| return os << "HoleyFloat64"; |
| case ValueRepresentation::kIntPtr: |
| return os << "Word64"; |
| } |
| } |
| |
| inline std::ostream& operator<<( |
| std::ostream& os, const TaggedToFloat64ConversionType& conversion_type) { |
| switch (conversion_type) { |
| case TaggedToFloat64ConversionType::kOnlyNumber: |
| return os << "Number"; |
| case TaggedToFloat64ConversionType::kNumberOrOddball: |
| return os << "NumberOrOddball"; |
| } |
| } |
| |
| inline bool HasOnlyJSTypedArrayMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (!map.IsJSTypedArrayMap()) return false; |
| } |
| return true; |
| } |
| |
| inline bool HasOnlyJSArrayMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (!map.IsJSArrayMap()) return false; |
| } |
| return true; |
| } |
| |
| inline bool HasOnlyJSObjectMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (!map.IsJSObjectMap()) return false; |
| } |
| return true; |
| } |
| |
| inline bool HasOnlyStringMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (!map.IsStringMap()) return false; |
| } |
| return true; |
| } |
| |
| inline bool HasOnlyNumberMaps(base::Vector<const compiler::MapRef> maps) { |
| for (compiler::MapRef map : maps) { |
| if (map.instance_type() != HEAP_NUMBER_TYPE) return false; |
| } |
| return true; |
| } |
| |
| #define DEF_FORWARD_DECLARATION(type, ...) class type; |
| NODE_BASE_LIST(DEF_FORWARD_DECLARATION) |
| #undef DEF_FORWARD_DECLARATION |
| |
| using NodeIdT = uint32_t; |
| static constexpr uint32_t kInvalidNodeId = 0; |
| static constexpr uint32_t kFirstValidNodeId = 1; |
| |
| // Represents either a direct BasicBlock pointer, or an entry in a list of |
| // unresolved BasicBlockRefs which will be mutated (in place) at some point into |
| // direct BasicBlock pointers. |
| class BasicBlockRef { |
| struct BasicBlockRefBuilder; |
| |
| public: |
| BasicBlockRef() : next_ref_(nullptr) { |
| #ifdef DEBUG |
| state_ = kRefList; |
| #endif |
| } |
| explicit BasicBlockRef(BasicBlock* block) : block_ptr_(block) { |
| #ifdef DEBUG |
| state_ = kBlockPointer; |
| #endif |
| } |
| |
| // Refs can't be copied or moved, since they are referenced by their `this` |
| // pointer in the ref list. |
| BasicBlockRef(const BasicBlockRef&) = delete; |
| BasicBlockRef(BasicBlockRef&&) = delete; |
| BasicBlockRef& operator=(const BasicBlockRef&) = delete; |
| BasicBlockRef& operator=(BasicBlockRef&&) = delete; |
| |
| // Construct a new ref-list mode BasicBlockRef and add it to the given ref |
| // list. |
| explicit BasicBlockRef(BasicBlockRef* ref_list_head) : BasicBlockRef() { |
| BasicBlockRef* old_next_ptr = MoveToRefList(ref_list_head); |
| USE(old_next_ptr); |
| DCHECK_NULL(old_next_ptr); |
| } |
| |
| // Change this ref to a direct basic block pointer, returning the old "next" |
| // pointer of the current ref. |
| BasicBlockRef* SetToBlockAndReturnNext(BasicBlock* block) { |
| DCHECK_EQ(state_, kRefList); |
| |
| BasicBlockRef* old_next_ptr = next_ref_; |
| block_ptr_ = block; |
| #ifdef DEBUG |
| state_ = kBlockPointer; |
| #endif |
| return old_next_ptr; |
| } |
| |
| // Reset this ref list to null, returning the old ref list (i.e. the old |
| // "next" pointer). |
| BasicBlockRef* Reset() { |
| DCHECK_EQ(state_, kRefList); |
| |
| BasicBlockRef* old_next_ptr = next_ref_; |
| next_ref_ = nullptr; |
| return old_next_ptr; |
| } |
| |
| // Move this ref to the given ref list, returning the old "next" pointer of |
| // the current ref. |
| BasicBlockRef* MoveToRefList(BasicBlockRef* ref_list_head) { |
| DCHECK_EQ(state_, kRefList); |
| DCHECK_EQ(ref_list_head->state_, kRefList); |
| |
| BasicBlockRef* old_next_ptr = next_ref_; |
| next_ref_ = ref_list_head->next_ref_; |
| ref_list_head->next_ref_ = this; |
| return old_next_ptr; |
| } |
| |
| void Bind(BasicBlock* block) { |
| DCHECK_EQ(state_, kRefList); |
| |
| BasicBlockRef* next_ref = SetToBlockAndReturnNext(block); |
| while (next_ref != nullptr) { |
| next_ref = next_ref->SetToBlockAndReturnNext(block); |
| } |
| DCHECK_EQ(block_ptr(), block); |
| } |
| |
| BasicBlock* block_ptr() const { |
| DCHECK_EQ(state_, kBlockPointer); |
| return block_ptr_; |
| } |
| |
| BasicBlockRef* next_ref() const { |
| DCHECK_EQ(state_, kRefList); |
| return next_ref_; |
| } |
| |
| bool has_ref() const { |
| DCHECK_EQ(state_, kRefList); |
| return next_ref_ != nullptr; |
| } |
| |
| private: |
| union { |
| BasicBlock* block_ptr_; |
| BasicBlockRef* next_ref_; |
| }; |
| #ifdef DEBUG |
| enum { kBlockPointer, kRefList } state_; |
| #endif // DEBUG |
| }; |
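| // Usage sketch: forward references to a not-yet-created block share a ref |
| // list, which Bind() later resolves in place once the block exists. |
| // |
| //   BasicBlockRef list_head;          // Empty ref-list-mode head. |
| //   BasicBlockRef ref_a(&list_head);  // Two unresolved refs, threaded |
| //   BasicBlockRef ref_b(&list_head);  // through the head's list. |
| //   ... |
| //   list_head.Bind(block);            // Every ref now holds `block`: |
| //   DCHECK_EQ(ref_a.block_ptr(), block); |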
| |
| class OpProperties { |
| public: |
| constexpr bool is_call() const { |
| // Returns true only for non-deferred calls. Use `is_any_call` to also |
| // cover deferred calls. |
| return kIsCallBit::decode(bitfield_); |
| } |
| constexpr bool is_any_call() const { return is_call() || is_deferred_call(); } |
| constexpr bool can_eager_deopt() const { |
| return kAttachedDeoptInfoBits::decode(bitfield_) == |
| AttachedDeoptInfo::kEager; |
| } |
| constexpr bool can_lazy_deopt() const { |
| return kAttachedDeoptInfoBits::decode(bitfield_) == |
| AttachedDeoptInfo::kLazy; |
| } |
| constexpr bool is_deopt_checkpoint() const { |
| return kAttachedDeoptInfoBits::decode(bitfield_) == |
| AttachedDeoptInfo::kCheckpoint; |
| } |
| constexpr bool can_deopt() const { |
| return can_eager_deopt() || can_lazy_deopt(); |
| } |
| constexpr bool can_throw() const { |
| return kCanThrowBit::decode(bitfield_) && can_lazy_deopt(); |
| } |
| constexpr bool can_read() const { return kCanReadBit::decode(bitfield_); } |
| constexpr bool can_write() const { return kCanWriteBit::decode(bitfield_); } |
| constexpr bool can_allocate() const { |
| return kCanAllocate::decode(bitfield_); |
| } |
| constexpr ValueRepresentation value_representation() const { |
| return kValueRepresentationBits::decode(bitfield_); |
| } |
| constexpr bool is_tagged() const { |
| return value_representation() == ValueRepresentation::kTagged; |
| } |
| constexpr bool is_conversion() const { |
| return kIsConversionBit::decode(bitfield_); |
| } |
| constexpr bool needs_register_snapshot() const { |
| return kNeedsRegisterSnapshotBit::decode(bitfield_); |
| } |
| constexpr bool is_pure() const { |
| return (bitfield_ & kPureMask) == kPureValue; |
| } |
| constexpr bool is_required_when_unused() const { |
| if (is_conversion()) { |
| // Calls in conversions are not counted as a side-effect as far as |
| // is_required_when_unused is concerned, since they should always be to |
| // the Allocate builtin. |
| return can_write() || can_throw() || can_deopt(); |
| } else { |
| return can_write() || can_throw() || can_deopt() || is_any_call(); |
| } |
| } |
| |
| constexpr OpProperties operator|(const OpProperties& that) { |
| return OpProperties(bitfield_ | that.bitfield_); |
| } |
| |
| static constexpr OpProperties Pure() { return OpProperties(kPureValue); } |
| static constexpr OpProperties Call() { |
| return OpProperties(kIsCallBit::encode(true)); |
| } |
| static constexpr OpProperties EagerDeopt() { |
| return OpProperties( |
| kAttachedDeoptInfoBits::encode(AttachedDeoptInfo::kEager)); |
| } |
| static constexpr OpProperties LazyDeopt() { |
| return OpProperties( |
| kAttachedDeoptInfoBits::encode(AttachedDeoptInfo::kLazy)); |
| } |
| static constexpr OpProperties DeoptCheckpoint() { |
| return OpProperties( |
| kAttachedDeoptInfoBits::encode(AttachedDeoptInfo::kCheckpoint)); |
| } |
| static constexpr OpProperties CanThrow() { |
| return OpProperties(kCanThrowBit::encode(true)) | LazyDeopt(); |
| } |
| static constexpr OpProperties CanRead() { |
| return OpProperties(kCanReadBit::encode(true)); |
| } |
| static constexpr OpProperties CanWrite() { |
| return OpProperties(kCanWriteBit::encode(true)); |
| } |
| static constexpr OpProperties CanAllocate() { |
| return OpProperties(kCanAllocate::encode(true)); |
| } |
| static constexpr OpProperties TaggedValue() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kTagged)); |
| } |
| static constexpr OpProperties ExternalReference() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kIntPtr)); |
| } |
| static constexpr OpProperties Int32() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kInt32)); |
| } |
| static constexpr OpProperties Uint32() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kUint32)); |
| } |
| static constexpr OpProperties Float64() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kFloat64)); |
| } |
| static constexpr OpProperties HoleyFloat64() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kHoleyFloat64)); |
| } |
| static constexpr OpProperties IntPtr() { |
| return OpProperties( |
| kValueRepresentationBits::encode(ValueRepresentation::kIntPtr)); |
| } |
| static constexpr OpProperties ConversionNode() { |
| return OpProperties(kIsConversionBit::encode(true)); |
| } |
| static constexpr OpProperties CanCallUserCode() { |
| return AnySideEffects() | LazyDeopt() | CanThrow(); |
| } |
| // Without auditing the call target, we must assume it can cause a lazy deopt |
| // and throw. Use this when codegen calls a runtime function or builtin, |
| // unless it is certain that the target either doesn't throw or cannot deopt. |
| // TODO(jgruber): Go through all nodes marked with this property and decide |
| // whether to keep it (or remove either the lazy-deopt or throw flag). |
| static constexpr OpProperties GenericRuntimeOrBuiltinCall() { |
| return Call() | CanCallUserCode(); |
| } |
| static constexpr OpProperties JSCall() { return Call() | CanCallUserCode(); } |
| static constexpr OpProperties AnySideEffects() { |
| return CanRead() | CanWrite() | CanAllocate(); |
| } |
| static constexpr OpProperties DeferredCall() { |
| // Operations with a deferred call need a snapshot of register state, |
| // because they need to be able to push registers to save them, and annotate |
| // the safepoint with information about which registers are tagged. |
| return NeedsRegisterSnapshot(); |
| } |
| |
| constexpr explicit OpProperties(uint32_t bitfield) : bitfield_(bitfield) {} |
| operator uint32_t() const { return bitfield_; } |
| |
| OpProperties WithNewValueRepresentation(ValueRepresentation new_repr) const { |
| return OpProperties(kValueRepresentationBits::update(bitfield_, new_repr)); |
| } |
| |
| OpProperties WithoutDeopt() const { |
| return OpProperties( |
| kAttachedDeoptInfoBits::update(bitfield_, AttachedDeoptInfo::kNone)); |
| } |
| |
| private: |
| enum class AttachedDeoptInfo { kNone, kEager, kLazy, kCheckpoint }; |
| using kIsCallBit = base::BitField<bool, 0, 1>; |
| using kAttachedDeoptInfoBits = kIsCallBit::Next<AttachedDeoptInfo, 2>; |
| using kCanThrowBit = kAttachedDeoptInfoBits::Next<bool, 1>; |
| using kCanReadBit = kCanThrowBit::Next<bool, 1>; |
| using kCanWriteBit = kCanReadBit::Next<bool, 1>; |
| using kCanAllocate = kCanWriteBit::Next<bool, 1>; |
| using kValueRepresentationBits = kCanAllocate::Next<ValueRepresentation, 3>; |
| using kIsConversionBit = kValueRepresentationBits::Next<bool, 1>; |
| using kNeedsRegisterSnapshotBit = kIsConversionBit::Next<bool, 1>; |
| |
| static const uint32_t kPureMask = |
| kCanReadBit::kMask | kCanWriteBit::kMask | kCanAllocate::kMask; |
| static const uint32_t kPureValue = kCanReadBit::encode(false) | |
| kCanWriteBit::encode(false) | |
| kCanAllocate::encode(false); |
| |
| // NeedsRegisterSnapshot is only used for DeferredCall, and we rely on this in |
| // `is_deferred_call` to detect deferred calls. If you need to use |
| // NeedsRegisterSnapshot for something other than DeferredCall, then you'll |
| // have to update `is_any_call`. |
| static constexpr OpProperties NeedsRegisterSnapshot() { |
| return OpProperties(kNeedsRegisterSnapshotBit::encode(true)); |
| } |
| |
| const uint32_t bitfield_; |
| |
| public: |
| static const size_t kSize = kNeedsRegisterSnapshotBit::kLastUsedBit + 1; |
| |
| constexpr bool is_deferred_call() const { |
| // Currently, there is no kDeferredCall bit, but DeferredCall only sets a |
| // single bit: kNeedsRegisterSnapshotBit. If this static_assert breaks, it |
| // means that you added additional properties to DeferredCall, and you |
| // should update this function accordingly. |
| static_assert(DeferredCall().bitfield_ == |
| kNeedsRegisterSnapshotBit::encode(true)); |
| return needs_register_snapshot(); |
| } |
| }; |
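| // Concrete node classes override `kProperties` by combining the factories |
| // above; e.g. a checked Int32-producing node might declare (sketch): |
| // |
| //   static constexpr OpProperties kProperties = |
| //       OpProperties::EagerDeopt() | OpProperties::Int32(); |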
| |
| constexpr inline OpProperties StaticPropertiesForOpcode(Opcode opcode); |
| |
| class ValueLocation { |
| public: |
| ValueLocation() = default; |
| |
| template <typename... Args> |
| void SetUnallocated(Args&&... args) { |
| DCHECK(operand_.IsInvalid()); |
| operand_ = compiler::UnallocatedOperand(args...); |
| } |
| |
| template <typename... Args> |
| void SetAllocated(Args&&... args) { |
| DCHECK(operand_.IsUnallocated()); |
| operand_ = compiler::AllocatedOperand(args...); |
| } |
| |
| // Only to be used on inputs that inherit allocation. |
| void InjectLocation(compiler::InstructionOperand location) { |
| operand_ = location; |
| } |
| |
| // We use USED_AT_START to indicate that the input will be clobbered. |
| bool Cloberred() { |
| DCHECK(operand_.IsUnallocated()); |
| return compiler::UnallocatedOperand::cast(operand_).IsUsedAtStart(); |
| } |
| |
| template <typename... Args> |
| void SetConstant(Args&&... args) { |
| DCHECK(operand_.IsUnallocated()); |
| operand_ = compiler::ConstantOperand(args...); |
| } |
| |
| Register AssignedGeneralRegister() const { |
| DCHECK(!IsDoubleRegister()); |
| return compiler::AllocatedOperand::cast(operand_).GetRegister(); |
| } |
| |
| DoubleRegister AssignedDoubleRegister() const { |
| DCHECK(IsDoubleRegister()); |
| return compiler::AllocatedOperand::cast(operand_).GetDoubleRegister(); |
| } |
| |
| bool IsAnyRegister() const { return operand_.IsAnyRegister(); } |
| bool IsGeneralRegister() const { return operand_.IsRegister(); } |
| bool IsDoubleRegister() const { return operand_.IsDoubleRegister(); } |
| |
| const compiler::InstructionOperand& operand() const { return operand_; } |
| const compiler::InstructionOperand& operand() { return operand_; } |
| |
| private: |
| compiler::InstructionOperand operand_; |
| }; |
| |
| class InputLocation : public ValueLocation { |
| public: |
| NodeIdT next_use_id() const { return next_use_id_; } |
| // Used in ValueNode::mark_use |
| NodeIdT* get_next_use_id_address() { return &next_use_id_; } |
| |
| private: |
| NodeIdT next_use_id_ = kInvalidNodeId; |
| }; |
| |
| class Input : public InputLocation { |
| public: |
| explicit Input(ValueNode* node) : node_(node) {} |
| ValueNode* node() const { return node_; } |
| |
| private: |
| ValueNode* node_; |
| }; |
| |
| class InterpretedDeoptFrame; |
| class InlinedArgumentsDeoptFrame; |
| class ConstructInvokeStubDeoptFrame; |
| class BuiltinContinuationDeoptFrame; |
| class DeoptFrame { |
| public: |
| enum class FrameType { |
| kInterpretedFrame, |
| kInlinedArgumentsFrame, |
| kConstructInvokeStubFrame, |
| kBuiltinContinuationFrame, |
| }; |
| |
| struct InterpretedFrameData { |
| const MaglevCompilationUnit& unit; |
| const CompactInterpreterFrameState* frame_state; |
| ValueNode* closure; |
| const BytecodeOffset bytecode_position; |
| const SourcePosition source_position; |
| }; |
| |
| struct InlinedArgumentsFrameData { |
| const MaglevCompilationUnit& unit; |
| const BytecodeOffset bytecode_position; |
| ValueNode* closure; |
| const base::Vector<ValueNode*> arguments; |
| }; |
| |
| struct ConstructInvokeStubFrameData { |
| const MaglevCompilationUnit& unit; |
| const SourcePosition source_position; |
| ValueNode* receiver; |
| ValueNode* context; |
| }; |
| |
| struct BuiltinContinuationFrameData { |
| const Builtin builtin_id; |
| const base::Vector<ValueNode*> parameters; |
| ValueNode* context; |
| compiler::OptionalJSFunctionRef maybe_js_target; |
| }; |
| |
| using FrameData = base::DiscriminatedUnion< |
| FrameType, InterpretedFrameData, InlinedArgumentsFrameData, |
| ConstructInvokeStubFrameData, BuiltinContinuationFrameData>; |
| |
| DeoptFrame(FrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| |
| DeoptFrame(const FrameData& data, DeoptFrame* parent) |
| : data_(data), parent_(parent) {} |
| |
| FrameType type() const { return data_.tag(); } |
| DeoptFrame* parent() { return parent_; } |
| const DeoptFrame* parent() const { return parent_; } |
| |
| inline const InterpretedDeoptFrame& as_interpreted() const; |
| inline const InlinedArgumentsDeoptFrame& as_inlined_arguments() const; |
| inline const ConstructInvokeStubDeoptFrame& as_construct_stub() const; |
| inline const BuiltinContinuationDeoptFrame& as_builtin_continuation() const; |
| inline InterpretedDeoptFrame& as_interpreted(); |
| inline InlinedArgumentsDeoptFrame& as_inlined_arguments(); |
| inline ConstructInvokeStubDeoptFrame& as_construct_stub(); |
| inline BuiltinContinuationDeoptFrame& as_builtin_continuation(); |
| inline bool IsJsFrame() const; |
| |
| protected: |
| DeoptFrame(InterpretedFrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| DeoptFrame(InlinedArgumentsFrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| DeoptFrame(ConstructInvokeStubFrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| DeoptFrame(BuiltinContinuationFrameData&& data, DeoptFrame* parent) |
| : data_(std::move(data)), parent_(parent) {} |
| |
| FrameData data_; |
| DeoptFrame* const parent_; |
| }; |
| |
| class InterpretedDeoptFrame : public DeoptFrame { |
| public: |
| InterpretedDeoptFrame(const MaglevCompilationUnit& unit, |
| const CompactInterpreterFrameState* frame_state, |
| ValueNode* closure, BytecodeOffset bytecode_position, |
| SourcePosition source_position, DeoptFrame* parent) |
| : DeoptFrame(InterpretedFrameData{unit, frame_state, closure, |
| bytecode_position, source_position}, |
| parent) {} |
| |
| const MaglevCompilationUnit& unit() const { return data().unit; } |
| const CompactInterpreterFrameState* frame_state() const { |
| return data().frame_state; |
| } |
| ValueNode*& closure() { return data().closure; } |
| ValueNode* closure() const { return data().closure; } |
| BytecodeOffset bytecode_position() const { return data().bytecode_position; } |
| SourcePosition source_position() const { return data().source_position; } |
| |
| private: |
| InterpretedFrameData& data() { return data_.get<InterpretedFrameData>(); } |
| const InterpretedFrameData& data() const { |
| return data_.get<InterpretedFrameData>(); |
| } |
| }; |
| |
| // Make sure storing/passing deopt frames by value doesn't truncate them. |
| static_assert(sizeof(InterpretedDeoptFrame) == sizeof(DeoptFrame)); |
| |
| inline const InterpretedDeoptFrame& DeoptFrame::as_interpreted() const { |
| DCHECK_EQ(type(), FrameType::kInterpretedFrame); |
| return static_cast<const InterpretedDeoptFrame&>(*this); |
| } |
| inline InterpretedDeoptFrame& DeoptFrame::as_interpreted() { |
| DCHECK_EQ(type(), FrameType::kInterpretedFrame); |
| return static_cast<InterpretedDeoptFrame&>(*this); |
| } |
| |
| class InlinedArgumentsDeoptFrame : public DeoptFrame { |
| public: |
| InlinedArgumentsDeoptFrame(const MaglevCompilationUnit& unit, |
| BytecodeOffset bytecode_position, |
| ValueNode* closure, |
| base::Vector<ValueNode*> arguments, |
| DeoptFrame* parent) |
| : DeoptFrame(InlinedArgumentsFrameData{unit, bytecode_position, closure, |
| arguments}, |
| parent) {} |
| |
| const MaglevCompilationUnit& unit() const { return data().unit; } |
| BytecodeOffset bytecode_position() const { return data().bytecode_position; } |
| ValueNode*& closure() { return data().closure; } |
| ValueNode* closure() const { return data().closure; } |
| base::Vector<ValueNode*> arguments() const { return data().arguments; } |
| |
| private: |
| InlinedArgumentsFrameData& data() { |
| return data_.get<InlinedArgumentsFrameData>(); |
| } |
| const InlinedArgumentsFrameData& data() const { |
| return data_.get<InlinedArgumentsFrameData>(); |
| } |
| }; |
| |
| // Make sure storing/passing deopt frames by value doesn't truncate them. |
| static_assert(sizeof(InlinedArgumentsDeoptFrame) == sizeof(DeoptFrame)); |
| |
| inline const InlinedArgumentsDeoptFrame& DeoptFrame::as_inlined_arguments() |
| const { |
| DCHECK_EQ(type(), FrameType::kInlinedArgumentsFrame); |
| return static_cast<const InlinedArgumentsDeoptFrame&>(*this); |
| } |
| inline InlinedArgumentsDeoptFrame& DeoptFrame::as_inlined_arguments() { |
| DCHECK_EQ(type(), FrameType::kInlinedArgumentsFrame); |
| return static_cast<InlinedArgumentsDeoptFrame&>(*this); |
| } |
| |
| class ConstructInvokeStubDeoptFrame : public DeoptFrame { |
| public: |
| ConstructInvokeStubDeoptFrame(const MaglevCompilationUnit& unit, |
| SourcePosition source_position, |
| ValueNode* receiver, ValueNode* context, |
| DeoptFrame* parent) |
| : DeoptFrame(ConstructInvokeStubFrameData{unit, source_position, receiver, |
| context}, |
| parent) {} |
| |
| const MaglevCompilationUnit& unit() const { return data().unit; } |
| ValueNode*& receiver() { return data().receiver; } |
| ValueNode* receiver() const { return data().receiver; } |
| ValueNode*& context() { return data().context; } |
| ValueNode* context() const { return data().context; } |
| SourcePosition source_position() const { return data().source_position; } |
| |
| private: |
| ConstructInvokeStubFrameData& data() { |
| return data_.get<ConstructInvokeStubFrameData>(); |
| } |
| const ConstructInvokeStubFrameData& data() const { |
| return data_.get<ConstructInvokeStubFrameData>(); |
| } |
| }; |
| |
| // Make sure storing/passing deopt frames by value doesn't truncate them. |
| static_assert(sizeof(ConstructInvokeStubDeoptFrame) == sizeof(DeoptFrame)); |
| |
| inline const ConstructInvokeStubDeoptFrame& DeoptFrame::as_construct_stub() |
| const { |
| DCHECK_EQ(type(), FrameType::kConstructInvokeStubFrame); |
| return static_cast<const ConstructInvokeStubDeoptFrame&>(*this); |
| } |
| |
| inline ConstructInvokeStubDeoptFrame& DeoptFrame::as_construct_stub() { |
| DCHECK_EQ(type(), FrameType::kConstructInvokeStubFrame); |
| return static_cast<ConstructInvokeStubDeoptFrame&>(*this); |
| } |
| |
| class BuiltinContinuationDeoptFrame : public DeoptFrame { |
| public: |
| BuiltinContinuationDeoptFrame(Builtin builtin_id, |
| base::Vector<ValueNode*> parameters, |
| ValueNode* context, |
| compiler::OptionalJSFunctionRef maybe_js_target, |
| DeoptFrame* parent) |
| : DeoptFrame(BuiltinContinuationFrameData{builtin_id, parameters, context, |
| maybe_js_target}, |
| parent) {} |
| |
| const Builtin& builtin_id() const { return data().builtin_id; } |
| base::Vector<ValueNode*> parameters() const { return data().parameters; } |
| ValueNode*& context() { return data().context; } |
| ValueNode* context() const { return data().context; } |
| bool is_javascript() const { return data().maybe_js_target.has_value(); } |
| compiler::JSFunctionRef javascript_target() const { |
| return data().maybe_js_target.value(); |
| } |
| |
| private: |
| BuiltinContinuationFrameData& data() { |
| return data_.get<BuiltinContinuationFrameData>(); |
| } |
| const BuiltinContinuationFrameData& data() const { |
| return data_.get<BuiltinContinuationFrameData>(); |
| } |
| }; |
| |
| // Make sure storing/passing deopt frames by value doesn't truncate them. |
| static_assert(sizeof(BuiltinContinuationDeoptFrame) == sizeof(DeoptFrame)); |
| |
| inline const BuiltinContinuationDeoptFrame& |
| DeoptFrame::as_builtin_continuation() const { |
| DCHECK_EQ(type(), FrameType::kBuiltinContinuationFrame); |
| return static_cast<const BuiltinContinuationDeoptFrame&>(*this); |
| } |
| inline BuiltinContinuationDeoptFrame& DeoptFrame::as_builtin_continuation() { |
| DCHECK_EQ(type(), FrameType::kBuiltinContinuationFrame); |
| return static_cast<BuiltinContinuationDeoptFrame&>(*this); |
| } |
| |
| inline bool DeoptFrame::IsJsFrame() const { |
| // This must be in sync with TRANSLATION_JS_FRAME_OPCODE_LIST in |
| // translation-opcode.h or bad things happen. |
| switch (data_.tag()) { |
| case FrameType::kInterpretedFrame: |
| return true; |
| case FrameType::kBuiltinContinuationFrame: |
| return as_builtin_continuation().is_javascript(); |
| case FrameType::kConstructInvokeStubFrame: |
| case FrameType::kInlinedArgumentsFrame: |
| return false; |
| } |
| } |
| |
| class DeoptInfo { |
| protected: |
| DeoptInfo(Zone* zone, const DeoptFrame top_frame, |
| compiler::FeedbackSource feedback_to_update); |
| |
| public: |
| DeoptFrame& top_frame() { return top_frame_; } |
| const DeoptFrame& top_frame() const { return top_frame_; } |
| const compiler::FeedbackSource& feedback_to_update() const { |
| return feedback_to_update_; |
| } |
| |
| InputLocation* input_locations() const { return input_locations_; } |
| Label* deopt_entry_label() { return &deopt_entry_label_; } |
| |
| int translation_index() const { return translation_index_; } |
| void set_translation_index(int index) { translation_index_ = index; } |
| |
| private: |
| DeoptFrame top_frame_; |
| const compiler::FeedbackSource feedback_to_update_; |
| InputLocation* const input_locations_; |
| Label deopt_entry_label_; |
| int translation_index_ = -1; |
| }; |
| |
| struct RegisterSnapshot { |
| RegList live_registers; |
| RegList live_tagged_registers; |
| DoubleRegList live_double_registers; |
| }; |
| |
| class EagerDeoptInfo : public DeoptInfo { |
| public: |
| EagerDeoptInfo(Zone* zone, const DeoptFrame top_frame, |
| compiler::FeedbackSource feedback_to_update) |
| : DeoptInfo(zone, top_frame, feedback_to_update) {} |
| |
| DeoptimizeReason reason() const { return reason_; } |
| void set_reason(DeoptimizeReason reason) { reason_ = reason; } |
| |
| private: |
| DeoptimizeReason reason_ = DeoptimizeReason::kUnknown; |
| }; |
| |
| class LazyDeoptInfo : public DeoptInfo { |
| public: |
| LazyDeoptInfo(Zone* zone, const DeoptFrame top_frame, |
| interpreter::Register result_location, int result_size, |
| compiler::FeedbackSource feedback_to_update) |
| : DeoptInfo(zone, top_frame, feedback_to_update), |
| result_location_(result_location), |
| bitfield_( |
| DeoptingCallReturnPcField::encode(kUninitializedCallReturnPc) | |
| ResultSizeField::encode(result_size)) {} |
| |
| interpreter::Register result_location() const { |
| DCHECK(IsConsideredForResultLocation()); |
| return result_location_; |
| } |
| int result_size() const { |
| DCHECK(IsConsideredForResultLocation()); |
| return ResultSizeField::decode(bitfield_); |
| } |
| |
| bool IsResultRegister(interpreter::Register reg) const; |
| void UpdateResultLocation(interpreter::Register result_location, |
| int result_size) { |
| // We should only update to a subset of the existing result location. |
| DCHECK_GE(result_location.index(), result_location_.index()); |
| DCHECK_LE(result_location.index() + result_size, |
| result_location_.index() + this->result_size()); |
| result_location_ = result_location; |
| bitfield_ = ResultSizeField::update(bitfield_, result_size); |
| } |
| bool HasResultLocation() const { |
| DCHECK(IsConsideredForResultLocation()); |
| return result_location_.is_valid(); |
| } |
| |
| int deopting_call_return_pc() const { |
| DCHECK_NE(DeoptingCallReturnPcField::decode(bitfield_), |
| kUninitializedCallReturnPc); |
| return DeoptingCallReturnPcField::decode(bitfield_); |
| } |
| void set_deopting_call_return_pc(int pc) { |
| DCHECK_EQ(DeoptingCallReturnPcField::decode(bitfield_), |
| kUninitializedCallReturnPc); |
| bitfield_ = DeoptingCallReturnPcField::update(bitfield_, pc); |
| } |
| |
| private: |
| #ifdef DEBUG |
| bool IsConsideredForResultLocation() const { |
| switch (top_frame().type()) { |
| case DeoptFrame::FrameType::kInterpretedFrame: |
| // Interpreted frames obviously need a result location. |
| return true; |
| case DeoptFrame::FrameType::kInlinedArgumentsFrame: |
| case DeoptFrame::FrameType::kConstructInvokeStubFrame: |
| return false; |
| case DeoptFrame::FrameType::kBuiltinContinuationFrame: |
| // Normally if the function is going to be deoptimized then the top |
| // frame should be an interpreted one. The only exception is the case |
| // when the lazy deopt point was added only for the sake of recording |
| // an inlined API function instance in the deopt info for exception |
| // stack trace reconstruction. |
| return top_frame().as_builtin_continuation().builtin_id() == |
| Builtin::kGenericLazyDeoptContinuation; |
| } |
| } |
| #endif // DEBUG |
| |
| using DeoptingCallReturnPcField = base::BitField<unsigned int, 0, 30>; |
| using ResultSizeField = DeoptingCallReturnPcField::Next<unsigned int, 2>; |
| |
| // The max code size is enforced by the various assemblers, but it's not |
| // visible here, so we static_assert against the magic constant that we |
| // happen to know is correct. |
| static constexpr int kMaxCodeSize = 512 * MB; |
| static constexpr unsigned int kUninitializedCallReturnPc = |
| DeoptingCallReturnPcField::kMax; |
| static_assert(DeoptingCallReturnPcField::is_valid(kMaxCodeSize)); |
| static_assert(kMaxCodeSize != kUninitializedCallReturnPc); |
| |
| // Lazy deopts can have at most two result registers -- temporarily three for |
| // ForInPrepare. |
| static_assert(ResultSizeField::kMax >= 3); |
| |
| interpreter::Register result_location_; |
| uint32_t bitfield_; |
| }; |
| |
| class ExceptionHandlerInfo { |
| public: |
| const int kNoExceptionHandlerPCOffsetMarker = 0xdeadbeef; |
| |
| ExceptionHandlerInfo() |
| : catch_block(), pc_offset(kNoExceptionHandlerPCOffsetMarker) {} |
| |
| explicit ExceptionHandlerInfo(BasicBlockRef* catch_block_ref) |
| : catch_block(catch_block_ref), pc_offset(-1) {} |
| |
| bool HasExceptionHandler() { |
| return pc_offset != kNoExceptionHandlerPCOffsetMarker; |
| } |
| |
| BasicBlockRef catch_block; |
| Label trampoline_entry; |
| int pc_offset; |
| }; |
| |
| // Dummy type for the initial raw allocation. |
| struct NodeWithInlineInputs {}; |
| |
| namespace detail { |
| // Helper for getting the static opcode of a Node subclass. This is in a |
| // "detail" namespace rather than in NodeBase because we can't specialize |
| // templates outside of namespace scope before C++17. |
| template <class T> |
| struct opcode_of_helper; |
| |
| #define DEF_OPCODE_OF(Name) \ |
| template <> \ |
| struct opcode_of_helper<Name> { \ |
| static constexpr Opcode value = Opcode::k##Name; \ |
| }; |
| NODE_BASE_LIST(DEF_OPCODE_OF) |
| #undef DEF_OPCODE_OF |
| |
| template <typename T> |
| constexpr T* ObjectPtrBeforeAddress(void* address) { |
| char* address_as_char_ptr = reinterpret_cast<char*>(address); |
| char* object_ptr_as_char_ptr = address_as_char_ptr - sizeof(T); |
| return reinterpret_cast<T*>(object_ptr_as_char_ptr); |
| } |
| |
| template <typename T> |
| constexpr const T* ObjectPtrBeforeAddress(const void* address) { |
| const char* address_as_char_ptr = reinterpret_cast<const char*>(address); |
| const char* object_ptr_as_char_ptr = address_as_char_ptr - sizeof(T); |
| return reinterpret_cast<const T*>(object_ptr_as_char_ptr); |
| } |
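| // These helpers recover a pointer to a T that is laid out in memory |
| // immediately before `address`; e.g. a node's inputs are allocated directly |
| // before the node object itself, so the node can reach its Input array this |
| // way. |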
| |
| } // namespace detail |
| |
| class NodeBase : public ZoneObject { |
| private: |
| // Bitfield specification. |
| using OpcodeField = base::BitField64<Opcode, 0, 16>; |
| static_assert(OpcodeField::is_valid(kLastOpcode)); |
| using OpPropertiesField = |
| OpcodeField::Next<OpProperties, OpProperties::kSize>; |
| using NumTemporariesNeededField = OpPropertiesField::Next<uint8_t, 2>; |
| using NumDoubleTemporariesNeededField = |
| NumTemporariesNeededField::Next<uint8_t, 1>; |
| // Pad by one bit so that the input count starts on a 32-bit boundary. |
| using UnusedField = NumDoubleTemporariesNeededField::Next<bool, 1>; |
| using InputCountField = UnusedField::Next<size_t, 17>; |
| static_assert(InputCountField::kShift == 32); |
| |
| protected: |
| using SingleSpareBitField = UnusedField; |
| // Subclasses may use the remaining bitfield bits. |
| template <class T, int size> |
| using NextBitField = InputCountField::Next<T, size>; |
| |
| static constexpr int kMaxInputs = InputCountField::kMax; |
| |
| public: |
| template <class T> |
| static constexpr Opcode opcode_of = detail::opcode_of_helper<T>::value; |
| |
| template <class Derived, typename... Args> |
| static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs, |
| Args&&... args) { |
| Derived* node = |
| Allocate<Derived>(zone, inputs.size(), std::forward<Args>(args)...); |
| |
| int i = 0; |
| for (ValueNode* input : inputs) { |
| DCHECK_NOT_NULL(input); |
| node->set_input(i++, input); |
| } |
| |
| return node; |
| } |
| |
| // Inputs must be initialized manually. |
| template <class Derived, typename... Args> |
| static Derived* New(Zone* zone, size_t input_count, Args&&... args) { |
| Derived* node = |
| Allocate<Derived>(zone, input_count, std::forward<Args>(args)...); |
| return node; |
| } |
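| // Usage sketch, with a hypothetical node type Foo that takes two value |
| // inputs and one extra constructor argument: |
| // |
| //   Foo* foo = NodeBase::New<Foo>(zone, {left, right}, extra_arg); |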
| |
| // Overwritten by subclasses. |
| static constexpr OpProperties kProperties = |
| OpProperties::Pure() | OpProperties::TaggedValue(); |
| |
| constexpr Opcode opcode() const { return OpcodeField::decode(bitfield_); } |
| constexpr OpProperties properties() const { |
| return OpPropertiesField::decode(bitfield_); |
| } |
| void set_properties(OpProperties properties) { |
| bitfield_ = OpPropertiesField::update(bitfield_, properties); |
| } |
| |
| template <class T> |
| constexpr bool Is() const; |
| |
| template <class T> |
| constexpr T* Cast() { |
| DCHECK(Is<T>()); |
| return static_cast<T*>(this); |
| } |
| template <class T> |
| constexpr const T* Cast() const { |
| DCHECK(Is<T>()); |
| return static_cast<const T*>(this); |
| } |
| template <class T> |
| constexpr T* TryCast() { |
| return Is<T>() ? static_cast<T*>(this) : nullptr; |
| } |
| |
| constexpr bool has_inputs() const { return input_count() > 0; } |
| constexpr int input_count() const { |
| static_assert(InputCountField::kMax <= kMaxInt); |
| return static_cast<int>(InputCountField::decode(bitfield_)); |
| } |
| |
| constexpr Input& input(int index) { |
| DCHECK_LT(index, input_count()); |
| return *(input_base() - index); |
| } |
| constexpr const Input& input(int index) const { |
| DCHECK_LT(index, input_count()); |
| return *(input_base() - index); |
| } |
| |
| // Input iterators, use like: |
| // |
| // for (Input& input : *node) { ... } |
| constexpr auto begin() { return std::make_reverse_iterator(&input(-1)); } |
| constexpr auto end() { |
| return std::make_reverse_iterator(&input(input_count() - 1)); |
| } |
| |
| constexpr bool has_id() const { return id_ != kInvalidNodeId; } |
| constexpr NodeIdT id() const { |
| DCHECK_NE(id_, kInvalidNodeId); |
| return id_; |
| } |
| void set_id(NodeIdT id) { |
| DCHECK_EQ(id_, kInvalidNodeId); |
| DCHECK_NE(id, kInvalidNodeId); |
| id_ = id; |
| } |
| |
| template <typename RegisterT> |
| uint8_t num_temporaries_needed() const { |
| if constexpr (std::is_same_v<RegisterT, Register>) { |
| return NumTemporariesNeededField::decode(bitfield_); |
| } else { |
| return NumDoubleTemporariesNeededField::decode(bitfield_); |
| } |
| } |
| |
| template <typename RegisterT> |
| RegListBase<RegisterT>& temporaries() { |
| if constexpr (std::is_same_v<RegisterT, Register>) { |
| return temporaries_; |
| } else { |
| return double_temporaries_; |
| } |
| } |
| |
| RegList& general_temporaries() { return temporaries_; } |
| DoubleRegList& double_temporaries() { return double_temporaries_; } |
| |
| template <typename RegisterT> |
| void assign_temporaries(RegListBase<RegisterT> list) { |
| if constexpr (std::is_same_v<RegisterT, Register>) { |
| temporaries_ = list; |
| } else { |
| double_temporaries_ = list; |
| } |
| } |
| |
| enum class InputAllocationPolicy { kFixedRegister, kArbitraryRegister, kAny }; |
| |
| // Some parts of Maglev require a specific iteration order of the inputs (such |
| // as UseMarkingProcessor::MarkInputUses or |
| // StraightForwardRegisterAllocator::AssignInputs). For such cases, |
| // `ForAllInputsInRegallocAssignmentOrder` can be called with a callback `f` |
| // that will be called for each input in the "correct" order. |
| template <typename Function> |
| void ForAllInputsInRegallocAssignmentOrder(Function&& f); |
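| |
| // A sketch of such a callback (illustrative; assumes `f` receives the |
| // input's allocation policy together with a pointer to the input): |
| // |
| //   node->ForAllInputsInRegallocAssignmentOrder( |
| //       [&](NodeBase::InputAllocationPolicy policy, Input* input) { |
| //         // ... |
| //       }); |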
| |
| void Print(std::ostream& os, MaglevGraphLabeller*, |
| bool skip_targets = false) const; |
| |
| // For GDB: Print any Node with `print node->Print()`. |
| void Print() const; |
| |
| EagerDeoptInfo* eager_deopt_info() { |
| DCHECK(properties().can_eager_deopt() || |
| properties().is_deopt_checkpoint()); |
| DCHECK(!properties().can_lazy_deopt()); |
| return reinterpret_cast<EagerDeoptInfo*>(deopt_info_address()); |
| } |
| |
| LazyDeoptInfo* lazy_deopt_info() { |
| DCHECK(properties().can_lazy_deopt()); |
| DCHECK(!properties().can_eager_deopt()); |
| return reinterpret_cast<LazyDeoptInfo*>(deopt_info_address()); |
| } |
| |
| const RegisterSnapshot& register_snapshot() const { |
| DCHECK(properties().needs_register_snapshot()); |
| return *reinterpret_cast<RegisterSnapshot*>(register_snapshot_address()); |
| } |
| |
| ExceptionHandlerInfo* exception_handler_info() { |
| DCHECK(properties().can_throw()); |
| return reinterpret_cast<ExceptionHandlerInfo*>(exception_handler_address()); |
| } |
| |
| void set_register_snapshot(RegisterSnapshot snapshot) { |
| DCHECK(properties().needs_register_snapshot()); |
| *reinterpret_cast<RegisterSnapshot*>(register_snapshot_address()) = |
| snapshot; |
| } |
| |
| inline void change_input(int index, ValueNode* node); |
| |
| void change_representation(ValueRepresentation new_repr) { |
| DCHECK_EQ(opcode(), Opcode::kPhi); |
| bitfield_ = OpPropertiesField::update( |
| bitfield_, properties().WithNewValueRepresentation(new_repr)); |
| } |
| |
| void set_opcode(Opcode new_opcode) { |
| bitfield_ = OpcodeField::update(bitfield_, new_opcode); |
| } |
| |
| void CopyEagerDeoptInfoOf(NodeBase* other, Zone* zone) { |
| new (eager_deopt_info()) |
| EagerDeoptInfo(zone, other->eager_deopt_info()->top_frame(), |
| other->eager_deopt_info()->feedback_to_update()); |
| } |
| |
| void SetEagerDeoptInfo(Zone* zone, DeoptFrame deopt_frame, |
| compiler::FeedbackSource feedback_to_update = |
| compiler::FeedbackSource()) { |
| DCHECK(properties().can_eager_deopt() || |
| properties().is_deopt_checkpoint()); |
| new (eager_deopt_info()) |
| EagerDeoptInfo(zone, deopt_frame, feedback_to_update); |
| } |
| |
| template <typename NodeT> |
| void OverwriteWith() { |
| OverwriteWith(NodeBase::opcode_of<NodeT>, NodeT::kProperties); |
| } |
| |
| void OverwriteWith( |
| Opcode new_opcode, |
| base::Optional<OpProperties> maybe_new_properties = base::nullopt) { |
| OpProperties new_properties = maybe_new_properties.has_value() |
| ? maybe_new_properties.value() |
| : StaticPropertiesForOpcode(new_opcode); |
| #ifdef DEBUG |
| CheckCanOverwriteWith(new_opcode, new_properties); |
| #endif |
| set_opcode(new_opcode); |
| set_properties(new_properties); |
| } |
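| |
| // For example, a node whose computation has become redundant could be |
| // rewritten in place (illustrative only): |
| // |
| //   node->OverwriteWith<Identity>(); |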
| |
| protected: |
| explicit NodeBase(uint64_t bitfield) : bitfield_(bitfield) {} |
| |
| // Allow updating bits above NextBitField from subclasses. |
| constexpr uint64_t bitfield() const { return bitfield_; } |
| void set_bitfield(uint64_t new_bitfield) { |
| #ifdef DEBUG |
| // Make sure that all the base bitfield bits (all bits before the next |
| // bitfield start, excluding any spare bits) are equal in the new value. |
| const uint64_t base_bitfield_mask = |
| ((uint64_t{1} << NextBitField<bool, 1>::kShift) - 1) & |
| ~SingleSpareBitField::kMask; |
| DCHECK_EQ(bitfield_ & base_bitfield_mask, |
| new_bitfield & base_bitfield_mask); |
| #endif |
| bitfield_ = new_bitfield; |
| } |
| |
| constexpr Input* input_base() { |
| return detail::ObjectPtrBeforeAddress<Input>(this); |
| } |
| constexpr const Input* input_base() const { |
| return detail::ObjectPtrBeforeAddress<Input>(this); |
| } |
| Input* last_input() { return &input(input_count() - 1); } |
| const Input* last_input() const { return &input(input_count() - 1); } |
| |
| Address last_input_address() const { |
| return reinterpret_cast<Address>(last_input()); |
| } |
| |
| inline void initialize_input_null(int index); |
| inline void set_input(int index, ValueNode* node); |
| |
| // For nodes that don't have data past the inputs, allow trimming the input |
| // count. This is used by Phis to reduce inputs when merging in dead control |
| // flow. |
| void reduce_input_count(int num = 1) { |
| DCHECK_EQ(opcode(), Opcode::kPhi); |
| DCHECK_GE(input_count(), num); |
| DCHECK(!properties().can_lazy_deopt()); |
| DCHECK(!properties().can_eager_deopt()); |
| bitfield_ = InputCountField::update(bitfield_, input_count() - num); |
| } |
| |
| // Specify that a certain number of registers must be free (i.e. usable as |
| // scratch registers) on entry into this node. |
| // |
| // Does not include any registers requested by RequireSpecificTemporary. |
| void set_temporaries_needed(uint8_t value) { |
| DCHECK_EQ(num_temporaries_needed<Register>(), 0); |
| bitfield_ = NumTemporariesNeededField::update(bitfield_, value); |
| } |
| |
| void set_double_temporaries_needed(uint8_t value) { |
| DCHECK_EQ(num_temporaries_needed<DoubleRegister>(), 0); |
| bitfield_ = NumDoubleTemporariesNeededField::update(bitfield_, value); |
| } |
| |
| // Require that a specific register is free (and therefore clobberable) on |
| // entry into this node. |
| void RequireSpecificTemporary(Register reg) { temporaries_.set(reg); } |
| |
| void RequireSpecificDoubleTemporary(DoubleRegister reg) { |
| double_temporaries_.set(reg); |
| } |
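| |
| // A sketch of how a subclass might combine these (illustrative; the |
| // register name is hypothetical): |
| // |
| //   set_temporaries_needed(1); |
| //   RequireSpecificTemporary(some_fixed_reg); |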
| |
| private: |
| template <class Derived, typename... Args> |
| static Derived* Allocate(Zone* zone, size_t input_count, Args&&... args) { |
| static_assert( |
| !Derived::kProperties.can_eager_deopt() || |
| !Derived::kProperties.can_lazy_deopt(), |
| "The current deopt info representation, at the end of inputs, requires " |
| "that we cannot have both lazy and eager deopts on a node. If we ever " |
| "need this, we have to update accessors to check node->properties() " |
| "for which deopts are active."); |
| constexpr size_t size_before_inputs = |
| ExceptionHandlerInfoSize(Derived::kProperties) + |
| RegisterSnapshotSize(Derived::kProperties) + |
| EagerDeoptInfoSize(Derived::kProperties) + |
| LazyDeoptInfoSize(Derived::kProperties); |
| |
| static_assert(IsAligned(size_before_inputs, alignof(Input))); |
| const size_t size_before_node = |
| size_before_inputs + input_count * sizeof(Input); |
| |
| DCHECK(IsAligned(size_before_inputs, alignof(Derived))); |
| const size_t size = size_before_node + sizeof(Derived); |
| intptr_t raw_buffer = |
| reinterpret_cast<intptr_t>(zone->Allocate<NodeWithInlineInputs>(size)); |
| #ifdef DEBUG |
| memset(reinterpret_cast<void*>(raw_buffer), 0, size); |
| #endif |
| |
| void* node_buffer = reinterpret_cast<void*>(raw_buffer + size_before_node); |
| uint64_t bitfield = OpcodeField::encode(opcode_of<Derived>) | |
| OpPropertiesField::encode(Derived::kProperties) | |
| InputCountField::encode(input_count); |
| Derived* node = |
| new (node_buffer) Derived(bitfield, std::forward<Args>(args)...); |
| return node; |
| } |
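| |
| // The resulting allocation layout, from low to high addresses (a sketch |
| // derived from the size computations above): |
| // |
| //   [ExceptionHandlerInfo][RegisterSnapshot][Eager-/LazyDeoptInfo] |
| //   [input (N-1)] ... [input 1][input 0][Derived node object] |
| // |
| // input_base() points at the Input slot immediately before the node |
| // object, so input(i) lives at decreasing addresses as i grows, and the |
| // optional pieces sit at fixed offsets before the last input (see the |
| // *_address() helpers below). |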
| |
| static constexpr size_t ExceptionHandlerInfoSize(OpProperties properties) { |
| return RoundUp<alignof(Input)>( |
| properties.can_throw() ? sizeof(ExceptionHandlerInfo) : 0); |
| } |
| |
| static constexpr size_t RegisterSnapshotSize(OpProperties properties) { |
| return RoundUp<alignof(Input)>( |
| properties.needs_register_snapshot() ? sizeof(RegisterSnapshot) : 0); |
| } |
| |
| static constexpr size_t EagerDeoptInfoSize(OpProperties properties) { |
| return RoundUp<alignof(Input)>( |
| (properties.can_eager_deopt() || properties.is_deopt_checkpoint()) |
| ? sizeof(EagerDeoptInfo) |
| : 0); |
| } |
| |
| static constexpr size_t LazyDeoptInfoSize(OpProperties properties) { |
| return RoundUp<alignof(Input)>( |
| properties.can_lazy_deopt() ? sizeof(LazyDeoptInfo) : 0); |
| } |
| |
| // Returns the position of the deopt info if it exists, otherwise returns |
| // its position as if the DeoptInfo size were zero. |
| Address deopt_info_address() const { |
| DCHECK(!properties().can_eager_deopt() || !properties().can_lazy_deopt()); |
| size_t extra = |
| EagerDeoptInfoSize(properties()) + LazyDeoptInfoSize(properties()); |
| return last_input_address() - extra; |
| } |
| |
| // Returns the position of the register snapshot if it exists, otherwise |
| // returns its position as if the RegisterSnapshot size were zero. |
| Address register_snapshot_address() const { |
| size_t extra = RegisterSnapshotSize(properties()); |
| return deopt_info_address() - extra; |
| } |
| |
| // Returns the position of the exception handler info if it exists, |
| // otherwise returns its position as if the ExceptionHandlerInfo size were |
| // zero. |
| Address exception_handler_address() const { |
| size_t extra = ExceptionHandlerInfoSize(properties()); |
| return register_snapshot_address() - extra; |
| } |
| |
| void CheckCanOverwriteWith(Opcode new_opcode, OpProperties new_properties); |
| |
| uint64_t bitfield_; |
| NodeIdT id_ = kInvalidNodeId; |
| RegList temporaries_; |
| DoubleRegList double_temporaries_; |
| |
| NodeBase() = delete; |
| NodeBase(const NodeBase&) = delete; |
| NodeBase(NodeBase&&) = delete; |
| NodeBase& operator=(const NodeBase&) = delete; |
| NodeBase& operator=(NodeBase&&) = delete; |
| }; |
| |
| template <class T> |
| constexpr bool NodeBase::Is() const { |
| return opcode() == opcode_of<T>; |
| } |
| |
| // Specialized sub-hierarchy type checks. |
| template <> |
| constexpr bool NodeBase::Is<ValueNode>() const { |
| return IsValueNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<ControlNode>() const { |
| return IsControlNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<BranchControlNode>() const { |
| return IsBranchControlNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<ConditionalControlNode>() const { |
| return IsConditionalControlNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<UnconditionalControlNode>() const { |
| return IsUnconditionalControlNode(opcode()); |
| } |
| template <> |
| constexpr bool NodeBase::Is<TerminalControlNode>() const { |
| return IsTerminalControlNode(opcode()); |
| } |
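| |
| // These let a generic NodeBase* be checked and downcast against a whole |
| // sub-hierarchy, e.g. (illustrative): |
| // |
| //   if (node->Is<ValueNode>()) { |
| //     ValueNode* value = node->Cast<ValueNode>(); |
| //     // ... |
| //   } |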
| |
| void CheckValueInputIs(const NodeBase* node, int i, |
| ValueRepresentation expected, |
| MaglevGraphLabeller* graph_labeller); |
| |
| // The Node class hierarchy contains all non-control nodes. |
| class Node : public NodeBase { |
| public: |
| using List = base::ThreadedListWithUnsafeInsertions<Node>; |
| |
| inline ValueLocation& result(); |
| |
| Node* NextNode() const { return next_; } |
| |
| protected: |
| using NodeBase::NodeBase; |
| |
| private: |
| Node** next() { return &next_; } |
| Node* next_ = nullptr; |
| |
| friend List; |
| friend base::ThreadedListTraits<Node>; |
| }; |
| |
| // All non-control nodes with a result. |
| class ValueNode : public Node { |
| private: |
| using TaggedResultNeedsDecompressField = NodeBase::SingleSpareBitField; |
| |
| protected: |
| using SingleSpareBitField = void; |
| |
| public: |
| ValueLocation& result() { return result_; } |
| const ValueLocation& result() const { return result_; } |
| |
| int use_count() const { |
| // Invalid to check use_count externally once an id is allocated. |
| DCHECK(!has_id()); |
| return use_count_; |
| } |
| bool is_used() const { return use_count_ > 0; } |
| bool unused_inputs_were_visited() const { return use_count_ == -1; } |
| void add_use() { |
| // Make sure a saturated use count won't overflow. |
| DCHECK_LT(use_count_, kMaxInt); |
| use_count_++; |
| } |
| void remove_use() { |
| // Make sure a saturated use count won't drop below zero. |
| DCHECK_GT(use_count_, 0); |
| use_count_--; |
| } |
| // Avoid revisiting nodes when processing an unused node's inputs, by marking |
| // it as visited. |
| void mark_unused_inputs_visited() { |
| DCHECK_EQ(use_count_, 0); |
| use_count_ = -1; |
| } |
| |
| void SetHint(compiler::InstructionOperand hint); |
| |
| void ClearHint() { hint_ = compiler::InstructionOperand(); } |
| |
| bool has_hint() { return !hint_.IsInvalid(); } |
| |
| template <typename RegisterT> |
| RegisterT GetRegisterHint() { |
| if (hint_.IsInvalid()) return RegisterT::no_reg(); |
| return RegisterT::from_code( |
| compiler::UnallocatedOperand::cast(hint_).fixed_register_index()); |
| } |
| |
| const compiler::InstructionOperand& hint() const { |
| DCHECK(hint_.IsInvalid() || hint_.IsUnallocated()); |
| return hint_; |
| } |
| |
| bool is_loadable() const { |
| DCHECK_EQ(state_, kSpill); |
| return spill_.IsConstant() || spill_.IsAnyStackSlot(); |
| } |
| |
| bool is_spilled() const { |
| DCHECK_EQ(state_, kSpill); |
| return spill_.IsAnyStackSlot(); |
| } |
| |
| void SetNoSpill(); |
| void SetConstantLocation(); |
| |
| // For constants only. |
| void LoadToRegister(MaglevAssembler*, Register); |
| void LoadToRegister(MaglevAssembler*, DoubleRegister); |
| void DoLoadToRegister(MaglevAssembler*, Register); |
| void DoLoadToRegister(MaglevAssembler*, DoubleRegister); |
| Handle<Object> Reify(LocalIsolate* isolate) const; |
| |
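| // Note that `spill_` shares storage with the use-chain tail pointer (see |
| // the union in the member declarations below), so spilling is only valid |
| // once use recording has finished. |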
| void Spill(compiler::AllocatedOperand operand) { |
| #ifdef DEBUG |
| if (state_ == kLastUse) { |
| state_ = kSpill; |
| } else { |
| DCHECK(!is_loadable()); |
| } |
| #endif // DEBUG |
| DCHECK(!IsConstantNode(opcode())); |
| DCHECK(operand.IsAnyStackSlot()); |
| spill_ = operand; |
| DCHECK(spill_.IsAnyStackSlot()); |
| } |
| |
| compiler::AllocatedOperand spill_slot() const { |
| DCHECK(is_spilled()); |
| return compiler::AllocatedOperand::cast(loadable_slot()); |
| } |
| |
| compiler::InstructionOperand loadable_slot() const { |
| DCHECK_EQ(state_, kSpill); |
| DCHECK(is_loadable()); |
| return spill_; |
| } |
| |
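| // Uses are threaded as a chain of next-use ids: each call below stores |
| // `id` through the previous tail pointer and then retargets the tail at |
| // the new input's next-use slot. |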
| void record_next_use(NodeIdT id, InputLocation* input_location) { |
| DCHECK_EQ(state_, kLastUse); |
| DCHECK_NE(id, kInvalidNodeId); |
| DCHECK_LT(start_id(), id); |
| DCHECK_IMPLIES(has_valid_live_range(), id >= end_id_); |
| end_id_ = id; |
| *last_uses_next_use_id_ = id; |
| last_uses_next_use_id_ = input_location->get_next_use_id_address(); |
| DCHECK_EQ(*last_uses_next_use_id_, kInvalidNodeId); |
| } |
| |
| struct LiveRange { |
| NodeIdT start = kInvalidNodeId; |
| NodeIdT end = kInvalidNodeId; // Inclusive. |
| }; |
| |
| bool has_valid_live_range() const { return end_id_ != 0; } |
| LiveRange live_range() const { return {start_id(), end_id_}; } |
| NodeIdT current_next_use() const { return next_use_; } |
| |
| // The following methods should only be used during register allocation, to |
| // mark the _current_ state of this Node according to the register |
| // allocator. |
| void advance_next_use(NodeIdT use) { next_use_ = use; } |
| |
| bool has_no_more_uses() const { return next_use_ == kInvalidNodeId; } |
| |
| constexpr bool use_double_register() const { |
| return IsDoubleRepresentation(properties().value_representation()); |
| } |
| |
| constexpr bool is_tagged() const { |
| return (properties().value_representation() == |
| ValueRepresentation::kTagged); |
| } |
| |
| #ifdef V8_COMPRESS_POINTERS |
| constexpr bool decompresses_tagged_result() const { |
| return TaggedResultNeedsDecompressField::decode(bitfield()); |
| } |
| |
| void SetTaggedResultNeedsDecompress() { |
| static_assert(PointerCompressionIsEnabled()); |
| |
| DCHECK_IMPLIES(!Is<Identity>(), is_tagged()); |
| DCHECK_IMPLIES(Is<Identity>(), input(0).node()->is_tagged()); |
| set_bitfield(TaggedResultNeedsDecompressField::update(bitfield(), true)); |
| if (Is<Phi>()) { |
| for (Input& input : *this) { |
| // Avoid endless recursion by terminating on values already marked. |
| if (input.node()->decompresses_tagged_result()) continue; |
| input.node()->SetTaggedResultNeedsDecompress(); |
| } |
| } else if (Is<Identity>()) { |
| // Identity nodes have exactly one input. |
| DCHECK_EQ(input_count(), 1); |
| input(0).node()->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| #else |
| constexpr bool decompresses_tagged_result() const { return false; } |
| #endif |
| |
| constexpr ValueRepresentation value_representation() const { |
| return properties().value_representation(); |
| } |
| |
| constexpr MachineRepresentation GetMachineRepresentation() const { |
| switch (properties().value_representation()) { |
| case ValueRepresentation::kTagged: |
| return MachineRepresentation::kTagged; |
| case ValueRepresentation::kInt32: |
| case ValueRepresentation::kUint32: |
| return MachineRepresentation::kWord32; |
| case ValueRepresentation::kIntPtr: |
| return MachineType::PointerRepresentation(); |
| case ValueRepresentation::kFloat64: |
| return MachineRepresentation::kFloat64; |
| case ValueRepresentation::kHoleyFloat64: |
| return MachineRepresentation::kFloat64; |
| } |
| } |
| |
| void InitializeRegisterData() { |
| if (use_double_register()) { |
| double_registers_with_result_ = kEmptyDoubleRegList; |
| } else { |
| registers_with_result_ = kEmptyRegList; |
| } |
| } |
| |
| void AddRegister(Register reg) { |
| DCHECK(!use_double_register()); |
| registers_with_result_.set(reg); |
| } |
| void AddRegister(DoubleRegister reg) { |
| DCHECK(use_double_register()); |
| double_registers_with_result_.set(reg); |
| } |
| |
| void RemoveRegister(Register reg) { |
| DCHECK(!use_double_register()); |
| registers_with_result_.clear(reg); |
| } |
| void RemoveRegister(DoubleRegister reg) { |
| DCHECK(use_double_register()); |
| double_registers_with_result_.clear(reg); |
| } |
| |
| template <typename T> |
| inline RegListBase<T> ClearRegisters(); |
| |
| int num_registers() const { |
| if (use_double_register()) { |
| return double_registers_with_result_.Count(); |
| } |
| return registers_with_result_.Count(); |
| } |
| bool has_register() const { |
| if (use_double_register()) { |
| return double_registers_with_result_ != kEmptyDoubleRegList; |
| } |
| return registers_with_result_ != kEmptyRegList; |
| } |
| bool is_in_register(Register reg) const { |
| DCHECK(!use_double_register()); |
| return registers_with_result_.has(reg); |
| } |
| bool is_in_register(DoubleRegister reg) const { |
| DCHECK(use_double_register()); |
| return double_registers_with_result_.has(reg); |
| } |
| |
| template <typename T> |
| RegListBase<T> result_registers() { |
| if constexpr (std::is_same<T, DoubleRegister>::value) { |
| DCHECK(use_double_register()); |
| return double_registers_with_result_; |
| } else { |
| DCHECK(!use_double_register()); |
| return registers_with_result_; |
| } |
| } |
| |
| compiler::InstructionOperand allocation() const { |
| if (has_register()) { |
| return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER, |
| GetMachineRepresentation(), |
| FirstRegisterCode()); |
| } |
| DCHECK(is_loadable()); |
| return spill_; |
| } |
| |
| protected: |
| explicit ValueNode(uint64_t bitfield) |
| : Node(bitfield), |
| last_uses_next_use_id_(&next_use_), |
| hint_(compiler::InstructionOperand()), |
| use_count_(0) |
| #ifdef DEBUG |
| , |
| state_(kLastUse) |
| #endif // DEBUG |
| { |
| InitializeRegisterData(); |
| } |
| |
| int FirstRegisterCode() const { |
| if (use_double_register()) { |
| return double_registers_with_result_.first().code(); |
| } |
| return registers_with_result_.first().code(); |
| } |
| |
| // Rename for better pairing with `end_id`. |
| NodeIdT start_id() const { return id(); } |
| |
| NodeIdT end_id_ = kInvalidNodeId; |
| NodeIdT next_use_ = kInvalidNodeId; |
| ValueLocation result_; |
| union { |
| RegList registers_with_result_; |
| DoubleRegList double_registers_with_result_; |
| }; |
| union { |
| // Pointer to the current last use's next_use_id field. Most of the time |
| // this will be a pointer to an Input's next_use_id_ field, but it's |
| // initialized to this node's next_use_ to track the first use. |
| NodeIdT* last_uses_next_use_id_; |
| compiler::InstructionOperand spill_; |
| }; |
| compiler::InstructionOperand hint_; |
| // TODO(leszeks): Union this into another field. |
| int use_count_; |
| #ifdef DEBUG |
| enum { kLastUse, kSpill } state_; |
| #endif // DEBUG |
| }; |
| |
| inline void NodeBase::initialize_input_null(int index) { |
| // Should already be null in debug builds; make sure it's null in release |
| // builds too. |
| DCHECK_EQ(input(index).node(), nullptr); |
| new (&input(index)) Input(nullptr); |
| } |
| |
| inline void NodeBase::set_input(int index, ValueNode* node) { |
| DCHECK_NOT_NULL(node); |
| DCHECK_EQ(input(index).node(), nullptr); |
| node->add_use(); |
| new (&input(index)) Input(node); |
| } |
| |
| inline void NodeBase::change_input(int index, ValueNode* node) { |
| DCHECK_NE(input(index).node(), nullptr); |
| input(index).node()->remove_use(); |
| |
| #ifdef DEBUG |
| input(index) = Input(nullptr); |
| #endif |
| set_input(index, node); |
| } |
| |
| template <> |
| inline RegList ValueNode::ClearRegisters() { |
| DCHECK(!use_double_register()); |
| return std::exchange(registers_with_result_, kEmptyRegList); |
| } |
| |
| template <> |
| inline DoubleRegList ValueNode::ClearRegisters() { |
| DCHECK(use_double_register()); |
| return std::exchange(double_registers_with_result_, kEmptyDoubleRegList); |
| } |
| |
| ValueLocation& Node::result() { |
| DCHECK(Is<ValueNode>()); |
| return Cast<ValueNode>()->result(); |
| } |
| |
| // Mixin for a node with known class (and therefore known opcode and static |
| // properties), but possibly unknown numbers of inputs. |
| template <typename Base, typename Derived> |
| class NodeTMixin : public Base { |
| public: |
| // Shadowing for static knowledge. |
| constexpr Opcode opcode() const { return NodeBase::opcode_of<Derived>; } |
| constexpr const OpProperties& properties() const { |
| return Derived::kProperties; |
| } |
| |
| template <typename... Args> |
| static Derived* New(Zone* zone, std::initializer_list<ValueNode*> inputs, |
| Args&&... args) { |
| return NodeBase::New<Derived>(zone, inputs, std::forward<Args>(args)...); |
| } |
| template <typename... Args> |
| static Derived* New(Zone* zone, size_t input_count, Args&&... args) { |
| return NodeBase::New<Derived>(zone, input_count, |
| std::forward<Args>(args)...); |
| } |
| |
| protected: |
| template <typename... Args> |
| explicit NodeTMixin(uint64_t bitfield, Args&&... args) |
| : Base(bitfield, std::forward<Args>(args)...) { |
| DCHECK_EQ(this->NodeBase::opcode(), NodeBase::opcode_of<Derived>); |
| DCHECK_EQ(this->NodeBase::properties(), Derived::kProperties); |
| } |
| }; |
| |
| namespace detail { |
| // Helper class for defining input types as a std::array, but without |
| // accidental initialization with a wrong-sized initializer_list. |
| template <size_t Size> |
| class ArrayWrapper : public std::array<ValueRepresentation, Size> { |
| public: |
| template <typename... Args> |
| explicit constexpr ArrayWrapper(Args&&... args) |
| : std::array<ValueRepresentation, Size>({args...}) { |
| static_assert(sizeof...(args) == Size); |
| } |
| }; |
| struct YouNeedToDefineAnInputTypesArrayInYourDerivedClass {}; |
| } // namespace detail |
| |
| // Mixin for a node with known class (and therefore known opcode and static |
| // properties), and known numbers of inputs. |
| template <size_t InputCount, typename Base, typename Derived> |
| class FixedInputNodeTMixin : public NodeTMixin<Base, Derived> { |
| static constexpr size_t kInputCount = InputCount; |
| |
| public: |
| // Shadowing for static knowledge. |
| constexpr bool has_inputs() const { return input_count() > 0; } |
| constexpr uint16_t input_count() const { return kInputCount; } |
| constexpr auto end() { |
| return std::make_reverse_iterator(&this->input(input_count() - 1)); |
| } |
| |
| void VerifyInputs(MaglevGraphLabeller* graph_labeller) const { |
| if constexpr (kInputCount != 0) { |
| static_assert( |
| std::is_same_v<const InputTypes, decltype(Derived::kInputTypes)>); |
| static_assert(kInputCount == Derived::kInputTypes.size()); |
| for (int i = 0; i < static_cast<int>(kInputCount); ++i) { |
| CheckValueInputIs(this, i, Derived::kInputTypes[i], graph_labeller); |
| } |
| } |
| } |
| |
| #ifdef V8_COMPRESS_POINTERS |
| void MarkTaggedInputsAsDecompressing() const { |
| if constexpr (kInputCount != 0) { |
| static_assert( |
| std::is_same_v<const InputTypes, decltype(Derived::kInputTypes)>); |
| static_assert(kInputCount == Derived::kInputTypes.size()); |
| for (int i = 0; i < static_cast<int>(kInputCount); ++i) { |
| if (Derived::kInputTypes[i] == ValueRepresentation::kTagged) { |
| ValueNode* input_node = this->input(i).node(); |
| input_node->SetTaggedResultNeedsDecompress(); |
| } |
| } |
| } |
| } |
| #endif |
| |
| protected: |
| using InputTypes = detail::ArrayWrapper<kInputCount>; |
| detail::YouNeedToDefineAnInputTypesArrayInYourDerivedClass kInputTypes; |
| |
| template <typename... Args> |
| explicit FixedInputNodeTMixin(uint64_t bitfield, Args&&... args) |
| : NodeTMixin<Base, Derived>(bitfield, std::forward<Args>(args)...) { |
| DCHECK_EQ(this->NodeBase::input_count(), kInputCount); |
| } |
| }; |
| |
| template <class Derived> |
| using NodeT = NodeTMixin<Node, Derived>; |
| |
| template <class Derived> |
| using ValueNodeT = NodeTMixin<ValueNode, Derived>; |
| |
| template <size_t InputCount, class Derived> |
| using FixedInputNodeT = |
| FixedInputNodeTMixin<InputCount, NodeT<Derived>, Derived>; |
| |
| template <size_t InputCount, class Derived> |
| using FixedInputValueNodeT = |
| FixedInputNodeTMixin<InputCount, ValueNodeT<Derived>, Derived>; |
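| |
| // A typical fixed-input value node is declared against these aliases, e.g. |
| // (an illustrative sketch; `Foo` is hypothetical): |
| // |
| //   class Foo : public FixedInputValueNodeT<2, Foo> { |
| //     using Base = FixedInputValueNodeT<2, Foo>; |
| // |
| //    public: |
| //     static constexpr OpProperties kProperties = OpProperties::Int32(); |
| //     static constexpr typename Base::InputTypes kInputTypes{ |
| //         ValueRepresentation::kInt32, ValueRepresentation::kInt32}; |
| //     explicit Foo(uint64_t bitfield) : Base(bitfield) {} |
| //   }; |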
| |
| class Identity : public FixedInputValueNodeT<1, Identity> { |
| using Base = FixedInputValueNodeT<1, Identity>; |
| |
| public: |
| static constexpr OpProperties kProperties = OpProperties::Pure(); |
| |
| explicit Identity(uint64_t bitfield) : Base(bitfield) {} |
| |
| void VerifyInputs(MaglevGraphLabeller*) const { |
| // Identity is valid for all input types. |
| } |
| #ifdef V8_COMPRESS_POINTERS |
| void MarkTaggedInputsAsDecompressing() { |
| // Do not mark inputs as decompressing here, since we don't yet know |
| // whether this Identity's result needs decompression. Instead, let |
| // ValueNode::SetTaggedResultNeedsDecompress pass through Identity nodes. |
| } |
| #endif |
| void SetValueLocationConstraints() {} |
| void GenerateCode(MaglevAssembler*, const ProcessingState&) {} |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class UnaryWithFeedbackNode : public FixedInputValueNodeT<1, Derived> { |
| using Base = FixedInputValueNodeT<1, Derived>; |
| |
| public: |
| // The implementation currently calls into the runtime. |
| static constexpr OpProperties kProperties = OpProperties::JSCall(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged}; |
| |
| static constexpr int kOperandIndex = 0; |
| Input& operand_input() { return Node::input(kOperandIndex); } |
| compiler::FeedbackSource feedback() const { return feedback_; } |
| |
| protected: |
| explicit UnaryWithFeedbackNode(uint64_t bitfield, |
| const compiler::FeedbackSource& feedback) |
| : Base(bitfield), feedback_(feedback) {} |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| |
| const compiler::FeedbackSource feedback_; |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class BinaryWithFeedbackNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| // The implementation currently calls into the runtime. |
| static constexpr OpProperties kProperties = OpProperties::JSCall(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kTagged, ValueRepresentation::kTagged}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| compiler::FeedbackSource feedback() const { return feedback_; } |
| |
| protected: |
| BinaryWithFeedbackNode(uint64_t bitfield, |
| const compiler::FeedbackSource& feedback) |
| : Base(bitfield), feedback_(feedback) {} |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| |
| const compiler::FeedbackSource feedback_; |
| }; |
| |
| #define DEF_OPERATION_WITH_FEEDBACK_NODE(Name, Super, OpName) \ |
| class Name : public Super<Name, Operation::k##OpName> { \ |
| using Base = Super<Name, Operation::k##OpName>; \ |
| \ |
| public: \ |
| Name(uint64_t bitfield, const compiler::FeedbackSource& feedback) \ |
| : Base(bitfield, feedback) {} \ |
| int MaxCallStackArgs() const { return 0; } \ |
| void SetValueLocationConstraints(); \ |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); \ |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \ |
| }; |
| |
| #define DEF_UNARY_WITH_FEEDBACK_NODE(Name) \ |
| DEF_OPERATION_WITH_FEEDBACK_NODE(Generic##Name, UnaryWithFeedbackNode, Name) |
| #define DEF_BINARY_WITH_FEEDBACK_NODE(Name) \ |
| DEF_OPERATION_WITH_FEEDBACK_NODE(Generic##Name, BinaryWithFeedbackNode, Name) |
| UNARY_OPERATION_LIST(DEF_UNARY_WITH_FEEDBACK_NODE) |
| ARITHMETIC_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE) |
| COMPARISON_OPERATION_LIST(DEF_BINARY_WITH_FEEDBACK_NODE) |
| #undef DEF_UNARY_WITH_FEEDBACK_NODE |
| #undef DEF_BINARY_WITH_FEEDBACK_NODE |
| #undef DEF_OPERATION_WITH_FEEDBACK_NODE |
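| |
| // For reference, DEF_BINARY_WITH_FEEDBACK_NODE(Add) above expands to a |
| // `GenericAdd` class deriving from |
| // BinaryWithFeedbackNode<GenericAdd, Operation::kAdd>. |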
| |
| template <class Derived, Operation kOperation> |
| class Int32BinaryWithOverflowNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = |
| OpProperties::EagerDeopt() | OpProperties::Int32(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kInt32, ValueRepresentation::kInt32}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Int32BinaryWithOverflowNode(uint64_t bitfield) : Base(bitfield) {} |
| |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| #define DEF_OPERATION_NODE(Name, Super, OpName) \ |
| class Name : public Super<Name, Operation::k##OpName> { \ |
| using Base = Super<Name, Operation::k##OpName>; \ |
| \ |
| public: \ |
| explicit Name(uint64_t bitfield) : Base(bitfield) {} \ |
| void SetValueLocationConstraints(); \ |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); \ |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} \ |
| }; |
| |
| #define DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Name) \ |
| DEF_OPERATION_NODE(Int32##Name##WithOverflow, Int32BinaryWithOverflowNode, \ |
| Name) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Add) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Subtract) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Multiply) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Divide) |
| DEF_INT32_BINARY_WITH_OVERFLOW_NODE(Modulus) |
| #undef DEF_INT32_BINARY_WITH_OVERFLOW_NODE |
| |
| template <class Derived, Operation kOperation> |
| class Int32BinaryNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = OpProperties::Int32(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kInt32, ValueRepresentation::kInt32}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Int32BinaryNode(uint64_t bitfield) : Base(bitfield) {} |
| }; |
| |
| #define DEF_INT32_BINARY_NODE(Name) \ |
| DEF_OPERATION_NODE(Int32##Name, Int32BinaryNode, Name) |
| DEF_INT32_BINARY_NODE(BitwiseAnd) |
| DEF_INT32_BINARY_NODE(BitwiseOr) |
| DEF_INT32_BINARY_NODE(BitwiseXor) |
| DEF_INT32_BINARY_NODE(ShiftLeft) |
| DEF_INT32_BINARY_NODE(ShiftRight) |
| #undef DEF_INT32_BINARY_NODE |
| |
| class Int32BitwiseNot : public FixedInputValueNodeT<1, Int32BitwiseNot> { |
| using Base = FixedInputValueNodeT<1, Int32BitwiseNot>; |
| |
| public: |
| explicit Int32BitwiseNot(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::Int32(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32}; |
| |
| static constexpr int kValueIndex = 0; |
| Input& value_input() { return Node::input(kValueIndex); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class Int32UnaryWithOverflowNode : public FixedInputValueNodeT<1, Derived> { |
| using Base = FixedInputValueNodeT<1, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = |
| OpProperties::EagerDeopt() | OpProperties::Int32(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kInt32}; |
| |
| static constexpr int kValueIndex = 0; |
| Input& value_input() { return Node::input(kValueIndex); } |
| |
| protected: |
| explicit Int32UnaryWithOverflowNode(uint64_t bitfield) : Base(bitfield) {} |
| }; |
| |
| #define DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Name) \ |
| DEF_OPERATION_NODE(Int32##Name##WithOverflow, Int32UnaryWithOverflowNode, \ |
| Name) |
| DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Negate) |
| DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Increment) |
| DEF_INT32_UNARY_WITH_OVERFLOW_NODE(Decrement) |
| #undef DEF_INT32_UNARY_WITH_OVERFLOW_NODE |
| |
| class Int32ShiftRightLogical |
| : public FixedInputValueNodeT<2, Int32ShiftRightLogical> { |
| using Base = FixedInputValueNodeT<2, Int32ShiftRightLogical>; |
| |
| public: |
| explicit Int32ShiftRightLogical(uint64_t bitfield) : Base(bitfield) {} |
| |
| // Unlike the other Int32 nodes, logical right shift returns a Uint32. |
| static constexpr OpProperties kProperties = OpProperties::Uint32(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kInt32, ValueRepresentation::kInt32}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class Int32Compare : public FixedInputValueNodeT<2, Int32Compare> { |
| using Base = FixedInputValueNodeT<2, Int32Compare>; |
| |
| public: |
| explicit Int32Compare(uint64_t bitfield, Operation operation) |
| : Base(OperationBitField::update(bitfield, operation)) {} |
| |
| static constexpr Base::InputTypes kInputTypes{ValueRepresentation::kInt32, |
| ValueRepresentation::kInt32}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| constexpr Operation operation() const { |
| return OperationBitField::decode(bitfield()); |
| } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const; |
| |
| private: |
| using OperationBitField = NextBitField<Operation, 5>; |
| }; |
| |
| class Int32ToBoolean : public FixedInputValueNodeT<1, Int32ToBoolean> { |
| using Base = FixedInputValueNodeT<1, Int32ToBoolean>; |
| |
| public: |
| explicit Int32ToBoolean(uint64_t bitfield, bool flip) |
| : Base(FlipBitField::update(bitfield, flip)) {} |
| |
| static constexpr Base::InputTypes kInputTypes{ValueRepresentation::kInt32}; |
| |
| Input& value() { return Node::input(0); } |
| |
| constexpr bool flip() const { return FlipBitField::decode(bitfield()); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const; |
| |
| private: |
| using FlipBitField = NextBitField<bool, 1>; |
| }; |
| |
| class CheckedSmiIncrement |
| : public FixedInputValueNodeT<1, CheckedSmiIncrement> { |
| using Base = FixedInputValueNodeT<1, CheckedSmiIncrement>; |
| |
| public: |
| explicit CheckedSmiIncrement(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged}; |
| |
| static constexpr int kValueIndex = 0; |
| Input& value_input() { return Node::input(kValueIndex); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| class CheckedSmiDecrement |
| : public FixedInputValueNodeT<1, CheckedSmiDecrement> { |
| using Base = FixedInputValueNodeT<1, CheckedSmiDecrement>; |
| |
| public: |
| explicit CheckedSmiDecrement(uint64_t bitfield) : Base(bitfield) {} |
| |
| static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); |
| static constexpr |
| typename Base::InputTypes kInputTypes{ValueRepresentation::kTagged}; |
| |
| static constexpr int kValueIndex = 0; |
| Input& value_input() { return Node::input(kValueIndex); } |
| |
| void SetValueLocationConstraints(); |
| void GenerateCode(MaglevAssembler*, const ProcessingState&); |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| }; |
| |
| template <class Derived, Operation kOperation> |
| class Float64BinaryNode : public FixedInputValueNodeT<2, Derived> { |
| using Base = FixedInputValueNodeT<2, Derived>; |
| |
| public: |
| static constexpr OpProperties kProperties = OpProperties::Float64(); |
| static constexpr typename Base::InputTypes kInputTypes{ |
| ValueRepresentation::kHoleyFloat64, ValueRepresentation::kHoleyFloat64}; |
| |
| static constexpr int kLeftIndex = 0; |
| static constexpr int kRightIndex = 1; |
| Input& left_input() { return Node::input(kLeftIndex); } |
| Input& right_input() { return Node::input(kRightIndex); } |
| |
| protected: |
| explicit Float64BinaryNode(uint64_t bitfield) : Base(bitfield) {} |
| |
| void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} |
| |