| // Copyright 2022 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #ifndef V8_COMPILER_TURBOSHAFT_OPERATIONS_H_ |
| #define V8_COMPILER_TURBOSHAFT_OPERATIONS_H_ |
| |
| #include <cmath> |
| #include <cstdint> |
| #include <cstring> |
| #include <limits> |
| #include <optional> |
| #include <tuple> |
| #include <type_traits> |
| #include <utility> |
| |
| #include "src/base/logging.h" |
| #include "src/base/macros.h" |
| #include "src/base/platform/mutex.h" |
| #include "src/base/small-vector.h" |
| #include "src/base/template-utils.h" |
| #include "src/base/vector.h" |
| #include "src/codegen/external-reference.h" |
| #include "src/common/globals.h" |
| #include "src/compiler/common-operator.h" |
| #include "src/compiler/fast-api-calls.h" |
| #include "src/compiler/globals.h" |
| #include "src/compiler/simplified-operator.h" |
| #include "src/compiler/turboshaft/deopt-data.h" |
| #include "src/compiler/turboshaft/fast-hash.h" |
| #include "src/compiler/turboshaft/index.h" |
| #include "src/compiler/turboshaft/representations.h" |
| #include "src/compiler/turboshaft/snapshot-table.h" |
| #include "src/compiler/turboshaft/types.h" |
| #include "src/compiler/turboshaft/utils.h" |
| #include "src/compiler/turboshaft/zone-with-name.h" |
| #include "src/compiler/write-barrier-kind.h" |
| #include "src/flags/flags.h" |
| |
| #if V8_ENABLE_WEBASSEMBLY |
| #include "src/wasm/wasm-module.h" |
| #include "src/wasm/wasm-objects.h" |
| #endif |
| |
| namespace v8::internal { |
| class HeapObject; |
| V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, |
| AbortReason reason); |
| } // namespace v8::internal |
| namespace v8::internal::compiler { |
| class CallDescriptor; |
| class JSWasmCallParameters; |
| class DeoptimizeParameters; |
| class FrameStateInfo; |
| class Node; |
| enum class TrapId : int32_t; |
| } // namespace v8::internal::compiler |
| namespace v8::internal::compiler::turboshaft { |
| |
| inline constexpr char kCompilationZoneName[] = "compilation-zone"; |
| |
| class Block; |
| struct FrameStateData; |
| class Graph; |
| struct FrameStateOp; |
| |
// Strategy used when hashing Turboshaft graphs/operations.
enum class HashingStrategy {
  // Regular hashing; may incorporate pointers and other run-dependent data.
  kDefault,
  // This strategy requires that hashing a graph during builtin construction
  // (mksnapshot) produces the same hash for repeated runs of mksnapshot. This
  // requires that no pointers and external constants are used in hashes.
  kMakeSnapshotStable,
};
| |
// This belongs to `VariableReducer` in `variable-reducer.h`. It is defined here
// because of cyclic header dependencies. See `VariableReducer` for the precise
// semantics of these fields.
struct VariableData {
  // Representation of the variable's value, if any is known.
  MaybeRegisterRepresentation rep;
  // Whether the variable is invariant inside a loop (used by the reducer;
  // exact semantics live in variable-reducer.h).
  bool loop_invariant;
  // Intrusive-set hook so the reducer can track this variable in its set of
  // active loop variables without extra allocation.
  IntrusiveSetIndex active_loop_variables_index = {};
};
// A `Variable` is a key into the reducer's snapshot table of VariableData.
using Variable = SnapshotTable<OpIndex, VariableData>::Key;
| |
| // DEFINING NEW OPERATIONS |
| // ======================= |
| // For each operation `Foo`, we define: |
| // - An entry V(Foo) in one of the TURBOSHAFT*OPERATION list (eg, |
| // TURBOSHAFT_OPERATION_LIST_BLOCK_TERMINATOR, |
| // TURBOSHAFT_SIMPLIFIED_OPERATION_LIST etc), which defines |
| // `Opcode::kFoo` and whether the operation is a block terminator. |
| // - A `struct FooOp`, which derives from either `OperationT<FooOp>` or |
//   `FixedArityOperationT<k, FooOp>` if the op always has exactly `k` inputs.
| // Furthermore, the struct has to contain: |
| // - A bunch of options directly as public fields. |
| // - A getter `options()` returning a tuple of all these options. This is used |
| // for default printing and hashing. Alternatively, `void |
| // PrintOptions(std::ostream& os) const` and `size_t hash_value() const` can |
| // also be defined manually. |
| // - Getters for named inputs. |
| // - A constructor that first takes all the inputs and then all the options. For |
| // a variable arity operation where the constructor doesn't take the inputs as |
| // a single base::Vector<OpIndex> argument, it's also necessary to overwrite |
| // the static `New` function, see `CallOp` for an example. |
| // - An `Explode` method that unpacks an operation and invokes the passed |
| // callback. If the operation inherits from FixedArityOperationT, the base |
| // class already provides the required implementation. |
| // - `OpEffects` as either a static constexpr member `effects` or a |
| // non-static method `Effects()` if the effects depend on the particular |
| // operation and not just the opcode. |
// - outputs_rep/inputs_rep methods, which should return a vector describing the
//   representation of the outputs and inputs of this operation.
| // After defining the struct here, you'll also need to integrate it in |
| // Turboshaft: |
| // - If Foo is not lowered before reaching the instruction selector, handle |
| // Opcode::kFoo in the Turboshaft VisitNode of instruction-selector.cc. |
| |
// Operation list for string case conversion, only available with ICU.
#ifdef V8_INTL_SUPPORT
#define TURBOSHAFT_INTL_OPERATION_LIST(V) V(StringToCaseIntl)
#else
#define TURBOSHAFT_INTL_OPERATION_LIST(V)
#endif  // V8_INTL_SUPPORT

#ifdef V8_ENABLE_WEBASSEMBLY
// These operations should be lowered to Machine operations during
// WasmLoweringPhase.
#define TURBOSHAFT_WASM_OPERATION_LIST(V) \
  V(WasmStackCheck)                       \
  V(GlobalGet)                            \
  V(GlobalSet)                            \
  V(Null)                                 \
  V(IsNull)                               \
  V(AssertNotNull)                        \
  V(RttCanon)                             \
  V(WasmTypeCheck)                        \
  V(WasmTypeCast)                         \
  V(AnyConvertExtern)                     \
  V(ExternConvertAny)                     \
  V(WasmTypeAnnotation)                   \
  V(StructGet)                            \
  V(StructSet)                            \
  V(ArrayGet)                             \
  V(ArraySet)                             \
  V(ArrayLength)                          \
  V(WasmAllocateArray)                    \
  V(WasmAllocateStruct)                   \
  V(WasmRefFunc)                          \
  V(StringAsWtf16)                        \
  V(StringPrepareForGetCodeUnit)

#if V8_ENABLE_WASM_SIMD256_REVEC
// SIMD256 operations shared by all architectures that support revectorization.
// NOTE(review): "COMMOM" is a pre-existing misspelling of "COMMON"; it is kept
// as-is here because renaming the macro would affect all of its users.
#define TURBOSHAFT_SIMD256_COMMOM_OPERATION_LIST(V) \
  V(Simd256Constant)                                \
  V(Simd256Extract128Lane)                          \
  V(Simd256LoadTransform)                           \
  V(Simd256Unary)                                   \
  V(Simd256Binop)                                   \
  V(Simd256Shift)                                   \
  V(Simd256Ternary)                                 \
  V(Simd256Splat)                                   \
  V(SimdPack128To256)

#if V8_TARGET_ARCH_X64
// SIMD256 operations that only exist on x64.
#define TURBOSHAFT_SIMD256_X64_OPERATION_LIST(V) \
  V(Simd256Shufd)                                \
  V(Simd256Shufps)                               \
  V(Simd256Unpack)

#define TURBOSHAFT_SIMD256_OPERATION_LIST(V)  \
  TURBOSHAFT_SIMD256_COMMOM_OPERATION_LIST(V) \
  TURBOSHAFT_SIMD256_X64_OPERATION_LIST(V)
#else
#define TURBOSHAFT_SIMD256_OPERATION_LIST(V) \
  TURBOSHAFT_SIMD256_COMMOM_OPERATION_LIST(V)
#endif  // V8_TARGET_ARCH_X64

#else
#define TURBOSHAFT_SIMD256_OPERATION_LIST(V)
#endif  // V8_ENABLE_WASM_SIMD256_REVEC

#define TURBOSHAFT_SIMD_OPERATION_LIST(V) \
  V(Simd128Constant)                      \
  V(Simd128Binop)                         \
  V(Simd128Unary)                         \
  V(Simd128Reduce)                        \
  V(Simd128Shift)                         \
  V(Simd128Test)                          \
  V(Simd128Splat)                         \
  V(Simd128Ternary)                       \
  V(Simd128ExtractLane)                   \
  V(Simd128ReplaceLane)                   \
  V(Simd128LaneMemory)                    \
  V(Simd128LoadTransform)                 \
  V(Simd128Shuffle)                       \
  TURBOSHAFT_SIMD256_OPERATION_LIST(V)

#else
// Without WebAssembly support these lists are empty.
#define TURBOSHAFT_WASM_OPERATION_LIST(V)
#define TURBOSHAFT_SIMD_OPERATION_LIST(V)
#endif  // V8_ENABLE_WEBASSEMBLY

// Operations that terminate a basic block. This list must come first in
// TURBOSHAFT_OPERATION_LIST below, so that `IsBlockTerminator()` can simply
// compare the opcode index against kNumberOfBlockTerminatorOpcodes.
#define TURBOSHAFT_OPERATION_LIST_BLOCK_TERMINATOR(V) \
  V(CheckException)                                   \
  V(Goto)                                             \
  V(TailCall)                                         \
  V(Unreachable)                                      \
  V(Return)                                           \
  V(Branch)                                           \
  V(Switch)                                           \
  V(Deoptimize)

// Accessors for continuation-preserved embedder data, only when enabled.
#ifdef V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA
#define TURBOSHAFT_CPED_OPERATION_LIST(V) \
  V(GetContinuationPreservedEmbedderData) \
  V(SetContinuationPreservedEmbedderData)
#else
#define TURBOSHAFT_CPED_OPERATION_LIST(V)
#endif  // V8_ENABLE_CONTINUATION_PRESERVED_EMBEDDER_DATA

// These operations should be lowered to Machine operations during
// MachineLoweringPhase.
#define TURBOSHAFT_SIMPLIFIED_OPERATION_LIST(V) \
  TURBOSHAFT_INTL_OPERATION_LIST(V)             \
  TURBOSHAFT_CPED_OPERATION_LIST(V)             \
  V(ArgumentsLength)                            \
  V(BigIntBinop)                                \
  V(BigIntComparison)                           \
  V(BigIntUnary)                                \
  V(CheckedClosure)                             \
  V(WordBinopDeoptOnOverflow)                   \
  V(CheckEqualsInternalizedString)              \
  V(CheckMaps)                                  \
  V(CompareMaps)                                \
  V(Float64Is)                                  \
  V(ObjectIs)                                   \
  V(ObjectIsNumericValue)                       \
  V(Float64SameValue)                           \
  V(SameValue)                                  \
  V(ChangeOrDeopt)                              \
  V(Convert)                                    \
  V(ConvertJSPrimitiveToObject)                 \
  V(ConvertJSPrimitiveToUntagged)               \
  V(ConvertJSPrimitiveToUntaggedOrDeopt)        \
  V(ConvertUntaggedToJSPrimitive)               \
  V(ConvertUntaggedToJSPrimitiveOrDeopt)        \
  V(TruncateJSPrimitiveToUntagged)              \
  V(TruncateJSPrimitiveToUntaggedOrDeopt)       \
  V(DoubleArrayMinMax)                          \
  V(EnsureWritableFastElements)                 \
  V(FastApiCall)                                \
  V(FindOrderedHashEntry)                       \
  V(LoadDataViewElement)                        \
  V(LoadFieldByIndex)                           \
  V(LoadMessage)                                \
  V(LoadStackArgument)                          \
  V(LoadTypedElement)                           \
  V(StoreDataViewElement)                       \
  V(StoreMessage)                               \
  V(StoreTypedElement)                          \
  V(MaybeGrowFastElements)                      \
  V(NewArgumentsElements)                       \
  V(NewArray)                                   \
  V(RuntimeAbort)                               \
  V(StaticAssert)                               \
  V(StringAt)                                   \
  V(StringComparison)                           \
  V(StringConcat)                               \
  V(StringFromCodePointAt)                      \
  V(StringIndexOf)                              \
  V(StringLength)                               \
  V(TypedArrayLength)                           \
  V(StringSubstring)                            \
  V(NewConsString)                              \
  V(TransitionAndStoreArrayElement)             \
  V(TransitionElementsKind)                     \
  V(TransitionElementsKindOrCheckMap)           \
  V(DebugPrint)                                 \
  V(CheckTurboshaftTypeOf)                      \
  V(Word32SignHint)

// These Operations are the lowest level handled by Turboshaft, and are
// supported by the InstructionSelector.
#define TURBOSHAFT_MACHINE_OPERATION_LIST(V) \
  V(WordBinop)                               \
  V(FloatBinop)                              \
  V(Word32PairBinop)                         \
  V(OverflowCheckedBinop)                    \
  V(WordUnary)                               \
  V(OverflowCheckedUnary)                    \
  V(FloatUnary)                              \
  V(Shift)                                   \
  V(Comparison)                              \
  V(Change)                                  \
  V(TryChange)                               \
  V(BitcastWord32PairToFloat64)              \
  V(TaggedBitcast)                           \
  V(Select)                                  \
  V(PendingLoopPhi)                          \
  V(Constant)                                \
  V(LoadRootRegister)                        \
  V(Load)                                    \
  V(Store)                                   \
  V(Retain)                                  \
  V(Parameter)                               \
  V(OsrValue)                                \
  V(StackPointerGreaterThan)                 \
  V(StackSlot)                               \
  V(FrameConstant)                           \
  V(DeoptimizeIf)                            \
  IF_WASM(V, TrapIf)                         \
  IF_WASM(V, LoadStackPointer)               \
  IF_WASM(V, SetStackPointer)                \
  V(Phi)                                     \
  V(FrameState)                              \
  V(Call)                                    \
  V(CatchBlockBegin)                         \
  V(DidntThrow)                              \
  V(Tuple)                                   \
  V(Projection)                              \
  V(DebugBreak)                              \
  V(AssumeMap)                               \
  V(AtomicRMW)                               \
  V(AtomicWord32Pair)                        \
  V(MemoryBarrier)                           \
  V(Comment)                                 \
  V(Dead)                                    \
  V(AbortCSADcheck)

// Generic JS operations that can throw (see MayThrow below).
#define TURBOSHAFT_JS_THROWING_OPERATION_LIST(V) \
  V(GenericBinop)                                \
  V(GenericUnop)                                 \
  V(ToNumberOrNumeric)

#define TURBOSHAFT_JS_OPERATION_LIST(V) \
  TURBOSHAFT_JS_THROWING_OPERATION_LIST(V)

// These are operations that are not Machine operations and need to be lowered
// before Instruction Selection, but they are not lowered during the
// MachineLoweringPhase.
#define TURBOSHAFT_OTHER_OPERATION_LIST(V) \
  V(Allocate)                              \
  V(DecodeExternalPointer)                 \
  V(JSStackCheck)

#define TURBOSHAFT_OPERATION_LIST_NOT_BLOCK_TERMINATOR(V) \
  TURBOSHAFT_WASM_OPERATION_LIST(V)                       \
  TURBOSHAFT_SIMD_OPERATION_LIST(V)                       \
  TURBOSHAFT_MACHINE_OPERATION_LIST(V)                    \
  TURBOSHAFT_SIMPLIFIED_OPERATION_LIST(V)                 \
  TURBOSHAFT_JS_OPERATION_LIST(V)                         \
  TURBOSHAFT_OTHER_OPERATION_LIST(V)

// The full operation list: block terminators first (relied upon by
// IsBlockTerminator), then everything else.
#define TURBOSHAFT_OPERATION_LIST(V)             \
  TURBOSHAFT_OPERATION_LIST_BLOCK_TERMINATOR(V)  \
  TURBOSHAFT_OPERATION_LIST_NOT_BLOCK_TERMINATOR(V)
| |
// One enumerator `kFoo` per operation `Foo`, in list order. Block terminators
// come first (see TURBOSHAFT_OPERATION_LIST above).
enum class Opcode : uint8_t {
#define ENUM_CONSTANT(Name) k##Name,
  TURBOSHAFT_OPERATION_LIST(ENUM_CONSTANT)
#undef ENUM_CONSTANT
};

// Returns a human-readable name for `opcode` (defined out of line).
const char* OpcodeName(Opcode opcode);
// Converts an Opcode to its underlying integral index.
constexpr std::underlying_type_t<Opcode> OpcodeIndex(Opcode x) {
  return static_cast<std::underlying_type_t<Opcode>>(x);
}
| |
// Forward-declare `struct FooOp` for every operation.
#define FORWARD_DECLARE(Name) struct Name##Op;
TURBOSHAFT_OPERATION_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE

namespace detail {
// Compile-time map from an operation struct (e.g. `GotoOp`) to its Opcode.
// The primary template is intentionally empty so that using it with a
// non-operation type fails to provide `value`.
template <class Op>
struct operation_to_opcode_map {};

#define OPERATION_OPCODE_MAP_CASE(Name)    \
  template <>                              \
  struct operation_to_opcode_map<Name##Op> \
      : std::integral_constant<Opcode, Opcode::k##Name> {};
TURBOSHAFT_OPERATION_LIST(OPERATION_OPCODE_MAP_CASE)
#undef OPERATION_OPCODE_MAP_CASE
}  // namespace detail

// Public entry points of the map; cv- and ref-qualifiers on `Op` are stripped
// before the lookup.
template <typename Op>
struct operation_to_opcode
    : detail::operation_to_opcode_map<std::remove_cvref_t<Op>> {};
template <typename Op>
constexpr Opcode operation_to_opcode_v = operation_to_opcode<Op>::value;

// A compile-time (mask, value) pair associated with operation type `Op`,
// describing the subset of `Op` operations whose masked bits equal `Value`.
template <typename Op, uint64_t Mask, uint64_t Value>
struct OpMaskT {
  using operation = Op;
  static constexpr uint64_t mask = Mask;
  static constexpr uint64_t value = Value;
};
| |
| #define COUNT_OPCODES(Name) +1 |
| constexpr uint16_t kNumberOfBlockTerminatorOpcodes = |
| 0 TURBOSHAFT_OPERATION_LIST_BLOCK_TERMINATOR(COUNT_OPCODES); |
| #undef COUNT_OPCODES |
| |
| #define COUNT_OPCODES(Name) +1 |
| constexpr uint16_t kNumberOfOpcodes = |
| 0 TURBOSHAFT_OPERATION_LIST(COUNT_OPCODES); |
| #undef COUNT_OPCODES |
| |
| inline constexpr bool IsBlockTerminator(Opcode opcode) { |
| return OpcodeIndex(opcode) < kNumberOfBlockTerminatorOpcodes; |
| } |
| |
// Operations that can throw and that have static output representations.
#define TURBOSHAFT_THROWING_STATIC_OUTPUTS_OPERATIONS_LIST(V) \
  TURBOSHAFT_JS_THROWING_OPERATION_LIST(V)

// This list repeats the operations that may throw and need to be followed by
// `DidntThrow`.
#define TURBOSHAFT_THROWING_OPERATIONS_LIST(V)          \
  TURBOSHAFT_THROWING_STATIC_OUTPUTS_OPERATIONS_LIST(V) \
  V(Call)                                               \
  V(FastApiCall)

// Operations that need to be followed by `DidntThrowOp`, i.e. returns true
// iff `opcode` appears in TURBOSHAFT_THROWING_OPERATIONS_LIST.
inline constexpr bool MayThrow(Opcode opcode) {
#define CASE(Name) case Opcode::k##Name:
  switch (opcode) {
    TURBOSHAFT_THROWING_OPERATIONS_LIST(CASE)
    return true;
    default:
      return false;
  }
#undef CASE
}
| |
| // For Throwing operations, outputs_rep() are empty, because the values are |
| // produced by the subsequent DidntThrow. Nevertheless, the operation has to |
| // define its output representations in an array that DidntThrow can then reuse |
// to know what its outputs are. Additionally, when using Maglev as a frontend,
// catch handlers that have never been reached so far are not emitted, and the
// throwing operations lazy-deopt instead of throwing.
| // |
| // That's where the THROWING_OP_BOILERPLATE macro comes in: it creates an array |
| // of representations that DidntThrow can use, and will define outputs_rep() to |
| // be empty, and takes care of creating a LazyDeoptOnThrow member. For instance: |
| // |
| // THROWING_OP_BOILERPLATE(RegisterRepresentation::Tagged(), |
| // RegisterRepresentation::Word32()) |
| // |
| // Warning: don't forget to add `lazy_deopt_on_throw` to the `options` of your |
| // Operation (you'll get a compile-time error if you forget it). |
// See the explanatory comment above for why throwing operations need this:
// `kOutReps` stores the real output representations for `DidntThrow` to reuse,
// while `outputs_rep()` itself is empty, and `lazy_deopt_on_throw` is the
// member that must be listed in the operation's `options`.
#define THROWING_OP_BOILERPLATE(...)                                         \
  static constexpr RegisterRepresentation kOutputRepsStorage[]{__VA_ARGS__}; \
  static constexpr base::Vector<const RegisterRepresentation> kOutReps =     \
      base::VectorOf(kOutputRepsStorage, arraysize(kOutputRepsStorage));     \
  base::Vector<const RegisterRepresentation> outputs_rep() const {           \
    return {};                                                               \
  }                                                                          \
  LazyDeoptOnThrow lazy_deopt_on_throw;
| |
| template <typename T> |
| inline base::Vector<T> InitVectorOf( |
| ZoneVector<T>& storage, |
| std::initializer_list<RegisterRepresentation> values) { |
| storage.resize(values.size()); |
| size_t i = 0; |
| for (auto&& value : values) { |
| storage[i++] = value; |
| } |
| return base::VectorOf(storage); |
| } |
| |
// Factory for compact, statically-allocated input-representation vectors.
// `rep_map` below stores every representation twice in a row, so a pointer to
// the first copy can serve as the backing storage for both a 1-element
// (SingleRep) and a 2-element (PairOf) vector without any allocation.
class InputsRepFactory {
 public:
  // A 1-element vector containing `rep`.
  constexpr static base::Vector<const MaybeRegisterRepresentation> SingleRep(
      RegisterRepresentation rep) {
    return base::VectorOf(ToMaybeRepPointer(rep), 1);
  }

  // A 2-element vector containing `rep` twice.
  constexpr static base::Vector<const MaybeRegisterRepresentation> PairOf(
      RegisterRepresentation rep) {
    return base::VectorOf(ToMaybeRepPointer(rep), 2);
  }

 protected:
  // Returns a pointer to the first of the two consecutive `rep` entries in
  // `rep_map`. Relies on RegisterRepresentation enum values matching the
  // order of `rep_map` — TODO confirm if that enum is ever reordered.
  constexpr static const MaybeRegisterRepresentation* ToMaybeRepPointer(
      RegisterRepresentation rep) {
    size_t index = static_cast<size_t>(rep.value()) * 2;
    DCHECK_LT(index, arraysize(rep_map));
    return &rep_map[index];
  }

 private:
  // Each representation appears twice in a row; see class comment.
  constexpr static MaybeRegisterRepresentation rep_map[] = {
      MaybeRegisterRepresentation::Word32(),
      MaybeRegisterRepresentation::Word32(),
      MaybeRegisterRepresentation::Word64(),
      MaybeRegisterRepresentation::Word64(),
      MaybeRegisterRepresentation::Float32(),
      MaybeRegisterRepresentation::Float32(),
      MaybeRegisterRepresentation::Float64(),
      MaybeRegisterRepresentation::Float64(),
      MaybeRegisterRepresentation::Tagged(),
      MaybeRegisterRepresentation::Tagged(),
      MaybeRegisterRepresentation::Compressed(),
      MaybeRegisterRepresentation::Compressed(),
      MaybeRegisterRepresentation::Simd128(),
      MaybeRegisterRepresentation::Simd128(),
#ifdef V8_ENABLE_WASM_SIMD256_REVEC
      MaybeRegisterRepresentation::Simd256(),
      MaybeRegisterRepresentation::Simd256(),
#endif  // V8_ENABLE_WASM_SIMD256_REVEC
  };
};
| |
// A bit-set of effect dimensions, packed into one byte so it can be
// manipulated as a plain integer via bit_cast (see `Bits` below; the
// static_assert after the struct pins the size).
struct EffectDimensions {
  // Produced by loads, consumed by operations that should not move before loads
  // because they change memory.
  bool load_heap_memory : 1;
  bool load_off_heap_memory : 1;

  // Produced by stores, consumed by operations that should not move before
  // stores because they load or store memory.
  bool store_heap_memory : 1;
  bool store_off_heap_memory : 1;

  // Operations that perform raw heap access (like initialization) consume
  // `before_raw_heap_access` and produce `after_raw_heap_access`.
  // Operations that need the heap to be in a consistent state produce
  // `before_raw_heap_access` and consume `after_raw_heap_access`.
  bool before_raw_heap_access : 1;
  // Produced by operations that access raw/untagged pointers into the
  // heap or keep such a pointer alive, consumed by operations that can GC to
  // ensure they don't move before the raw access.
  bool after_raw_heap_access : 1;

  // Produced by any operation that can affect whether subsequent operations are
  // executed, for example by branching, deopting, throwing or aborting.
  // Consumed by all operations that should not be hoisted before a check
  // because they rely on it. For example, loads usually rely on the shape of
  // the heap object or the index being in bounds.
  bool control_flow : 1;
  // We need to ensure that the padding bits have a specified value, as they are
  // observable in bitwise operations.
  uint8_t unused_padding : 1;

  using Bits = uint8_t;
  constexpr EffectDimensions()
      : load_heap_memory(false),
        load_off_heap_memory(false),
        store_heap_memory(false),
        store_off_heap_memory(false),
        before_raw_heap_access(false),
        after_raw_heap_access(false),
        control_flow(false),
        unused_padding(0) {}
  // Raw byte view; valid because all bits (incl. padding) are initialized.
  Bits bits() const { return base::bit_cast<Bits>(*this); }
  static EffectDimensions FromBits(Bits bits) {
    return base::bit_cast<EffectDimensions>(bits);
  }
  bool operator==(EffectDimensions other) const {
    return bits() == other.bits();
  }
  bool operator!=(EffectDimensions other) const {
    return bits() != other.bits();
  }
};
static_assert(sizeof(EffectDimensions) == sizeof(EffectDimensions::Bits));
| |
| // Possible reorderings are restricted using two bit vectors: `produces` and |
| // `consumes`. Two operations cannot be reordered if the first operation |
| // produces an effect dimension that the second operation consumes. This is not |
| // necessarily symmetric. For example, it is possible to reorder |
| // Load(x) |
| // CheckMaps(y) |
| // to become |
| // CheckMaps(x) |
| // Load(y) |
| // because the load cannot affect the map check. But the other direction could |
| // be unsound, if the load depends on the map check having been executed. The |
| // former reordering is useful to push a load across a check into a branch if |
| // it is only needed there. The effect system expresses this by having the map |
| // check produce `EffectDimensions::control_flow` and the load consuming |
| // `EffectDimensions::control_flow`. If the producing operation comes before the |
| // consuming operation, then this order has to be preserved. But if the |
| // consuming operation comes first, then we are free to reorder them. Operations |
| // that produce and consume the same effect dimension always have a fixed order |
| // among themselves. For example, stores produce and consume the store |
| // dimensions. It is possible for operations to be reorderable unless certain |
| // other operations appear in-between. This way, the IR can be generous with |
| // reorderings as long as all operations are high-level, but become more |
| // restrictive as soon as low-level operations appear. For example, allocations |
| // can be freely reordered. Tagged bitcasts can be reordered with other tagged |
| // bitcasts. But a tagged bitcast cannot be reordered with allocations, as this |
| // would mean that an untagged pointer can be alive while a GC is happening. The |
| // way this works is that allocations produce the `before_raw_heap_access` |
| // dimension and consume the `after_raw_heap_access` dimension to stay either |
| // before or after a raw heap access. This means that there are no ordering |
// constraints between allocations themselves. Bitcasts should not
// be moved across an allocation. We treat them as raw heap access by letting
// them consume `before_raw_heap_access` and produce `after_raw_heap_access`.
// This way, allocations cannot be moved across bitcasts. Similarly,
| // initializing stores and uninitialized allocations are classified as raw heap |
| // access, to prevent any operation that relies on a consistent heap state to be |
| // scheduled in the middle of an inline allocation. As long as we didn't lower |
| // to raw heap accesses yet, pure allocating operations or operations reading |
| // immutable memory can float freely. As soon as there are raw heap accesses, |
| // they become more restricted in their movement. Note that calls are not the |
| // most side-effectful operations, as they do not leave the heap in an |
| // inconsistent state, so they do not need to be marked as raw heap access. |
// The full effect description of an operation: which dimensions it produces
// and consumes (see the comment block above), plus a few extra flags. Packed
// so the whole struct round-trips through a uint32_t (static_assert below).
struct OpEffects {
  EffectDimensions produces;
  EffectDimensions consumes;

  // Operations that cannot be merged because they produce identity. That is,
  // every repetition can produce a different result, but the order in which
  // they are executed does not matter. All we care about is that they are
  // different. Producing a random number or allocating an object with
  // observable pointer equality are examples. Producing identity doesn't
  // restrict reordering in straight-line code, but we must prevent using GVN or
  // moving identity-producing operations in- or out of loops.
  bool can_create_identity : 1;
  // If the operation can allocate and therefore can trigger GC.
  bool can_allocate : 1;
  // Instructions that have no uses but are `required_when_unused` should not be
  // removed.
  bool required_when_unused : 1;
  // We need to ensure that the padding bits have a specified value, as they are
  // observable in bitwise operations. This is split into two fields so that
  // also MSVC creates the correct object layout.
  uint8_t unused_padding_1 : 5;
  uint8_t unused_padding_2;

  constexpr OpEffects()
      : can_create_identity(false),
        can_allocate(false),
        required_when_unused(false),
        unused_padding_1(0),
        unused_padding_2(0) {}

  using Bits = uint32_t;
  // Raw view of all fields; valid because every bit is initialized.
  Bits bits() const { return base::bit_cast<Bits>(*this); }
  static OpEffects FromBits(Bits bits) {
    return base::bit_cast<OpEffects>(bits);
  }

  bool operator==(OpEffects other) const { return bits() == other.bits(); }
  bool operator!=(OpEffects other) const { return bits() != other.bits(); }
  OpEffects operator|(OpEffects other) const {
    return FromBits(bits() | other.bits());
  }
  OpEffects operator&(OpEffects other) const {
    return FromBits(bits() & other.bits());
  }
  bool IsSubsetOf(OpEffects other) const {
    return (bits() & ~other.bits()) == 0;
  }

  // The following builders return a copy of *this with additional effects set;
  // they are meant to be chained.

  constexpr OpEffects AssumesConsistentHeap() const {
    OpEffects result = *this;
    // Do not move the operation into a region with raw heap access.
    result.produces.before_raw_heap_access = true;
    result.consumes.after_raw_heap_access = true;
    return result;
  }
  // Like `CanAllocate()`, but allocated values must be immutable and not have
  // identity (for example `HeapNumber`).
  // Note that if we first allocate something as mutable and later make it
  // immutable, we have to allocate it with identity.
  constexpr OpEffects CanAllocateWithoutIdentity() const {
    OpEffects result = AssumesConsistentHeap();
    result.can_allocate = true;
    return result;
  }
  // Allocations change the GC state and can trigger GC, as well as produce a
  // fresh identity.
  constexpr OpEffects CanAllocate() const {
    return CanAllocateWithoutIdentity().CanCreateIdentity();
  }
  // The operation can leave the heap in an inconsistent state or have untagged
  // pointers into the heap as input or output.
  constexpr OpEffects CanDoRawHeapAccess() const {
    OpEffects result = *this;
    // Do not move any operation that relies on a consistent heap state across.
    result.produces.after_raw_heap_access = true;
    result.consumes.before_raw_heap_access = true;
    return result;
  }
  // Reading mutable heap memory. Reading immutable memory doesn't count.
  constexpr OpEffects CanReadHeapMemory() const {
    OpEffects result = *this;
    result.produces.load_heap_memory = true;
    // Do not reorder before stores.
    result.consumes.store_heap_memory = true;
    return result;
  }
  // Reading mutable off-heap memory or other input. Reading immutable memory
  // doesn't count.
  constexpr OpEffects CanReadOffHeapMemory() const {
    OpEffects result = *this;
    result.produces.load_off_heap_memory = true;
    // Do not reorder before stores.
    result.consumes.store_off_heap_memory = true;
    return result;
  }
  // Writing any off-memory or other output.
  constexpr OpEffects CanWriteOffHeapMemory() const {
    OpEffects result = *this;
    result.required_when_unused = true;
    result.produces.store_off_heap_memory = true;
    // Do not reorder before stores.
    result.consumes.store_off_heap_memory = true;
    // Do not reorder before loads.
    result.consumes.load_off_heap_memory = true;
    // Do not move before deopting or aborting operations.
    result.consumes.control_flow = true;
    return result;
  }
  // Writing heap memory that existed before the operation started. Initializing
  // newly allocated memory doesn't count.
  constexpr OpEffects CanWriteHeapMemory() const {
    OpEffects result = *this;
    result.required_when_unused = true;
    result.produces.store_heap_memory = true;
    // Do not reorder before stores.
    result.consumes.store_heap_memory = true;
    // Do not reorder before loads.
    result.consumes.load_heap_memory = true;
    // Do not move before deopting or aborting operations.
    result.consumes.control_flow = true;
    return result;
  }
  // Writing any memory or other output, on- or off-heap.
  constexpr OpEffects CanWriteMemory() const {
    return CanWriteHeapMemory().CanWriteOffHeapMemory();
  }
  // Reading any memory or other input, on- or off-heap.
  constexpr OpEffects CanReadMemory() const {
    return CanReadHeapMemory().CanReadOffHeapMemory();
  }
  // The operation might read immutable data from the heap, so it can be freely
  // reordered with operations that keep the heap in a consistent state. But we
  // must prevent the operation from observing an incompletely initialized
  // object.
  constexpr OpEffects CanReadImmutableMemory() const {
    OpEffects result = AssumesConsistentHeap();
    return result;
  }
  // Partial operations that are only safe to execute after we performed certain
  // checks, for example loads may only be safe after a corresponding bound or
  // map checks.
  constexpr OpEffects CanDependOnChecks() const {
    OpEffects result = *this;
    result.consumes.control_flow = true;
    return result;
  }
  // The operation can affect control flow (like branch, deopt, throw or crash).
  constexpr OpEffects CanChangeControlFlow() const {
    OpEffects result = *this;
    result.required_when_unused = true;
    // Signal that this changes control flow. Prevents stores or operations
    // relying on checks from flowing before this operation.
    result.produces.control_flow = true;
    // Stores must not flow past something that affects control flow.
    result.consumes.store_heap_memory = true;
    result.consumes.store_off_heap_memory = true;
    return result;
  }
  // Execution of the current function may end with this operation, for example
  // because of return, deopt, exception throw or abort/trap.
  constexpr OpEffects CanLeaveCurrentFunction() const {
    // All memory becomes observable.
    return CanChangeControlFlow().CanReadMemory().RequiredWhenUnused();
  }
  // The operation can deopt.
  constexpr OpEffects CanDeopt() const {
    return CanLeaveCurrentFunction()
        // We might depend on previous checks to avoid deopting.
        .CanDependOnChecks();
  }
  // Producing identity doesn't prevent reorderings, but it prevents GVN from
  // de-duplicating identical operations.
  constexpr OpEffects CanCreateIdentity() const {
    OpEffects result = *this;
    result.can_create_identity = true;
    return result;
  }
  // The set of all possible effects.
  constexpr OpEffects CanCallAnything() const {
    return CanReadMemory()
        .CanWriteMemory()
        .CanAllocate()
        .CanChangeControlFlow()
        .CanDependOnChecks()
        .RequiredWhenUnused();
  }
  constexpr OpEffects RequiredWhenUnused() const {
    OpEffects result = *this;
    result.required_when_unused = true;
    return result;
  }

  // The following predicates query the accumulated effects.

  // Operations that can be removed if their result is not used. Unused
  // allocations can be removed.
  constexpr bool is_required_when_unused() const {
    return required_when_unused;
  }
  // Operations that can be moved before a preceding branch or check.
  bool hoistable_before_a_branch() const {
    // Since this excludes `CanDependOnChecks()`, most loads actually cannot be
    // hoisted.
    return IsSubsetOf(OpEffects().CanReadMemory());
  }
  // Operations that can be eliminated via value numbering, which means that if
  // there are two identical operations where one dominates the other, then the
  // second can be replaced with the first one. This is safe for deopting or
  // throwing operations, because the absence of read effects guarantees
  // deterministic behavior.
  bool repetition_is_eliminatable() const {
    return IsSubsetOf(OpEffects()
                          .CanDependOnChecks()
                          .CanChangeControlFlow()
                          .CanAllocateWithoutIdentity());
  }
  bool can_read_mutable_memory() const {
    return produces.load_heap_memory | produces.load_off_heap_memory;
  }
  bool requires_consistent_heap() const {
    return produces.before_raw_heap_access | consumes.after_raw_heap_access;
  }
  bool can_write() const {
    return produces.store_heap_memory | produces.store_off_heap_memory;
  }
  bool can_be_constant_folded() const {
    // Operations that CanDependOnChecks can still be constant-folded. If they
    // did indeed depend on a check, then their result will only be used after
    // said check has been executed anyways.
    return IsSubsetOf(OpEffects().CanDependOnChecks());
  }
};
static_assert(sizeof(OpEffects) == sizeof(OpEffects::Bits));
| |
// Hashes the raw effect bits: OpEffects is a plain bitset (see the
// static_assert above), so the bit pattern fully identifies the value.
V8_INLINE size_t hash_value(OpEffects effects) {
  return static_cast<size_t>(effects.bits());
}
| |
| inline bool CannotSwapOperations(OpEffects first, OpEffects second) { |
| return first.produces.bits() & (second.consumes.bits()); |
| } |
| |
| V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, |
| OpEffects op_effects); |
| |
| // SaturatedUint8 is a wrapper around a uint8_t, which can be incremented and |
| // decremented with the `Incr` and `Decr` methods. These methods prevent over- |
| // and underflow, and saturate once the uint8_t reaches the maximum (255): |
| // future increment and decrement will not change the value then. |
| // We purposefuly do not expose the uint8_t directly, so that users go through |
| // Incr/Decr/SetToZero/SetToOne to manipulate it, so that the saturation and |
| // lack of over/underflow is always respected. |
| class SaturatedUint8 { |
| public: |
| SaturatedUint8() = default; |
| |
| void Incr() { |
| if (V8_LIKELY(val != kMax)) { |
| val++; |
| } |
| } |
| void Decr() { |
| if (V8_LIKELY(val != 0 && val != kMax)) { |
| val--; |
| } |
| } |
| |
| void SetToZero() { val = 0; } |
| void SetToOne() { val = 1; } |
| |
| bool IsZero() const { return val == 0; } |
| bool IsOne() const { return val == 1; } |
| bool IsSaturated() const { return val == kMax; } |
| uint8_t Get() const { return val; } |
| |
| SaturatedUint8& operator+=(const SaturatedUint8& other) { |
| uint32_t sum = val; |
| sum += other.val; |
| val = static_cast<uint8_t>(std::min<uint32_t>(sum, kMax)); |
| return *this; |
| } |
| |
| static SaturatedUint8 FromSize(size_t value) { |
| uint8_t val = static_cast<uint8_t>(std::min<size_t>(value, kMax)); |
| return SaturatedUint8{val}; |
| } |
| |
| private: |
| explicit SaturatedUint8(uint8_t val) : val(val) {} |
| uint8_t val = 0; |
| static constexpr uint8_t kMax = std::numeric_limits<uint8_t>::max(); |
| }; |
| |
// underlying_operation<> is used to extract the operation type from OpMaskT
// classes used in Operation::Is<> and Operation::TryCast<>.
// Primary template: a plain operation type maps to itself.
template <typename T>
struct underlying_operation {
  using type = T;
};
// Specialization: an opmask maps to the operation it masks.
template <typename T, uint64_t M, uint64_t V>
struct underlying_operation<OpMaskT<T, M, V>> {
  using type = T;
};
template <typename T>
using underlying_operation_t = typename underlying_operation<T>::type;
| |
// Baseclass for all Turboshaft operations.
// The `alignas(OpIndex)` is necessary because it is followed by an array of
// `OpIndex` inputs.
struct alignas(OpIndex) Operation {
  // Input mapper that returns every index unchanged. Useful as a mapper when
  // an operation's inputs do not need remapping.
  struct IdentityMapper {
    OpIndex Map(OpIndex index) { return index; }
    OptionalOpIndex Map(OptionalOpIndex index) { return index; }
    template <size_t N>
    base::SmallVector<OpIndex, N> Map(base::Vector<const OpIndex> indices) {
      return base::SmallVector<OpIndex, N>{indices};
    }
  };

  const Opcode opcode;

  // The number of uses of this operation in the current graph.
  // Instead of overflowing, we saturate the value if it reaches the maximum.
  // In this case, the true number of uses is unknown.
  // We use such a small type to save memory and because nodes with a high
  // number of uses are rare. Additionally, we usually only care if the number
  // of uses is 0, 1 or bigger than 1.
  SaturatedUint8 saturated_use_count;

  const uint16_t input_count;

  // The inputs are stored adjacent in memory, right behind the `Operation`
  // object.
  base::Vector<const OpIndex> inputs() const;
  V8_INLINE OpIndex input(size_t i) const { return inputs()[i]; }

  static size_t StorageSlotCount(Opcode opcode, size_t input_count);
  size_t StorageSlotCount() const {
    return StorageSlotCount(opcode, input_count);
  }

  base::Vector<const RegisterRepresentation> outputs_rep() const;
  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const;

  // Returns true if this operation is an `Op` (exact opcode match), or, when
  // `Op` is an OpMaskT, if the masked bits match.
  template <class Op>
  bool Is() const {
    if constexpr (std::is_base_of_v<Operation, Op>) {
      return opcode == Op::opcode;
    } else {
      // Otherwise this must be OpMaskT.
      return IsOpmask<Op>();
    }
  }
  // Unchecked-in-release downcast; DCHECKs the type in debug builds.
  template <class Op>
  underlying_operation_t<Op>& Cast() {
    DCHECK(Is<Op>());
    return *static_cast<underlying_operation_t<Op>*>(this);
  }
  template <class Op>
  const underlying_operation_t<Op>& Cast() const {
    DCHECK(Is<Op>());
    return *static_cast<const underlying_operation_t<Op>*>(this);
  }
  // Checked downcast; returns nullptr on type mismatch.
  template <class Op>
  const underlying_operation_t<Op>* TryCast() const {
    if (!Is<Op>()) return nullptr;
    return static_cast<const underlying_operation_t<Op>*>(this);
  }
  template <class Op>
  underlying_operation_t<Op>* TryCast() {
    if (!Is<Op>()) return nullptr;
    return static_cast<underlying_operation_t<Op>*>(this);
  }
  OpEffects Effects() const;
  bool IsBlockTerminator() const {
    return turboshaft::IsBlockTerminator(opcode);
  }
  bool IsRequiredWhenUnused() const {
    // Block terminators must always be considered required-when-unused.
    DCHECK_IMPLIES(IsBlockTerminator(), Effects().is_required_when_unused());
    return Effects().is_required_when_unused();
  }

  std::string ToString() const;
  void PrintInputs(std::ostream& os, const std::string& op_index_prefix) const;
  void PrintOptions(std::ostream& os) const;

  // Returns true if {this} is the only operation using {value}.
  bool IsOnlyUserOf(const Operation& value, const Graph& graph) const;

  void Print() const;

 protected:
  // Operation objects store their inputs behind the object. Therefore, they
  // can only be constructed as part of a Graph.
  explicit Operation(Opcode opcode, size_t input_count)
      : opcode(opcode), input_count(input_count) {
    DCHECK_LE(input_count,
              std::numeric_limits<decltype(this->input_count)>::max());
  }

  template <class OpmaskT>
  // A Turboshaft operation can be as small as 4 Bytes while Opmasks can span
  // up to 8 Bytes. Any mask larger than the operation it is compared with will
  // always have a mismatch in the initialized memory. Still, there can be some
  // uninitialized memory being compared as part of the 8 Byte comparison that
  // this function performs.
  V8_CLANG_NO_SANITIZE("memory") bool IsOpmask() const {
    static_assert(std::is_same_v<
                  underlying_operation_t<OpmaskT>,
                  typename OpMaskT<typename OpmaskT::operation, OpmaskT::mask,
                                   OpmaskT::value>::operation>);
    // We check with the given mask: read the first 8 bytes of the operation
    // (opcode + leading fields) and compare the masked bits.
    uint64_t b;
    memcpy(&b, this, sizeof(uint64_t));
    b &= OpmaskT::mask;
    return b == OpmaskT::value;
  }

  // Not copyable: inputs live behind the object, so a plain copy would be
  // incomplete.
  Operation(const Operation&) = delete;
  Operation& operator=(const Operation&) = delete;
};
| |
// Helper aggregate for printing an operation with a configurable prefix in
// front of input indices (e.g. "#12").
struct OperationPrintStyle {
  const Operation& op;
  const char* op_index_prefix = "#";
};

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           OperationPrintStyle op);
// Plain operation printing uses the default "#" prefix.
inline std::ostream& operator<<(std::ostream& os, const Operation& op) {
  return os << OperationPrintStyle{op};
}
| |
// Returns the graph's zone; used e.g. for scratch allocations in
// OperationT::New's debug validation.
V8_EXPORT_PRIVATE Zone* get_zone(Graph* graph);

// Allocates `slot_count` storage slots for a new operation inside `graph`.
OperationStorageSlot* AllocateOpStorage(Graph* graph, size_t slot_count);
V8_EXPORT_PRIVATE const Operation& Get(const Graph& graph, OpIndex index);
| |
// Determine if an operation declares `effects`, which means that its
// effects are static and don't depend on inputs or options. Detected via
// SFINAE on the presence of a `Op::effects` member.
template <class Op, class = void>
struct HasStaticEffects : std::bool_constant<false> {};
template <class Op>
struct HasStaticEffects<Op, std::void_t<decltype(Op::effects)>>
    : std::bool_constant<true> {};
| |
// This template knows the complete type of the operation and is plugged into
// the inheritance hierarchy. It removes boilerplate from the concrete
// `Operation` subclasses, defining everything that can be expressed
// generically. It overshadows many methods from `Operation` with ones that
// exploit additional static information.
template <class Derived>
struct OperationT : Operation {
  // Enable concise base-constructor call in derived struct.
  using Base = OperationT;

  static const Opcode opcode;

  static constexpr OpEffects Effects() { return Derived::effects; }
  static constexpr bool IsBlockTerminator() {
    return turboshaft::IsBlockTerminator(opcode);
  }
  bool IsRequiredWhenUnused() const {
    return IsBlockTerminator() ||
           derived_this().Effects().is_required_when_unused();
  }

  // Returns the operation's effects if they are statically declared (see
  // HasStaticEffects), nullopt otherwise.
  static constexpr std::optional<OpEffects> EffectsIfStatic() {
    if constexpr (HasStaticEffects<Derived>::value) {
      return Derived::Effects();
    }
    return std::nullopt;
  }

  Derived& derived_this() { return *static_cast<Derived*>(this); }
  const Derived& derived_this() const {
    return *static_cast<const Derived*>(this);
  }

  // Shadow Operation::inputs to exploit static knowledge about object size:
  // the inputs array starts exactly `sizeof(Derived)` bytes after `this`.
  base::Vector<OpIndex> inputs() {
    return {reinterpret_cast<OpIndex*>(reinterpret_cast<char*>(this) +
                                       sizeof(Derived)),
            derived_this().input_count};
  }
  base::Vector<const OpIndex> inputs() const {
    return {reinterpret_cast<const OpIndex*>(
                reinterpret_cast<const char*>(this) + sizeof(Derived)),
            derived_this().input_count};
  }

  V8_INLINE OpIndex& input(size_t i) { return derived_this().inputs()[i]; }
  // TODO(chromium:331100916): remove this V<Any> overload once all users use
  // the more specific V<T> overload.
  V8_INLINE V<Any> input(size_t i) const { return derived_this().inputs()[i]; }
  template <typename T>
  V8_INLINE V<T> input(size_t i) const {
    return V<T>::Cast(derived_this().inputs()[i]);
  }

  static size_t StorageSlotCount(size_t input_count) {
    // The operation size in bytes is:
    //   `sizeof(Derived) + input_count*sizeof(OpIndex)`.
    // This is an optimized computation of:
    //   round_up(size_in_bytes / sizeof(StorageSlot))
    // with a minimum of 2 slots.
    constexpr size_t r = sizeof(OperationStorageSlot) / sizeof(OpIndex);
    static_assert(sizeof(OperationStorageSlot) % sizeof(OpIndex) == 0);
    static_assert(sizeof(Derived) % sizeof(OpIndex) == 0);
    size_t result = std::max<size_t>(
        2, (r - 1 + sizeof(Derived) / sizeof(OpIndex) + input_count) / r);
    DCHECK_EQ(result, Operation::StorageSlotCount(opcode, input_count));
    return result;
  }
  size_t StorageSlotCount() const { return StorageSlotCount(input_count); }

  // Allocates storage in `graph` and placement-constructs the operation.
  // In debug builds, also validates the operation and its input
  // representations.
  template <class... Args>
  static Derived& New(Graph* graph, size_t input_count, Args... args) {
    OperationStorageSlot* ptr =
        AllocateOpStorage(graph, StorageSlotCount(input_count));
    Derived* result = new (ptr) Derived(args...);
#ifdef DEBUG
    result->Validate(*graph);
    ZoneVector<MaybeRegisterRepresentation> storage(get_zone(graph));
    base::Vector<const MaybeRegisterRepresentation> expected =
        result->inputs_rep(storage);
    // TODO(mliedtke): DCHECK that expected and inputs are of the same size
    // and adapt inputs_rep() to always emit a representation for all inputs.
    size_t end = std::min<size_t>(expected.size(), result->input_count);
    for (size_t i = 0; i < end; ++i) {
      if (expected[i] == MaybeRegisterRepresentation::None()) continue;
      ValidateOpInputRep(*graph, result->inputs()[i],
                         RegisterRepresentation(expected[i]), result);
    }
#endif
    // If this DCHECK fails, then the number of inputs specified in the
    // operation constructor and in the static New function disagree.
    DCHECK_EQ(input_count, result->Operation::input_count);
    return *result;
  }

  template <class... Args>
  static Derived& New(Graph* graph, ShadowyOpIndexVectorWrapper inputs,
                      Args... args) {
    return New(graph, inputs.size(), inputs, args...);
  }

  explicit OperationT(size_t input_count) : Operation(opcode, input_count) {
    static_assert((std::is_base_of<OperationT, Derived>::value));
#if !V8_CC_MSVC
    static_assert(std::is_trivially_copyable<Derived>::value);
#endif  // !V8_CC_MSVC
    static_assert(std::is_trivially_destructible<Derived>::value);
  }
  explicit OperationT(ShadowyOpIndexVectorWrapper inputs)
      : OperationT(inputs.size()) {
    this->inputs().OverwriteWith(
        static_cast<base::Vector<const OpIndex>>(inputs));
  }

  bool EqualsForGVN(const Base& other) const {
    // By default, GVN only removed identical Operations. However, some
    // Operations (like DeoptimizeIf) can be GVNed when a dominating
    // similar-but-not-identical one exists. In that case, the Operation should
    // redefine EqualsForGVN, so that GVN knows which inputs or options of the
    // Operation to ignore (you should also probably redefine hash_value,
    // otherwise GVN won't even try to call EqualsForGVN).
    return derived_this() == other.derived_this();
  }
  bool operator==(const Base& other) const {
    return derived_this().inputs() == other.derived_this().inputs() &&
           derived_this().options() == other.derived_this().options();
  }
  template <typename... Args>
  size_t HashWithOptions(const Args&... args) const {
    return fast_hash_combine(opcode, derived_this().inputs(), args...);
  }
  size_t hash_value(
      HashingStrategy strategy = HashingStrategy::kDefault) const {
    return HashWithOptions(derived_this().options());
  }

  void PrintInputs(std::ostream& os, const std::string& op_index_prefix) const {
    os << "(";
    bool first = true;
    for (OpIndex input : inputs()) {
      if (!first) os << ", ";
      first = false;
      os << op_index_prefix << input.id();
    }
    os << ")";
  }

  void PrintOptions(std::ostream& os) const {
    const auto& options = derived_this().options();
    constexpr size_t options_count =
        std::tuple_size<std::remove_reference_t<decltype(options)>>::value;
    if (options_count == 0) {
      return;
    }
    PrintOptionsHelper(os, options, std::make_index_sequence<options_count>());
  }

  // Check graph invariants for this operation. Will be invoked in debug mode
  // immediately upon construction.
  // Concrete Operator classes are expected to re-define it.
  void Validate(const Graph& graph) const {}

 private:
  template <class... T, size_t... I>
  static void PrintOptionsHelper(std::ostream& os,
                                 const std::tuple<T...>& options,
                                 std::index_sequence<I...>) {
    os << "[";
    bool first = true;
    USE(first);
    ((first ? (first = false, os << std::get<I>(options))
            : os << ", " << std::get<I>(options)),
     ...);
    os << "]";
  }

  // All Operations have to define the outputs_rep function, to which
  // Operation::outputs_rep() will forward, based on their opcode. If you
  // forget to define it, then Operation::outputs_rep() would forward to
  // itself, resulting in an infinite loop. To avoid this, we define here in
  // OperationT a private version outputs_rep (with no implementation): if an
  // operation forgets to define outputs_rep, then Operation::outputs_rep()
  // tries to call this private version, which fails at compile time.
  base::Vector<const RegisterRepresentation> outputs_rep() const;

  // Returns a vector of the input representations.
  // The passed in {storage} can be used to store the underlying data.
  // The returned vector might be smaller than the input_count in which case
  // the additional inputs are assumed to have no register representation.
  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const;
};
| |
// Specialization of OperationT for operations with a statically known number
// of inputs, enabling compile-time input initialization and Explode().
template <size_t InputCount, class Derived>
struct FixedArityOperationT : OperationT<Derived> {
  // Enable concise base access in derived struct.
  using Base = FixedArityOperationT;

  // Shadow Operation::input_count to exploit static knowledge.
  static constexpr uint16_t input_count = InputCount;

  template <class... Args>
  explicit FixedArityOperationT(Args... args)
      : OperationT<Derived>(InputCount) {
    static_assert(sizeof...(Args) == InputCount, "wrong number of inputs");
    size_t i = 0;
    OpIndex* inputs = this->inputs().begin();
    // Fold expression stores each argument into the trailing input array.
    ((inputs[i++] = args), ...);
  }

  // Redefine the input initialization to tell C++ about the static input
  // size.
  template <class... Args>
  static Derived& New(Graph* graph, Args... args) {
    Derived& result =
        OperationT<Derived>::New(graph, InputCount, std::move(args)...);
    return result;
  }

  template <typename Fn, typename Mapper, size_t... InputI, size_t... OptionI>
  V8_INLINE auto ExplodeImpl(Fn fn, Mapper& mapper,
                             std::index_sequence<InputI...>,
                             std::index_sequence<OptionI...>) const {
    auto options = this->derived_this().options();
    USE(options);
    return fn(mapper.Map(this->input(InputI))...,
              std::get<OptionI>(options)...);
  }

  // Calls `fn` with this operation's mapped inputs followed by its options,
  // e.g. to re-create the operation with remapped inputs.
  template <typename Fn, typename Mapper>
  V8_INLINE auto Explode(Fn fn, Mapper& mapper) const {
    return ExplodeImpl(
        fn, mapper, std::make_index_sequence<input_count>(),
        std::make_index_sequence<
            std::tuple_size_v<decltype(this->derived_this().options())>>());
  }
};
| |
// List of (getter name, machine-level operation name) pairs for operations
// whose availability depends on the target; queried via SupportedOperations
// below. The second column is presumably the MachineOperatorBuilder accessor
// used in SupportedOperations::Initialize() — confirm in operations.cc.
// NOTE(review): both float16 conversion entries map to the same
// `Float16RawBitsConversion` name — verify this is intentional.
#define SUPPORTED_OPERATIONS_LIST(V)              \
  V(float32_round_down, Float32RoundDown)         \
  V(float64_round_down, Float64RoundDown)         \
  V(float32_round_up, Float32RoundUp)             \
  V(float64_round_up, Float64RoundUp)             \
  V(float32_round_to_zero, Float32RoundTruncate)  \
  V(float64_round_to_zero, Float64RoundTruncate)  \
  V(float32_round_ties_even, Float32RoundTiesEven) \
  V(float64_round_ties_even, Float64RoundTiesEven) \
  V(float64_round_ties_away, Float64RoundTiesAway) \
  V(int32_div_is_safe, Int32DivIsSafe)            \
  V(uint32_div_is_safe, Uint32DivIsSafe)          \
  V(word32_shift_is_safe, Word32ShiftIsSafe)      \
  V(word32_ctz, Word32Ctz)                        \
  V(word64_ctz, Word64Ctz)                        \
  V(word64_ctz_lowerable, Word64CtzLowerable)     \
  V(word32_popcnt, Word32Popcnt)                  \
  V(word64_popcnt, Word64Popcnt)                  \
  V(word32_reverse_bits, Word32ReverseBits)       \
  V(word64_reverse_bits, Word64ReverseBits)       \
  V(float32_select, Float32Select)                \
  V(float64_select, Float64Select)                \
  V(int32_abs_with_overflow, Int32AbsWithOverflow) \
  V(int64_abs_with_overflow, Int64AbsWithOverflow) \
  V(word32_rol, Word32Rol)                        \
  V(word64_rol, Word64Rol)                        \
  V(word64_rol_lowerable, Word64RolLowerable)     \
  V(sat_conversion_is_safe, SatConversionIsSafe)  \
  V(word32_select, Word32Select)                  \
  V(word64_select, Word64Select)                  \
  V(float64_to_float16_raw_bits, Float16RawBitsConversion) \
  V(float16_raw_bits_to_float64, Float16RawBitsConversion) \
  V(float16, Float16)
| |
// Process-wide registry of which optional machine operations the current
// target supports. Must be Initialize()d before the getters are used (the
// getters DCHECK this in debug builds, guarded by a mutex).
class V8_EXPORT_PRIVATE SupportedOperations {
#define DECLARE_FIELD(name, machine_name) bool name##_;
#define DECLARE_GETTER(name, machine_name)     \
  static bool name() {                         \
    if constexpr (DEBUG_BOOL) {                \
      base::MutexGuard lock(mutex_.Pointer()); \
      DCHECK(initialized_);                    \
    }                                          \
    return instance_.name##_;                  \
  }

 public:
  static void Initialize();
  static bool IsUnalignedLoadSupported(MemoryRepresentation repr);
  static bool IsUnalignedStoreSupported(MemoryRepresentation repr);
  SUPPORTED_OPERATIONS_LIST(DECLARE_GETTER)

 private:
  SUPPORTED_OPERATIONS_LIST(DECLARE_FIELD)

  static bool initialized_;
  static base::LazyMutex mutex_;
  static SupportedOperations instance_;

#undef DECLARE_FIELD
#undef DECLARE_GETTER
};
| |
// Returns a vector over a static constexpr array of the given register
// representations; since the storage has static duration, the returned
// base::Vector stays valid indefinitely.
template <RegisterRepresentation::Enum... reps>
base::Vector<const RegisterRepresentation> RepVector() {
  static constexpr std::array<RegisterRepresentation, sizeof...(reps)>
      rep_array{RegisterRepresentation{reps}...};
  return base::VectorOf(rep_array);
}
| |
// Same as RepVector, but for MaybeRegisterRepresentation (used by
// inputs_rep, where some inputs may have no register representation).
template <MaybeRegisterRepresentation::Enum... reps>
base::Vector<const MaybeRegisterRepresentation> MaybeRepVector() {
  static constexpr std::array<MaybeRegisterRepresentation, sizeof...(reps)>
      rep_array{MaybeRegisterRepresentation{reps}...};
  return base::VectorOf(rep_array);
}
| |
// Debug-only validation that the operation producing `input` outputs a
// representation compatible with `expected_rep` (one overload accepts a set
// of acceptable representations). Used from OperationT::New.
#if DEBUG
V8_EXPORT_PRIVATE void ValidateOpInputRep(
    const Graph& graph, OpIndex input,
    std::initializer_list<RegisterRepresentation> expected_rep,
    const Operation* checked_op = nullptr,
    std::optional<size_t> projection_index = {});
V8_EXPORT_PRIVATE void ValidateOpInputRep(
    const Graph& graph, OpIndex input, RegisterRepresentation expected_rep,
    const Operation* checked_op = nullptr,
    std::optional<size_t> projection_index = {});
#endif  // DEBUG
| |
// DeadOp is a special operation that can be used by analyzers to mark
// operations as being dead (typically, it should be used by calling the
// Graph's KillOperation method, which will Replace the old operation by a
// DeadOp).
// CopyingPhase and Analyzers should ignore Dead operations. A Dead operation
// should never be the input of a non-dead operation.
struct DeadOp : FixedArityOperationT<0, DeadOp> {
  // No effects: dead operations are freely removable.
  static constexpr OpEffects effects = OpEffects();

  base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return {};
  }

  auto options() const { return std::tuple{}; }
};
| |
// Aborts with the given CSA-dcheck `message`. Must not be eliminated when
// its (nonexistent) result is unused (`RequiredWhenUnused`), and does not
// return to the current function (`CanLeaveCurrentFunction`).
struct AbortCSADcheckOp : FixedArityOperationT<1, AbortCSADcheckOp> {
  static constexpr OpEffects effects =
      OpEffects().RequiredWhenUnused().CanLeaveCurrentFunction();

  base::Vector<const RegisterRepresentation> outputs_rep() const { return {}; }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return MaybeRepVector<MaybeRegisterRepresentation::Tagged()>();
  }

  // The failure message to report.
  V<String> message() const { return Base::input<String>(0); }

  explicit AbortCSADcheckOp(V<String> message) : Base(message) {}

  auto options() const { return std::tuple{}; }
};
| |
// Generic (JS-level) binary operation on tagged values. Can call arbitrary
// code (effects = CanCallAnything) and can throw; `lazy_deopt_on_throw` and
// the Tagged output come from THROWING_OP_BOILERPLATE.
struct GenericBinopOp : FixedArityOperationT<4, GenericBinopOp> {
#define GENERIC_BINOP_LIST(V) \
  V(Add)                      \
  V(Multiply)                 \
  V(Subtract)                 \
  V(Divide)                   \
  V(Modulus)                  \
  V(Exponentiate)             \
  V(BitwiseAnd)               \
  V(BitwiseOr)                \
  V(BitwiseXor)               \
  V(ShiftLeft)                \
  V(ShiftRight)               \
  V(ShiftRightLogical)        \
  V(Equal)                    \
  V(StrictEqual)              \
  V(LessThan)                 \
  V(LessThanOrEqual)          \
  V(GreaterThan)              \
  V(GreaterThanOrEqual)
  enum class Kind : uint8_t {
#define DEFINE_KIND(Name) k##Name,
    GENERIC_BINOP_LIST(DEFINE_KIND)
#undef DEFINE_KIND
  };
  Kind kind;

  static constexpr OpEffects effects = OpEffects().CanCallAnything();

  THROWING_OP_BOILERPLATE(RegisterRepresentation::Tagged())

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return MaybeRepVector<MaybeRegisterRepresentation::Tagged(),
                          MaybeRegisterRepresentation::Tagged()>();
  }

  V<Object> left() const { return input<Object>(0); }
  V<Object> right() const { return input<Object>(1); }
  // Frame state for lazy deoptimization if the operation throws.
  V<FrameState> frame_state() const { return input<FrameState>(2); }
  V<Context> context() const { return input<Context>(3); }

  GenericBinopOp(V<Object> left, V<Object> right, V<FrameState> frame_state,
                 V<Context> context, Kind kind,
                 LazyDeoptOnThrow lazy_deopt_on_throw)
      : Base(left, right, frame_state, context),
        kind(kind),
        lazy_deopt_on_throw(lazy_deopt_on_throw) {}

  auto options() const { return std::tuple{kind, lazy_deopt_on_throw}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           GenericBinopOp::Kind kind);
| |
// Generic (JS-level) unary operation on a tagged value; like GenericBinopOp,
// it can call arbitrary code and can throw.
struct GenericUnopOp : FixedArityOperationT<3, GenericUnopOp> {
#define GENERIC_UNOP_LIST(V) \
  V(BitwiseNot)              \
  V(Negate)                  \
  V(Increment)               \
  V(Decrement)
  enum class Kind : uint8_t {
#define DEFINE_KIND(Name) k##Name,
    GENERIC_UNOP_LIST(DEFINE_KIND)
#undef DEFINE_KIND
  };
  Kind kind;

  static constexpr OpEffects effects = OpEffects().CanCallAnything();

  THROWING_OP_BOILERPLATE(RegisterRepresentation::Tagged())

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return MaybeRepVector<MaybeRegisterRepresentation::Tagged()>();
  }

  V<Object> input() const { return Base::input<Object>(0); }
  // Frame state for lazy deoptimization if the operation throws.
  V<FrameState> frame_state() const { return Base::input<FrameState>(1); }
  V<Context> context() const { return Base::input<Context>(2); }

  GenericUnopOp(V<Object> input, V<FrameState> frame_state, V<Context> context,
                Kind kind, LazyDeoptOnThrow lazy_deopt_on_throw)
      : Base(input, frame_state, context),
        kind(kind),
        lazy_deopt_on_throw(lazy_deopt_on_throw) {}

  auto options() const { return std::tuple{kind, lazy_deopt_on_throw}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           GenericUnopOp::Kind kind);
| |
// Converts a tagged value to a Number or Numeric (per `kind`); may call
// arbitrary user code (e.g. valueOf) and can throw.
struct ToNumberOrNumericOp : FixedArityOperationT<3, ToNumberOrNumericOp> {
  Object::Conversion kind;

  static constexpr OpEffects effects = OpEffects().CanCallAnything();

  THROWING_OP_BOILERPLATE(RegisterRepresentation::Tagged())

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return MaybeRepVector<MaybeRegisterRepresentation::Tagged()>();
  }

  V<Object> input() const { return Base::input<Object>(0); }
  // Frame state for lazy deoptimization if the conversion throws.
  V<FrameState> frame_state() const { return Base::input<FrameState>(1); }
  V<Context> context() const { return Base::input<Context>(2); }

  ToNumberOrNumericOp(V<Object> input, V<FrameState> frame_state,
                      V<Context> context, Object::Conversion kind,
                      LazyDeoptOnThrow lazy_deopt_on_throw)
      : Base(input, frame_state, context),
        kind(kind),
        lazy_deopt_on_throw(lazy_deopt_on_throw) {}

  auto options() const { return std::tuple{kind, lazy_deopt_on_throw}; }
};
| |
// Word32SignHint is a type-hint used during Maglev->Turboshaft
// translation to avoid having multiple values being used as both Int32 and
// Uint32: for such cases, Maglev has explicit conversions, and it's helpful
// to also have them in Turboshaft. Eventually, Word32SignHint is just a
// nop in Turboshaft, since as far as Machine level graph is concerned, both
// Int32 and Uint32 are just Word32 registers.
struct Word32SignHintOp : FixedArityOperationT<1, Word32SignHintOp> {
  enum class Sign : bool { kSigned, kUnsigned };
  Sign sign;

  // Pure: the hint carries no runtime behavior.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return RepVector<RegisterRepresentation::Word32()>();
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return MaybeRepVector<MaybeRegisterRepresentation::Word32()>();
  }

  V<Word32> input() const { return Base::input<Word32>(0); }

  Word32SignHintOp(V<Word32> input, Sign sign) : Base(input), sign(sign) {}

  auto options() const { return std::tuple{sign}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           Word32SignHintOp::Sign sign);
| |
// Machine-level binary operation on Word32/Word64 values.
struct WordBinopOp : FixedArityOperationT<2, WordBinopOp> {
  enum class Kind : uint8_t {
    kAdd,
    kMul,
    kSignedMulOverflownBits,
    kUnsignedMulOverflownBits,
    kBitwiseAnd,
    kBitwiseOr,
    kBitwiseXor,
    kSub,
    kSignedDiv,
    kUnsignedDiv,
    kSignedMod,
    kUnsignedMod,
  };
  Kind kind;
  WordRepresentation rep;

  // We must avoid division by 0, hence CanDependOnChecks (a preceding
  // zero-check must not be hoisted past).
  static constexpr OpEffects effects = OpEffects().CanDependOnChecks();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::PairOf(rep);
  }

  template <class WordType = Word>
  V<WordType> left() const
    requires(IsWord<WordType>())
  {
    return input<WordType>(0);
  }
  template <class WordType = Word>
  V<WordType> right() const
    requires(IsWord<WordType>())
  {
    return input<WordType>(1);
  }

  bool IsCommutative() const { return IsCommutative(kind); }

  // Exhaustive switch (no default) so adding a Kind forces an update here.
  static bool IsCommutative(Kind kind) {
    switch (kind) {
      case Kind::kAdd:
      case Kind::kMul:
      case Kind::kSignedMulOverflownBits:
      case Kind::kUnsignedMulOverflownBits:
      case Kind::kBitwiseAnd:
      case Kind::kBitwiseOr:
      case Kind::kBitwiseXor:
        return true;
      case Kind::kSub:
      case Kind::kSignedDiv:
      case Kind::kUnsignedDiv:
      case Kind::kSignedMod:
      case Kind::kUnsignedMod:
        return false;
    }
  }

  static bool IsAssociative(Kind kind) {
    switch (kind) {
      case Kind::kAdd:
      case Kind::kMul:
      case Kind::kBitwiseAnd:
      case Kind::kBitwiseOr:
      case Kind::kBitwiseXor:
        return true;
      case Kind::kSignedMulOverflownBits:
      case Kind::kUnsignedMulOverflownBits:
      case Kind::kSub:
      case Kind::kSignedDiv:
      case Kind::kUnsignedDiv:
      case Kind::kSignedMod:
      case Kind::kUnsignedMod:
        return false;
    }
  }
  // The Word32 and Word64 versions of the operator compute the same result
  // when truncated to 32 bit.
  static bool AllowsWord64ToWord32Truncation(Kind kind) {
    switch (kind) {
      case Kind::kAdd:
      case Kind::kMul:
      case Kind::kBitwiseAnd:
      case Kind::kBitwiseOr:
      case Kind::kBitwiseXor:
      case Kind::kSub:
        return true;
      case Kind::kSignedMulOverflownBits:
      case Kind::kUnsignedMulOverflownBits:
      case Kind::kSignedDiv:
      case Kind::kUnsignedDiv:
      case Kind::kSignedMod:
      case Kind::kUnsignedMod:
        return false;
    }
  }

  WordBinopOp(V<Word> left, V<Word> right, Kind kind, WordRepresentation rep)
      : Base(left, right), kind(kind), rep(rep) {}

  auto options() const { return std::tuple{kind, rep}; }
  void PrintOptions(std::ostream& os) const;
};
| |
// Machine-level binary operation on Float32/Float64 values. Pure (no
// effects): float division by zero is well-defined, unlike word division.
struct FloatBinopOp : FixedArityOperationT<2, FloatBinopOp> {
  enum class Kind : uint8_t {
    kAdd,
    kMul,
    kMin,
    kMax,
    kSub,
    kDiv,
    kMod,
    kPower,
    kAtan2,
  };
  Kind kind;
  FloatRepresentation rep;

  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::PairOf(rep);
  }

  V<Float> left() const { return input<Float>(0); }
  V<Float> right() const { return input<Float>(1); }

  static bool IsCommutative(Kind kind) {
    switch (kind) {
      case Kind::kAdd:
      case Kind::kMul:
      case Kind::kMin:
      case Kind::kMax:
        return true;
      case Kind::kSub:
      case Kind::kDiv:
      case Kind::kMod:
      case Kind::kPower:
      case Kind::kAtan2:
        return false;
    }
  }

  FloatBinopOp(V<Float> left, V<Float> right, Kind kind,
               FloatRepresentation rep)
      : Base(left, right), kind(kind), rep(rep) {}

  void Validate(const Graph& graph) const {
    // kPower/kAtan2/kMod only exist for Float64.
    DCHECK_IMPLIES(kind == any_of(Kind::kPower, Kind::kAtan2, Kind::kMod),
                   rep == FloatRepresentation::Float64());
  }
  auto options() const { return std::tuple{kind, rep}; }
  void PrintOptions(std::ostream& os) const;
};
| |
| struct Word32PairBinopOp : FixedArityOperationT<4, Word32PairBinopOp> { |
| enum class Kind : uint8_t { |
| kAdd, |
| kSub, |
| kMul, |
| kShiftLeft, |
| kShiftRightArithmetic, |
| kShiftRightLogical, |
| }; |
| Kind kind; |
| |
| static constexpr OpEffects effects = OpEffects(); |
| |
| base::Vector<const RegisterRepresentation> outputs_rep() const { |
| return RepVector<RegisterRepresentation::Word32(), |
| RegisterRepresentation::Word32()>(); |
| } |
| |
| base::Vector<const MaybeRegisterRepresentation> inputs_rep( |
| const ZoneVector<MaybeRegisterRepresentation>& storage) const { |
| return MaybeRepVector<MaybeRegisterRepresentation::Word32(), |
| MaybeRegisterRepresentation::Word32(), |
| MaybeRegisterRepresentation::Word32(), |
| MaybeRegisterRepresentation::Word32()>(); |
| } |
| |
| V<Word32> left_low() const { return input<Word32>(0); } |
| V<Word32> left_high() const { return input<Word32>(1); } |
| V<Word32> right_low() const { return input<Word32>(2); } |
| V<Word32> right_high() const { return input<Word32>(3); } |
| |
| Word32PairBinopOp(V<Word32> left_low, V<Word32> left_high, |
| V<Word32> right_low, V<Word32> right_high, Kind kind) |
| : Base(left_low, left_high, right_low, right_high), kind(kind) {} |
| |
| auto options() const { return std::tuple{kind}; } |
| void PrintOptions(std::ostream& os) const; |
| }; |
| |
// Word binary operation that deoptimizes (using {frame_state}) instead of
// producing a wrapped/undefined result on overflow or other failure of the
// operation.
struct WordBinopDeoptOnOverflowOp
    : FixedArityOperationT<3, WordBinopDeoptOnOverflowOp> {
  enum class Kind : uint8_t {
    kSignedAdd,
    kSignedMul,
    kSignedSub,
    kSignedDiv,
    kSignedMod,
    kUnsignedDiv,
    kUnsignedMod
  };
  Kind kind;
  WordRepresentation rep;
  // Feedback to record when the deopt is taken.
  FeedbackSource feedback;
  // Whether a result of -0 should also trigger a deopt -- TODO confirm exact
  // semantics against the lowering of this operation.
  CheckForMinusZeroMode mode;

  // CanDeopt: must not be reordered across other deopting/effectful ops.
  static constexpr OpEffects effects = OpEffects().CanDeopt();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::PairOf(rep);
  }

  V<Word> left() const { return input<Word>(0); }
  V<Word> right() const { return input<Word>(1); }
  V<FrameState> frame_state() const { return input<FrameState>(2); }

  WordBinopDeoptOnOverflowOp(V<Word> left, V<Word> right,
                             V<FrameState> frame_state, Kind kind,
                             WordRepresentation rep, FeedbackSource feedback,
                             CheckForMinusZeroMode mode)
      : Base(left, right, frame_state),
        kind(kind),
        rep(rep),
        feedback(feedback),
        mode(mode) {}

  void Validate(const Graph& graph) const {
    // Unsigned div/mod is only supported on Word32.
    DCHECK_IMPLIES(kind == Kind::kUnsignedDiv || kind == Kind::kUnsignedMod,
                   rep == WordRepresentation::Word32());
  }
  auto options() const { return std::tuple{kind, rep, feedback, mode}; }
  void PrintOptions(std::ostream& os) const;
};
| |
// Word binary operation producing two results: the (possibly wrapped) value at
// projection {kValueIndex} and a Word32 overflow bit at {kOverflowIndex}.
struct OverflowCheckedBinopOp
    : FixedArityOperationT<2, OverflowCheckedBinopOp> {
  static constexpr int kValueIndex = 0;
  static constexpr int kOverflowIndex = 1;

  enum class Kind : uint8_t {
    kSignedAdd,
    kSignedMul,
    kSignedSub,
  };
  Kind kind;
  WordRepresentation rep;

  // No observable side effects; the overflow is reported as a value, not a
  // deopt/trap.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    // The value has representation {rep}; the overflow bit is always Word32.
    switch (rep.value()) {
      case WordRepresentation::Word32():
        return RepVector<RegisterRepresentation::Word32(),
                         RegisterRepresentation::Word32()>();
      case WordRepresentation::Word64():
        return RepVector<RegisterRepresentation::Word64(),
                         RegisterRepresentation::Word32()>();
    }
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::PairOf(rep);
  }

  V<Word> left() const { return input<Word>(0); }
  V<Word> right() const { return input<Word>(1); }

  // Returns true if swapping the two operands cannot change the results.
  static bool IsCommutative(Kind kind) {
    switch (kind) {
      case Kind::kSignedAdd:
      case Kind::kSignedMul:
        return true;
      case Kind::kSignedSub:
        return false;
    }
  }

  OverflowCheckedBinopOp(V<Word> left, V<Word> right, Kind kind,
                         WordRepresentation rep)
      : Base(left, right), kind(kind), rep(rep) {}

  auto options() const { return std::tuple{kind, rep}; }
  void PrintOptions(std::ostream& os) const;
};
| |
// Unary operation on a word value; input and output both use {rep}.
struct WordUnaryOp : FixedArityOperationT<1, WordUnaryOp> {
  enum class Kind : uint8_t {
    kReverseBytes,
    kCountLeadingZeros,
    kCountTrailingZeros,
    kPopCount,
    kSignExtend8,
    kSignExtend16,
  };
  Kind kind;
  WordRepresentation rep;
  // No observable side effects.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::SingleRep(rep);
  }

  V<Word> input() const { return Base::input<Word>(0); }

  // Whether {kind} at {rep} is supported; defined elsewhere (presumably
  // depends on the target's machine-level capabilities).
  V8_EXPORT_PRIVATE static bool IsSupported(Kind kind, WordRepresentation rep);

  explicit WordUnaryOp(V<Word> input, Kind kind, WordRepresentation rep)
      : Base(input), kind(kind), rep(rep) {}

  auto options() const { return std::tuple{kind, rep}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           WordUnaryOp::Kind kind);
| |
// Unary word operation producing two results: the value at projection
// {kValueIndex} and a Word32 overflow bit at {kOverflowIndex}.
struct OverflowCheckedUnaryOp
    : FixedArityOperationT<1, OverflowCheckedUnaryOp> {
  static constexpr int kValueIndex = 0;
  static constexpr int kOverflowIndex = 1;

  enum class Kind : uint8_t { kAbs };
  Kind kind;
  WordRepresentation rep;
  // No observable side effects; overflow is reported as a value.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    // The value has representation {rep}; the overflow bit is always Word32.
    switch (rep.value()) {
      case WordRepresentation::Word32():
        return RepVector<RegisterRepresentation::Word32(),
                         RegisterRepresentation::Word32()>();
      case WordRepresentation::Word64():
        return RepVector<RegisterRepresentation::Word64(),
                         RegisterRepresentation::Word32()>();
    }
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::SingleRep(rep);
  }

  V<Word> input() const { return Base::input<Word>(0); }

  explicit OverflowCheckedUnaryOp(V<Word> input, Kind kind,
                                  WordRepresentation rep)
      : Base(input), kind(kind), rep(rep) {}

  auto options() const { return std::tuple{kind, rep}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           OverflowCheckedUnaryOp::Kind kind);
| |
// Unary floating-point operation; input and output both use {rep}.
struct FloatUnaryOp : FixedArityOperationT<1, FloatUnaryOp> {
  enum class Kind : uint8_t {
    kAbs,
    kNegate,
    kSilenceNaN,
    kRoundDown,      // round towards -infinity
    kRoundUp,        // round towards +infinity
    kRoundToZero,    // round towards 0
    kRoundTiesEven,  // break ties by rounding towards the next even number
    kLog,
    kLog2,
    kLog10,
    kLog1p,
    kSqrt,
    kCbrt,
    kExp,
    kExpm1,
    kSin,
    kCos,
    kSinh,
    kCosh,
    kAcos,
    kAsin,
    kAsinh,
    kAcosh,
    kTan,
    kTanh,
    kAtan,
    kAtanh,
  };

  Kind kind;
  FloatRepresentation rep;

  // No observable side effects.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::SingleRep(rep);
  }

  V<Float> input() const { return Base::input<Float>(0); }

  // Whether {kind} at {rep} is supported; defined elsewhere (presumably
  // depends on the target's machine-level capabilities).
  V8_EXPORT_PRIVATE static bool IsSupported(Kind kind, FloatRepresentation rep);

  explicit FloatUnaryOp(V<Float> input, Kind kind, FloatRepresentation rep)
      : Base(input), kind(kind), rep(rep) {}

  auto options() const { return std::tuple{kind, rep}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           FloatUnaryOp::Kind kind);
| |
// Shift/rotate of a word value. The shifted value {left} has representation
// {rep}; the shift amount {right} is always Word32.
struct ShiftOp : FixedArityOperationT<2, ShiftOp> {
  enum class Kind : uint8_t {
    kShiftRightArithmeticShiftOutZeros,
    kShiftRightArithmetic,
    kShiftRightLogical,
    kShiftLeft,
    kRotateRight,
    kRotateLeft
  };
  Kind kind;
  WordRepresentation rep;

  // No observable side effects.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(static_cast<const RegisterRepresentation*>(&rep), 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    // Input 0 has representation {rep}, input 1 (the shift amount) is Word32.
    return InitVectorOf(storage,
                        {static_cast<const RegisterRepresentation&>(rep),
                         RegisterRepresentation::Word32()});
  }

  template <typename WordT = Word>
    requires(IsWord<WordT>())
  V<WordT> left() const {
    DCHECK(IsValidTypeFor<WordT>(rep));
    return input<WordT>(0);
  }
  V<Word32> right() const { return input<Word32>(1); }

  bool IsRightShift() const { return IsRightShift(kind); }

  // True for the right-shift kinds (rotates do not count as shifts here).
  static bool IsRightShift(Kind kind) {
    switch (kind) {
      case Kind::kShiftRightArithmeticShiftOutZeros:
      case Kind::kShiftRightArithmetic:
      case Kind::kShiftRightLogical:
        return true;
      case Kind::kShiftLeft:
      case Kind::kRotateRight:
      case Kind::kRotateLeft:
        return false;
    }
  }
  // The Word32 and Word64 versions of the operator compute the same result when
  // truncated to 32 bit.
  static bool AllowsWord64ToWord32Truncation(Kind kind) {
    switch (kind) {
      case Kind::kShiftLeft:
        return true;
      case Kind::kShiftRightArithmeticShiftOutZeros:
      case Kind::kShiftRightArithmetic:
      case Kind::kShiftRightLogical:
      case Kind::kRotateRight:
      case Kind::kRotateLeft:
        return false;
    }
  }

  ShiftOp(V<Word> left, V<Word32> right, Kind kind, WordRepresentation rep)
      : Base(left, right), kind(kind), rep(rep) {}

  auto options() const { return std::tuple{kind, rep}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           ShiftOp::Kind kind);
| |
// Comparison of two values of representation {rep}, producing a Word32 result.
struct ComparisonOp : FixedArityOperationT<2, ComparisonOp> {
  enum class Kind : uint8_t {
    kEqual,
    kSignedLessThan,
    kSignedLessThanOrEqual,
    kUnsignedLessThan,
    kUnsignedLessThanOrEqual
  };
  Kind kind;
  RegisterRepresentation rep;

  // No observable side effects.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return RepVector<RegisterRepresentation::Word32()>();
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::PairOf(rep);
  }

  // Only equality commutes; the ordering comparisons do not.
  static bool IsCommutative(Kind kind) { return kind == Kind::kEqual; }

  template <typename T = Any>
  V<T> left() const {
    DCHECK(IsValidTypeFor<T>(rep));
    return input<T>(0);
  }
  template <typename T = Any>
  V<T> right() const {
    DCHECK(IsValidTypeFor<T>(rep));
    return input<T>(1);
  }

  ComparisonOp(V<Any> left, V<Any> right, Kind kind, RegisterRepresentation rep)
      : Base(left, right), kind(kind), rep(rep) {}

  void Validate(const Graph& graph) const {
    if (kind == Kind::kEqual) {
      // Equality additionally supports Tagged inputs.
      DCHECK(rep == any_of(RegisterRepresentation::Word32(),
                           RegisterRepresentation::Word64(),
                           RegisterRepresentation::Float32(),
                           RegisterRepresentation::Float64(),
                           RegisterRepresentation::Tagged()));

      RegisterRepresentation input_rep = rep;
#ifdef V8_COMPRESS_POINTERS
      // In the presence of pointer compression, we only compare the lower
      // 32bit.
      if (input_rep == RegisterRepresentation::Tagged()) {
        input_rep = RegisterRepresentation::Compressed();
      }
#endif  // V8_COMPRESS_POINTERS
#ifdef DEBUG
      ValidateOpInputRep(graph, left(), input_rep);
      ValidateOpInputRep(graph, right(), input_rep);
#endif  // DEBUG
      USE(input_rep);
    } else {
      DCHECK_EQ(rep, any_of(RegisterRepresentation::Word32(),
                            RegisterRepresentation::Word64(),
                            RegisterRepresentation::Float32(),
                            RegisterRepresentation::Float64()));
      // Floating-point comparisons only come in the signed flavors.
      DCHECK_IMPLIES(
          rep == any_of(RegisterRepresentation::Float32(),
                        RegisterRepresentation::Float64()),
          kind == any_of(Kind::kSignedLessThan, Kind::kSignedLessThanOrEqual));
    }
  }
  auto options() const { return std::tuple{kind, rep}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           ComparisonOp::Kind kind);
DEFINE_MULTI_SWITCH_INTEGRAL(ComparisonOp::Kind, 8)
| |
// Conversion between untagged representations ({from} -> {to}). Conversions
// involving Tagged values use TaggedBitcastOp instead (see Validate below).
struct ChangeOp : FixedArityOperationT<1, ChangeOp> {
  enum class Kind : uint8_t {
    // convert between different floating-point types. Note that the
    // Float64->Float32 conversion is truncating.
    kFloatConversion,
    // overflow guaranteed to result in the minimal integer
    kSignedFloatTruncateOverflowToMin,
    kUnsignedFloatTruncateOverflowToMin,
    // JS semantics float64 to word32 truncation
    // https://tc39.es/ecma262/#sec-touint32
    kJSFloatTruncate,
    // convert float64 to float16, then bitcast word32. Used for storing into
    // Float16Array and Math.fround16.
    kJSFloat16TruncateWithBitcast,
    // bitcast word32 to float16 and convert to float64. Used for loading from
    // Float16Array and Math.fround16.
    kJSFloat16ChangeWithBitcast,
    // convert (un)signed integer to floating-point value
    kSignedToFloat,
    kUnsignedToFloat,
    // extract half of a float64 value
    kExtractHighHalf,
    kExtractLowHalf,
    // increase bit-width for unsigned integer values
    kZeroExtend,
    // increase bit-width for signed integer values
    kSignExtend,
    // truncate word64 to word32
    kTruncate,
    // preserve bits, change meaning
    kBitcast
  };
  // Violated assumptions result in undefined behavior.
  enum class Assumption : uint8_t {
    kNoAssumption,
    // Used for conversions from floating-point to integer, assumes that the
    // value doesn't exceed the integer range.
    kNoOverflow,
    // Assume that the original value can be recovered by a corresponding
    // reverse transformation.
    kReversible,
  };
  Kind kind;
  // Reversible means undefined behavior if value cannot be represented
  // precisely.
  Assumption assumption;
  RegisterRepresentation from;
  RegisterRepresentation to;

  // Returns true if change<kind>(change<reverse_kind>(a)) == a for all a.
  // This assumes that change<reverse_kind> uses the inverted {from} and {to}
  // representations, i.e. the input to the inner change op has the same
  // representation as the result of the outer change op.
  static bool IsReversible(Kind kind, Assumption assumption,
                           RegisterRepresentation from,
                           RegisterRepresentation to, Kind reverse_kind,
                           bool signalling_nan_possible) {
    switch (kind) {
      case Kind::kFloatConversion:
        // Float32 -> Float64 is exact, so converting back recovers the value
        // (unless a signalling NaN could have been involved).
        return from == RegisterRepresentation::Float32() &&
               to == RegisterRepresentation::Float64() &&
               reverse_kind == Kind::kFloatConversion &&
               !signalling_nan_possible;
      case Kind::kSignedFloatTruncateOverflowToMin:
        return assumption == Assumption::kReversible &&
               reverse_kind == Kind::kSignedToFloat;
      case Kind::kUnsignedFloatTruncateOverflowToMin:
        return assumption == Assumption::kReversible &&
               reverse_kind == Kind::kUnsignedToFloat;
      case Kind::kJSFloatTruncate:
        return false;
      case Kind::kJSFloat16TruncateWithBitcast:
      case Kind::kJSFloat16ChangeWithBitcast:
        return false;
      case Kind::kSignedToFloat:
        // Word32 -> Float64 is always exact; other combinations only under
        // the kReversible assumption.
        if (from == RegisterRepresentation::Word32() &&
            to == RegisterRepresentation::Float64()) {
          return reverse_kind == any_of(Kind::kSignedFloatTruncateOverflowToMin,
                                        Kind::kJSFloatTruncate);
        } else {
          return assumption == Assumption::kReversible &&
                 reverse_kind ==
                     any_of(Kind::kSignedFloatTruncateOverflowToMin);
        }
      case Kind::kUnsignedToFloat:
        if (from == RegisterRepresentation::Word32() &&
            to == RegisterRepresentation::Float64()) {
          return reverse_kind ==
                 any_of(Kind::kUnsignedFloatTruncateOverflowToMin,
                        Kind::kJSFloatTruncate);
        } else {
          return assumption == Assumption::kReversible &&
                 reverse_kind == Kind::kUnsignedFloatTruncateOverflowToMin;
        }
      case Kind::kExtractHighHalf:
      case Kind::kExtractLowHalf:
        return false;
      case Kind::kZeroExtend:
      case Kind::kSignExtend:
        DCHECK_EQ(from, RegisterRepresentation::Word32());
        DCHECK_EQ(to, RegisterRepresentation::Word64());
        return reverse_kind == Kind::kTruncate;
      case Kind::kTruncate:
        DCHECK_EQ(from, RegisterRepresentation::Word64());
        DCHECK_EQ(to, RegisterRepresentation::Word32());
        return reverse_kind == Kind::kBitcast;
      case Kind::kBitcast:
        return reverse_kind == Kind::kBitcast;
    }
  }

  bool IsReversibleBy(Kind reverse_kind, bool signalling_nan_possible) const {
    return IsReversible(kind, assumption, from, to, reverse_kind,
                        signalling_nan_possible);
  }

  // No observable side effects.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(&to, 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::SingleRep(from);
  }

  template <typename Type = Untagged>
    requires IsUntagged<Type>
  V<Type> input() const {
    DCHECK(IsValidTypeFor<Type>(from));
    return Base::input<Type>(0);
  }

  ChangeOp(V<Untagged> input, Kind kind, Assumption assumption,
           RegisterRepresentation from, RegisterRepresentation to)
      : Base(input), kind(kind), assumption(assumption), from(from), to(to) {}

  void Validate(const Graph& graph) const {
    DCHECK_NE(from, RegisterRepresentation::Tagged());
    DCHECK_NE(to, RegisterRepresentation::Tagged());
    // Bitcasts from and to Tagged should use a TaggedBitcast instead (which has
    // different effects, since it's unsafe to reorder such bitcasts across
    // GCs).
  }
  auto options() const { return std::tuple{kind, assumption, from, to}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           ChangeOp::Kind kind);
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           ChangeOp::Assumption assumption);
DEFINE_MULTI_SWITCH_INTEGRAL(ChangeOp::Kind, 16)
DEFINE_MULTI_SWITCH_INTEGRAL(ChangeOp::Assumption, 4)
| |
// Numeric conversion that deoptimizes (using {frame_state}) when the input
// cannot be converted according to {kind}.
struct ChangeOrDeoptOp : FixedArityOperationT<2, ChangeOrDeoptOp> {
  enum class Kind : uint8_t {
    kUint32ToInt32,
    kInt64ToInt32,
    kUint64ToInt32,
    kUint64ToInt64,
    kFloat64ToInt32,
    kFloat64ToUint32,
    kFloat64ToAdditiveSafeInteger,
    kFloat64ToInt64,
    // No representation change: checks that the Float64 input is not the hole
    // value (and passes it through otherwise) -- see outputs_rep below.
    kFloat64NotHole,
  };
  Kind kind;
  // Whether an input of -0 should also cause a deopt (for the float kinds).
  CheckForMinusZeroMode minus_zero_mode;
  // Feedback to record when the deopt is taken.
  FeedbackSource feedback;

  // CanDeopt: must not be reordered across other deopting/effectful ops.
  static constexpr OpEffects effects = OpEffects().CanDeopt();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    switch (kind) {
      case Kind::kUint32ToInt32:
      case Kind::kInt64ToInt32:
      case Kind::kUint64ToInt32:
      case Kind::kFloat64ToInt32:
      case Kind::kFloat64ToUint32:
        return RepVector<RegisterRepresentation::Word32()>();
      case Kind::kUint64ToInt64:
      case Kind::kFloat64ToAdditiveSafeInteger:
      case Kind::kFloat64ToInt64:
        return RepVector<RegisterRepresentation::Word64()>();
      case Kind::kFloat64NotHole:
        return RepVector<RegisterRepresentation::Float64()>();
    }
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    switch (kind) {
      case Kind::kUint32ToInt32:
        return MaybeRepVector<MaybeRegisterRepresentation::Word32()>();
      case Kind::kInt64ToInt32:
      case Kind::kUint64ToInt32:
      case Kind::kUint64ToInt64:
        return MaybeRepVector<MaybeRegisterRepresentation::Word64()>();
      case Kind::kFloat64ToInt32:
      case Kind::kFloat64ToUint32:
      case Kind::kFloat64ToAdditiveSafeInteger:
      case Kind::kFloat64ToInt64:
      case Kind::kFloat64NotHole:
        return MaybeRepVector<MaybeRegisterRepresentation::Float64()>();
    }
  }

  V<Untagged> input() const { return Base::input<Untagged>(0); }
  V<FrameState> frame_state() const { return Base::input<FrameState>(1); }

  ChangeOrDeoptOp(V<Untagged> input, V<FrameState> frame_state, Kind kind,
                  CheckForMinusZeroMode minus_zero_mode,
                  const FeedbackSource& feedback)
      : Base(input, frame_state),
        kind(kind),
        minus_zero_mode(minus_zero_mode),
        feedback(feedback) {}

  void Validate(const Graph& graph) const {
    DCHECK(Get(graph, frame_state()).Is<FrameStateOp>());
  }

  auto options() const { return std::tuple{kind, minus_zero_mode, feedback}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           ChangeOrDeoptOp::Kind kind);
| |
// Perform a conversion and return a pair of the result and a bit if it was
// successful. The success bit (projection 1) is a Word32 with value
// {kSuccessValue} or {kFailureValue}.
struct TryChangeOp : FixedArityOperationT<1, TryChangeOp> {
  static constexpr uint32_t kSuccessValue = 1;
  static constexpr uint32_t kFailureValue = 0;
  enum class Kind : uint8_t {
    // The result of the truncation is undefined if the result is out of range.
    kSignedFloatTruncateOverflowUndefined,
    kUnsignedFloatTruncateOverflowUndefined,
  };
  Kind kind;
  // Converts from floating-point representation {from} to word
  // representation {to}.
  FloatRepresentation from;
  WordRepresentation to;

  // No observable side effects; failure is reported via the success bit.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    // The converted value has representation {to}; the success bit is Word32.
    switch (to.value()) {
      case WordRepresentation::Word32():
        return RepVector<RegisterRepresentation::Word32(),
                         RegisterRepresentation::Word32()>();
      case WordRepresentation::Word64():
        return RepVector<RegisterRepresentation::Word64(),
                         RegisterRepresentation::Word32()>();
    }
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::SingleRep(from);
  }

  OpIndex input() const { return Base::input(0); }

  TryChangeOp(OpIndex input, Kind kind, FloatRepresentation from,
              WordRepresentation to)
      : Base(input), kind(kind), from(from), to(to) {}

  auto options() const { return std::tuple{kind, from, to}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           TryChangeOp::Kind kind);
| |
// Reassembles a Float64 from two Word32 inputs holding the high and low
// halves of its bit pattern.
struct BitcastWord32PairToFloat64Op
    : FixedArityOperationT<2, BitcastWord32PairToFloat64Op> {
  // No observable side effects.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return RepVector<RegisterRepresentation::Float64()>();
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return MaybeRepVector<MaybeRegisterRepresentation::Word32(),
                          MaybeRegisterRepresentation::Word32()>();
  }

  // Note the input order: the high half is input 0, the low half is input 1.
  V<Word32> high_word32() const { return input<Word32>(0); }
  V<Word32> low_word32() const { return input<Word32>(1); }

  BitcastWord32PairToFloat64Op(V<Word32> high_word32, V<Word32> low_word32)
      : Base(high_word32, low_word32) {}

  auto options() const { return std::tuple{}; }
};
| |
| struct TaggedBitcastOp : FixedArityOperationT<1, TaggedBitcastOp> { |
| enum class Kind : uint8_t { |
| kSmi, // This is a bitcast from a Word to a Smi or from a Smi to a Word |
| kHeapObject, // This is a bitcast from or to a Heap Object |
| kTagAndSmiBits, // This is a bitcast where only access to the tag and the |
| // smi bits (if it's a smi) are valid |
| kAny |
| }; |
| Kind kind; |
| RegisterRepresentation from; |
| RegisterRepresentation to; |
| |
| OpEffects Effects() const { |
| switch (kind) { |
| case Kind::kSmi: |
| case Kind::kTagAndSmiBits: |
| return OpEffects(); |
| case Kind::kHeapObject: |
| case Kind::kAny: |
| // Due to moving GC, converting from or to pointers doesn't commute with |
| // GC. |
| return OpEffects().CanDoRawHeapAccess(); |
| } |
| } |
| |
| base::Vector<const RegisterRepresentation> outputs_rep() const { |
| return base::VectorOf(&to, 1); |
| } |
| |
| base::Vector<const MaybeRegisterRepresentation> inputs_rep( |
| ZoneVector<MaybeRegisterRepresentation>& storage) const { |
| return InputsRepFactory::SingleRep(from); |
| } |
| |
| OpIndex input() const { return Base::input(0); } |
| |
| TaggedBitcastOp(OpIndex input, RegisterRepresentation from, |
| RegisterRepresentation to, Kind kind) |
| : Base(input), kind(kind), from(from), to(to) {} |
| |
| void Validate(const Graph& graph) const { |
| if (kind == Kind::kSmi) { |
| DCHECK((from.IsWord() && to.IsTaggedOrCompressed()) || |
| (from.IsTaggedOrCompressed() && to.IsWord())); |
| DCHECK_IMPLIES(from == RegisterRepresentation::Word64() || |
| to == RegisterRepresentation::Word64(), |
| Is64()); |
| } else { |
| // TODO(nicohartmann@): Without implicit truncation, the first case might |
| // not be correct anymore. |
| DCHECK((from.IsWord() && to == RegisterRepresentation::Tagged()) || |
| (from == RegisterRepresentation::Tagged() && |
| to == RegisterRepresentation::WordPtr()) || |
| (from == RegisterRepresentation::Compressed() && |
| to == RegisterRepresentation::Word32())); |
| } |
| } |
| auto options() const { return std::tuple{from, to, kind}; } |
| }; |
| std::ostream& operator<<(std::ostream& os, TaggedBitcastOp::Kind assumption); |
| |
// Selects between {vtrue} and {vfalse} based on the Word32 condition {cond},
// either via a branch or via a conditional move (see Implementation).
struct SelectOp : FixedArityOperationT<3, SelectOp> {
  enum class Implementation : uint8_t { kBranch, kCMove };

  RegisterRepresentation rep;
  BranchHint hint;
  Implementation implem;

  // No observable side effects.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(&rep, 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    // {cond} is Word32; both selected values have representation {rep}.
    return InitVectorOf(storage, {RegisterRepresentation::Word32(), rep, rep});
  }

  SelectOp(V<Word32> cond, V<Any> vtrue, V<Any> vfalse,
           RegisterRepresentation rep, BranchHint hint, Implementation implem)
      : Base(cond, vtrue, vfalse), rep(rep), hint(hint), implem(implem) {}

  void Validate(const Graph& graph) const {
    // kCMove is only valid when the target supports a select instruction for
    // {rep}.
    DCHECK_IMPLIES(implem == Implementation::kCMove,
                   (rep == RegisterRepresentation::Word32() &&
                    SupportedOperations::word32_select()) ||
                       (rep == RegisterRepresentation::Word64() &&
                        SupportedOperations::word64_select()) ||
                       (rep == RegisterRepresentation::Float32() &&
                        SupportedOperations::float32_select()) ||
                       (rep == RegisterRepresentation::Float64() &&
                        SupportedOperations::float64_select()));
  }

  V<Word32> cond() const { return input<Word32>(0); }
  V<Any> vtrue() const { return input<Any>(1); }
  V<Any> vfalse() const { return input<Any>(2); }

  auto options() const { return std::tuple{rep, hint, implem}; }
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
                                           SelectOp::Implementation kind);
| |
// SSA phi: merges one value per predecessor of the current block; all inputs
// and the output share representation {rep}.
struct PhiOp : OperationT<PhiOp> {
  RegisterRepresentation rep;

  // Phis have to remain at the beginning of the current block. As effects
  // cannot express this completely, we just mark them as having no effects but
  // treat them specially when scheduling operations.
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(&rep, 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    // Variable arity: every input has representation {rep}, so fill {storage}
    // with one entry per input.
    storage.resize(input_count);
    for (size_t i = 0; i < input_count; ++i) {
      storage[i] = rep;
    }
    return base::VectorOf(storage);
  }

  // For loop phis, the input coming from the loop back-edge.
  static constexpr size_t kLoopPhiBackEdgeIndex = 1;

  explicit PhiOp(base::Vector<const OpIndex> inputs, RegisterRepresentation rep)
      : Base(inputs), rep(rep) {}

  // Variable-arity ops provide Explode so that graph-copying visitors can
  // re-emit the operation from its mapped inputs and options.
  template <typename Fn, typename Mapper>
  V8_INLINE auto Explode(Fn fn, Mapper& mapper) const {
    auto mapped_inputs = mapper.template Map<64>(inputs());
    return fn(base::VectorOf(mapped_inputs), rep);
  }

  void Validate(const Graph& graph) const { DCHECK_GT(input_count, 0); }
  auto options() const { return std::tuple{rep}; }
};
| |
// Used as a placeholder for a loop-phi while building the graph, replaced with
// a normal `PhiOp` before graph building is over, so it should never appear in
// a complete graph. It only carries the forward-edge input; the back-edge
// input is filled in when it is turned into a `PhiOp`.
struct PendingLoopPhiOp : FixedArityOperationT<1, PendingLoopPhiOp> {
  RegisterRepresentation rep;

  // No observable side effects (same caveat as PhiOp regarding scheduling).
  static constexpr OpEffects effects = OpEffects();
  base::Vector<const RegisterRepresentation> outputs_rep() const {
    return base::VectorOf(&rep, 1);
  }

  base::Vector<const MaybeRegisterRepresentation> inputs_rep(
      ZoneVector<MaybeRegisterRepresentation>& storage) const {
    return InputsRepFactory::SingleRep(rep);
  }

  // The value flowing in from the loop's forward edge.
  OpIndex first() const { return input(0); }
  PendingLoopPhiOp(OpIndex first, RegisterRepresentation rep)
      : Base(first), rep(rep) {}

  auto options() const { return std::tuple{rep}; }
};
| |
| struct ConstantOp : FixedArityOperationT<0, ConstantOp> { |
| enum class Kind : uint8_t { |
| kWord32, |
| kWord64, |
| kFloat32, |
| kFloat64, |
| kSmi, |
| kNumber, // TODO(tebbi): See if we can avoid number constants. |
| kTaggedIndex, |
| kExternal, |
| kHeapObject, |
| kCompressedHeapObject, |
| kTrustedHeapObject, |
| kRelocatableWasmCall, |
| kRelocatableWasmStubCall, |
| kRelocatableWasmIndirectCallTarget, |
| kRelocatableWasmCanonicalSignatureId |
| }; |
| |
| Kind kind; |
| RegisterRepresentation rep = Representation(kind); |
| union Storage { |
| uint64_t integral; |
| i::Float32 float32; |
| i::Float64 float64; |
| ExternalReference external; |
| IndirectHandle<HeapObject> handle; |
| |
| Storage(uint64_t integral = 0) : integral(integral) {} |
| Storage(i::Tagged<Smi> smi) : integral(smi.ptr()) {} |
| Storage(i::Float64 constant) : float64(constant) {} |
| Storage(i::Float32 constant) : float32(constant) {} |
| Storage(ExternalReference constant) : external(constant) {} |
| Storage(IndirectHandle<HeapObject> constant) : handle(constant) {} |
| |
| inline bool operator==(const ConstantOp::Storage&) const { |
| // It is tricky to implement this properly. We currently need to define |
| // this for the matchers, but this should never be called. |
| UNREACHABLE(); |
| } |
| } storage; |
| |
| static constexpr OpEffects effects = OpEffects(); |
| base::Vector<const RegisterRepresentation> outputs_rep() const { |
| return base::VectorOf(&rep, 1); |
| } |
| |
| base::Vector<const MaybeRegisterRepresentation> inputs_rep( |
| ZoneVector<MaybeRegisterRepresentation>&) const { |
| return {}; |
| } |
| |
| static RegisterRepresentation Representation(Kind kind) { |
| switch (kind) { |
| case Kind::kRelocatableWasmCanonicalSignatureId: |
| case Kind::kWord32: |
| return RegisterRepresentation::Word32(); |
| case Kind::kWord64: |
| return RegisterRepresentation::Word64(); |
| case Kind::kFloat32: |
| return RegisterRepresentation::Float32(); |
| case Kind::kFloat64: |
| return RegisterRepresentation::Float64(); |
| case Kind::kExternal: |
| case Kind::kTaggedIndex: |
| case Kind::kTrustedHeapObject: |
| case Kind::kRelocatableWasmCall: |
| case Kind::kRelocatableWasmStubCall: |
| return RegisterRepresentation::WordPtr(); |
| case Kind::kRelocatableWasmIndirectCallTarget: |
| return RegisterRepresentation::Word32(); |
| case Kind::kSmi: |
| case Kind::kHeapObject: |
| case Kind::kNumber: |
| return RegisterRepresentation::Tagged(); |
| case Kind::kCompressedHeapObject: |
| return RegisterRepresentation::Compressed(); |
| } |
| } |
| |
  // Constructs a constant of the given `kind`; `storage` must hold the
  // union member matching that kind (implicitly converted from the payload).
  ConstantOp(Kind kind, Storage storage)
      : Base(), kind(kind), storage(storage) {}
| |
  // Debug-only range checks for kinds with restricted payloads. Kept as
  // DCHECK_IMPLIES so the stringified condition appears in the abort message.
  void Validate(const Graph& graph) const {
    // A Word32 constant must not carry bits above the low 32.
    DCHECK_IMPLIES(
        kind == Kind::kWord32,
        storage.integral <= WordRepresentation::Word32().MaxUnsignedValue());
    // Canonical signature ids must fit in a non-negative signed 32-bit value.
    DCHECK_IMPLIES(
        kind == Kind::kRelocatableWasmCanonicalSignatureId,
        storage.integral <= WordRepresentation::Word32().MaxSignedValue());
  }
| |
  // Raw 64-bit payload; only valid for integral kinds (see IsIntegral()).
  uint64_t integral() const {
    DCHECK(IsIntegral());
    return storage.integral;
  }
| |
| int64_t signed_integral() const { |
| DCHECK(IsIntegral()); |
| switch (kind) { |
| case Kind::kWord32: |
| case Kind::kRelocatableWasmCanonicalSignatureId: |
| return static_cast<int32_t>(storage.integral); |
| case Kind::kWord64: |
| return static_cast<int64_t>(storage.integral); |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
  // Low 32 bits of the payload. Deliberately also allowed on Word64
  // constants, where it truncates.
  uint32_t word32() const {
    DCHECK(kind == Kind::kWord32 || kind == Kind::kWord64);
    return static_cast<uint32_t>(storage.integral);
  }
| |
  // Full 64-bit payload of a Word64 constant.
  uint64_t word64() const {
    DCHECK_EQ(kind, Kind::kWord64);
    return static_cast<uint64_t>(storage.integral);
  }
| |
  // Reconstructs the Smi from its raw pointer bits (stored via the
  // Storage(Tagged<Smi>) constructor, which uses smi.ptr()).
  i::Tagged<Smi> smi() const {
    DCHECK_EQ(kind, Kind::kSmi);
    return i::Tagged<Smi>(storage.integral);
  }
| |
  // Float64 payload of a kNumber constant.
  i::Float64 number() const {
    DCHECK_EQ(kind, Kind::kNumber);
    return storage.float64;
  }
| |
  // Float32 payload of a kFloat32 constant.
  i::Float32 float32() const {
    DCHECK_EQ(kind, Kind::kFloat32);
    return storage.float32;
  }
| |
  // Float64 payload of a kFloat64 constant.
  i::Float64 float64() const {
    DCHECK_EQ(kind, Kind::kFloat64);
    return storage.float64;
  }
| |
  // Payload truncated to its low 32 bits and reinterpreted as signed.
  // (The intermediate cast to uint32_t makes the truncation well-defined.)
  int32_t tagged_index() const {
    DCHECK_EQ(kind, Kind::kTaggedIndex);
    return static_cast<int32_t>(static_cast<uint32_t>(storage.integral));
  }
| |
  // ExternalReference payload of a kExternal constant.
  ExternalReference external_reference() const {
    DCHECK_EQ(kind, Kind::kExternal);
    return storage.external;
  }
| |
  // Handle payload; shared by the three heap-object kinds (regular,
  // compressed and trusted).
  IndirectHandle<i::HeapObject> handle() const {
    DCHECK(kind == Kind::kHeapObject || kind == Kind::kCompressedHeapObject ||
           kind == Kind::kTrustedHeapObject);
    return storage.handle;
  }
| |
| bool IsWord(uint64_t value) const { |
| switch (kind) { |
| case Kind::kWord32: |
| return static_cast<uint32_t>(value) == word32(); |
| case Kind::kWord64: |
| return value == word64(); |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| bool IsIntegral() const { |
| return kind == Kind::kWord32 || kind == Kind::kWord64 || |
| kind == Kind::kRelocatableWasmCall || |
| kind == Kind::kRelocatableWasmStubCall || |
| kind == Kind::kRelocatableWasmCanonicalSignatureId || |
| kind == Kind::kRelocatableWasmIndirectCallTarget; |
| } |
| |
  // The op's identity for hashing/equality machinery: kind plus raw payload.
  auto options() const { return std::tuple{kind, storage}; }

  // Out-of-line printer for this op's options.
  void PrintOptions(std::ostream& os) const;
| size_t hash_value( |
| HashingStrategy strategy = HashingStrategy::kDefault) const { |
| switch (kind) { |
| case Kind::kWord32: |
| case Kind::kWord64: |
| case Kind::kSmi: |
| case Kind::kTaggedIndex: |
| case Kind::kRelocatableWasmCall: |
| case Kind::kRelocatableWasmStubCall: |
| case Kind::kRelocatableWasmIndirectCallTarget: |
| case Kind::kRelocatableWasmCanonicalSignatureId: |
| return HashWithOptions(storage.integral); |
| case Kind::kFloat32: |
| return HashWithOptions(storage.float32.get_bits()); |
| case Kind::kFloat64: |
| case Kind::kNumber: |
| return HashWithOptions(storage.float64.get_bits()); | <