| // Copyright 2014 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/compiler/simplified-lowering.h" |
| |
| #include <limits> |
| #include <optional> |
| |
| #include "include/v8-fast-api-calls.h" |
| #include "src/base/logging.h" |
| #include "src/base/platform/platform.h" |
| #include "src/base/small-vector.h" |
| #include "src/codegen/callable.h" |
| #include "src/codegen/machine-type.h" |
| #include "src/codegen/tick-counter.h" |
| #include "src/compiler/access-builder.h" |
| #include "src/compiler/common-operator.h" |
| #include "src/compiler/compiler-source-position-table.h" |
| #include "src/compiler/diamond.h" |
| #include "src/compiler/feedback-source.h" |
| #include "src/compiler/js-heap-broker.h" |
| #include "src/compiler/linkage.h" |
| #include "src/compiler/node-matchers.h" |
| #include "src/compiler/node-observer.h" |
| #include "src/compiler/node-origin-table.h" |
| #include "src/compiler/opcodes.h" |
| #include "src/compiler/operation-typer.h" |
| #include "src/compiler/operator-properties.h" |
| #include "src/compiler/representation-change.h" |
| #include "src/compiler/simplified-lowering-verifier.h" |
| #include "src/compiler/simplified-operator.h" |
| #include "src/compiler/turbofan-graph-visualizer.h" |
| #include "src/compiler/type-cache.h" |
| #include "src/flags/flags.h" |
| #include "src/numbers/conversions-inl.h" |
| #include "src/objects/objects.h" |
| |
| #if V8_ENABLE_WEBASSEMBLY |
| #include "src/wasm/value-type.h" |
| #endif // V8_ENABLE_WEBASSEMBLY |
| |
| namespace v8 { |
| namespace internal { |
| namespace compiler { |
| |
// Macro for outputting trace information from representation inference.
// Prints only when the --trace-representation flag is enabled.
#define TRACE(...)                                          \
  do {                                                      \
    if (v8_flags.trace_representation) PrintF(__VA_ARGS__); \
  } while (false)

// Reducer name used when reporting node changes (see ObserveNodeManager).
const char* kSimplifiedLoweringReducerName = "SimplifiedLowering";
| |
// Representation selection and lowering of {Simplified} operators to machine
// operators are intertwined. We use a fixpoint calculation to compute both the
// output representation and the best possible lowering for {Simplified} nodes.
// Representation change insertion ensures that all values are in the correct
// machine representation after this phase, as dictated by the machine
// operators themselves.
enum Phase {
  // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information
  //     backwards from uses to definitions, around cycles in phis, according
  //     to local rules for each operator.
  //     During this phase, the usage information for a node determines the
  //     best possible lowering for each operator so far, and that in turn
  //     determines the output representation.
  //     Therefore, to be correct, this phase must iterate to a fixpoint before
  //     the next phase can begin.
  PROPAGATE,

  // 2.) RETYPE: Propagate types from type feedback forwards.
  RETYPE,

  // 3.) LOWER: perform lowering for all {Simplified} nodes by replacing some
  //     operators for some nodes, expanding some nodes to multiple nodes, or
  //     removing some (redundant) nodes.
  //     During this phase, use the {RepresentationChanger} to insert
  //     representation changes between uses that demand a particular
  //     representation and nodes that produce a different representation.
  LOWER
};
| |
| namespace { |
| |
| MachineRepresentation MachineRepresentationFromArrayType( |
| ExternalArrayType array_type) { |
| switch (array_type) { |
| case kExternalUint8Array: |
| case kExternalUint8ClampedArray: |
| case kExternalInt8Array: |
| return MachineRepresentation::kWord8; |
| case kExternalUint16Array: |
| case kExternalInt16Array: |
| return MachineRepresentation::kWord16; |
| case kExternalUint32Array: |
| case kExternalInt32Array: |
| return MachineRepresentation::kWord32; |
| case kExternalFloat16Array: |
| return MachineRepresentation::kFloat16RawBits; |
| case kExternalFloat32Array: |
| return MachineRepresentation::kFloat32; |
| case kExternalFloat64Array: |
| return MachineRepresentation::kFloat64; |
| case kExternalBigInt64Array: |
| case kExternalBigUint64Array: |
| return MachineRepresentation::kWord64; |
| } |
| UNREACHABLE(); |
| } |
| |
| UseInfo CheckedUseInfoAsWord32FromHint( |
| NumberOperationHint hint, IdentifyZeros identify_zeros = kDistinguishZeros, |
| const FeedbackSource& feedback = FeedbackSource()) { |
| switch (hint) { |
| case NumberOperationHint::kSignedSmall: |
| case NumberOperationHint::kSignedSmallInputs: |
| return UseInfo::CheckedSignedSmallAsWord32(identify_zeros, feedback); |
| case NumberOperationHint::kAdditiveSafeInteger: |
| case NumberOperationHint::kNumber: |
| DCHECK_EQ(identify_zeros, kIdentifyZeros); |
| return UseInfo::CheckedNumberAsWord32(feedback); |
| case NumberOperationHint::kNumberOrBoolean: |
| // Not used currently. |
| UNREACHABLE(); |
| case NumberOperationHint::kNumberOrOddball: |
| DCHECK_EQ(identify_zeros, kIdentifyZeros); |
| return UseInfo::CheckedNumberOrOddballAsWord32(feedback); |
| } |
| UNREACHABLE(); |
| } |
| |
| UseInfo CheckedUseInfoAsFloat64FromHint( |
| NumberOperationHint hint, const FeedbackSource& feedback, |
| IdentifyZeros identify_zeros = kDistinguishZeros) { |
| switch (hint) { |
| case NumberOperationHint::kSignedSmall: |
| case NumberOperationHint::kSignedSmallInputs: |
| // Not used currently. |
| UNREACHABLE(); |
| case NumberOperationHint::kAdditiveSafeInteger: |
| case NumberOperationHint::kNumber: |
| return UseInfo::CheckedNumberAsFloat64(identify_zeros, feedback); |
| case NumberOperationHint::kNumberOrBoolean: |
| return UseInfo::CheckedNumberOrBooleanAsFloat64(identify_zeros, feedback); |
| case NumberOperationHint::kNumberOrOddball: |
| return UseInfo::CheckedNumberOrOddballAsFloat64(identify_zeros, feedback); |
| } |
| UNREACHABLE(); |
| } |
| |
| UseInfo TruncatingUseInfoFromRepresentation(MachineRepresentation rep) { |
| switch (rep) { |
| case MachineRepresentation::kTaggedSigned: |
| return UseInfo::TaggedSigned(); |
| case MachineRepresentation::kTaggedPointer: |
| case MachineRepresentation::kTagged: |
| case MachineRepresentation::kIndirectPointer: |
| case MachineRepresentation::kMapWord: |
| return UseInfo::AnyTagged(); |
| case MachineRepresentation::kFloat64: |
| return UseInfo::TruncatingFloat64(); |
| case MachineRepresentation::kFloat32: |
| return UseInfo::Float32(); |
| case MachineRepresentation::kFloat16RawBits: |
| return UseInfo::Float16RawBits(); |
| case MachineRepresentation::kWord8: |
| case MachineRepresentation::kWord16: |
| case MachineRepresentation::kWord32: |
| return UseInfo::TruncatingWord32(); |
| case MachineRepresentation::kWord64: |
| return UseInfo::TruncatingWord64(); |
| case MachineRepresentation::kBit: |
| return UseInfo::Bool(); |
| case MachineRepresentation::kCompressedPointer: |
| case MachineRepresentation::kCompressed: |
| case MachineRepresentation::kProtectedPointer: |
| case MachineRepresentation::kSandboxedPointer: |
| case MachineRepresentation::kFloat16: |
| case MachineRepresentation::kSimd128: |
| case MachineRepresentation::kSimd256: |
| case MachineRepresentation::kNone: |
| UNREACHABLE(); |
| } |
| } |
| |
| UseInfo UseInfoForBasePointer(const FieldAccess& access) { |
| return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word(); |
| } |
| |
| UseInfo UseInfoForBasePointer(const ElementAccess& access) { |
| return access.tag() != 0 ? UseInfo::AnyTagged() : UseInfo::Word(); |
| } |
| |
| void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) { |
| for (Edge edge : node->use_edges()) { |
| if (NodeProperties::IsControlEdge(edge)) { |
| edge.UpdateTo(control); |
| } else if (NodeProperties::IsEffectEdge(edge)) { |
| edge.UpdateTo(effect); |
| } else { |
| DCHECK(NodeProperties::IsValueEdge(edge) || |
| NodeProperties::IsContextEdge(edge)); |
| } |
| } |
| } |
| |
// Determines whether a speculative small-integer add/subtract with the given
// input types can leave the int32 range (and therefore needs an overflow
// check).
bool CanOverflowSigned32(const Operator* op, Type left, Type right,
                         TypeCache const* type_cache, Zone* type_zone) {
  // We assume the inputs are checked Signed32 (or known statically to be
  // Signed32). Technically, the inputs could also be minus zero, which we treat
  // as 0 for the purpose of this function.
  if (left.Maybe(Type::MinusZero())) {
    left = Type::Union(left, type_cache->kSingletonZero, type_zone);
  }
  if (right.Maybe(Type::MinusZero())) {
    right = Type::Union(right, type_cache->kSingletonZero, type_zone);
  }
  // Narrow both sides to their Signed32 portion before reading bounds.
  left = Type::Intersect(left, Type::Signed32(), type_zone);
  right = Type::Intersect(right, Type::Signed32(), type_zone);
  // An empty input type means the operation is unreachable; it cannot
  // overflow.
  if (left.IsNone() || right.IsNone()) return false;
  switch (op->opcode()) {
    case IrOpcode::kSpeculativeSmallIntegerAdd:
      // Overflow iff the extreme sums of the bounds escape [kMinInt, kMaxInt].
      return (left.Max() + right.Max() > kMaxInt) ||
             (left.Min() + right.Min() < kMinInt);

    case IrOpcode::kSpeculativeSmallIntegerSubtract:
      // Overflow iff the extreme differences escape [kMinInt, kMaxInt].
      return (left.Max() - right.Min() > kMaxInt) ||
             (left.Min() - right.Max() < kMinInt);

    default:
      UNREACHABLE();
  }
}
| |
| bool IsSomePositiveOrderedNumber(Type type) { |
| return type.Is(Type::OrderedNumber()) && (type.IsNone() || type.Min() > 0); |
| } |
| |
| inline bool IsLargeBigInt(Type type) { |
| return type.Is(Type::BigInt()) && !type.Is(Type::SignedBigInt64()) && |
| !type.Is(Type::UnsignedBigInt64()); |
| } |
| |
| class JSONGraphWriterWithVerifierTypes : public JSONGraphWriter { |
| public: |
| JSONGraphWriterWithVerifierTypes(std::ostream& os, const Graph* graph, |
| const SourcePositionTable* positions, |
| const NodeOriginTable* origins, |
| SimplifiedLoweringVerifier* verifier) |
| : JSONGraphWriter(os, graph, positions, origins), verifier_(verifier) {} |
| |
| protected: |
| std::optional<Type> GetType(Node* node) override { |
| return verifier_->GetType(node); |
| } |
| |
| private: |
| SimplifiedLoweringVerifier* verifier_; |
| }; |
| |
| bool IsLoadFloat16ArrayElement(Node* node) { |
| Operator::Opcode opcode = node->op()->opcode(); |
| return (opcode == IrOpcode::kLoadTypedElement || |
| opcode == IrOpcode::kLoadDataViewElement) && |
| ExternalArrayTypeOf(node->op()) == kExternalFloat16Array; |
| } |
| |
| } // namespace |
| |
| #ifdef DEBUG |
| // Helpers for monotonicity checking. |
// Records, per node, the use info last demanded of each input, and DCHECKs
// that successive demands only become more general (monotonicity of the
// propagation fixpoint).
class InputUseInfos {
 public:
  explicit InputUseInfos(Zone* zone) : input_use_infos_(zone) {}

  // Stores {use_info} for {node}'s input at {index} after checking
  // monotonicity against the previously recorded value.
  void SetAndCheckInput(Node* node, int index, UseInfo use_info) {
    if (input_use_infos_.empty()) {
      // Lazily size the vector to the node's input count on first use.
      input_use_infos_.resize(node->InputCount(), UseInfo::None());
    }
    // Check that the new use information is a super-type of the old
    // one.
    DCHECK(IsUseLessGeneral(input_use_infos_[index], use_info));
    input_use_infos_[index] = use_info;
  }

 private:
  ZoneVector<UseInfo> input_use_infos_;

  // Monotonicity predicate on the truncation lattice.
  static bool IsUseLessGeneral(UseInfo use1, UseInfo use2) {
    return use1.truncation().IsLessGeneralThan(use2.truncation());
  }
};
| |
| #endif // DEBUG |
| |
| class RepresentationSelector { |
  // The purpose of this nested class is to hide method
  // v8::internal::compiler::NodeProperties::ChangeOp which should not be
  // directly used by code in RepresentationSelector and SimplifiedLowering.
  // RepresentationSelector code should call RepresentationSelector::ChangeOp in
  // place of NodeProperties::ChangeOp, in order to notify the changes to a
  // registered ObserveNodeManager and support the %ObserveNode intrinsic.
  class NodeProperties : public compiler::NodeProperties {
    // Private and unreachable: shadows the base-class entry point so that any
    // accidental call inside this file fails loudly.
    static void ChangeOp(Node* node, const Operator* new_op) { UNREACHABLE(); }
  };
| |
| public: |
  // Information for each node tracked during the fixpoint.
  class NodeInfo final {
   public:
    // Adds new use to the node. Returns true if something has changed
    // and the node has to be requeued.
    bool AddUse(UseInfo info) {
      Truncation old_truncation = truncation_;
      truncation_ = Truncation::Generalize(truncation_, info.truncation());
      return truncation_ != old_truncation;
    }

    // Traversal state transitions; see the State enum below.
    void set_queued() { state_ = kQueued; }
    void set_visited() { state_ = kVisited; }
    void set_pushed() { state_ = kPushed; }
    void reset_state() { state_ = kUnvisited; }
    bool visited() const { return state_ == kVisited; }
    bool queued() const { return state_ == kQueued; }
    bool pushed() const { return state_ == kPushed; }
    bool unvisited() const { return state_ == kUnvisited; }
    // The truncation accumulated (generalized) over all uses seen so far.
    Truncation truncation() const { return truncation_; }
    // The machine representation selected for this node's output.
    void set_output(MachineRepresentation output) { representation_ = output; }

    MachineRepresentation representation() const { return representation_; }

    // Helpers for feedback typing.
    void set_feedback_type(Type type) { feedback_type_ = type; }
    Type feedback_type() const { return feedback_type_; }
    // Once weakening starts for a node it must continue on every retype, so
    // this flag is sticky (see Weaken()).
    void set_weakened() { weakened_ = true; }
    bool weakened() const { return weakened_; }
    // Restriction imposed by checked/speculative operations; intersected with
    // the computed type during retyping (see UpdateFeedbackType()).
    void set_restriction_type(Type type) { restriction_type_ = type; }
    Type restriction_type() const { return restriction_type_; }

   private:
    // Fields are ordered to avoid mixing byte and word size fields to minimize
    // padding.
    enum State : uint8_t { kUnvisited, kPushed, kVisited, kQueued };
    State state_ = kUnvisited;
    MachineRepresentation representation_ =
        MachineRepresentation::kNone;  // Output representation.
    Truncation truncation_ = Truncation::None();  // Information about uses.
    bool weakened_ = false;

    Type restriction_type_ = Type::Any();
    Type feedback_type_;
  };
| |
  // Sizes the per-node info table to the graph's current node count and
  // caches the singleton types for the canonical true/false values.
  RepresentationSelector(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
                         RepresentationChanger* changer,
                         SourcePositionTable* source_positions,
                         NodeOriginTable* node_origins,
                         TickCounter* tick_counter, Linkage* linkage,
                         ObserveNodeManager* observe_node_manager,
                         SimplifiedLoweringVerifier* verifier)
      : jsgraph_(jsgraph),
        broker_(broker),
        zone_(zone),
        might_need_revisit_(zone),
        count_(jsgraph->graph()->NodeCount()),
        info_(count_, zone),
#ifdef DEBUG
        node_input_use_infos_(count_, InputUseInfos(zone), zone),
#endif
        replacements_(zone),
        changer_(changer),
        revisit_queue_(zone),
        traversal_nodes_(zone),
        source_positions_(source_positions),
        node_origins_(node_origins),
        type_cache_(TypeCache::Get()),
        op_typer_(broker, graph_zone()),
        tick_counter_(tick_counter),
        linkage_(linkage),
        observe_node_manager_(observe_node_manager),
        // A null verifier disables the verification phase (see
        // verification_enabled()).
        verifier_(verifier) {
    singleton_true_ =
        Type::Constant(broker, broker->true_value(), graph_zone());
    singleton_false_ =
        Type::Constant(broker, broker->false_value(), graph_zone());
  }
| |
| bool verification_enabled() const { return verifier_ != nullptr; } |
| |
| void ResetNodeInfoState() { |
| // Clean up for the next phase. |
| for (NodeInfo& info : info_) { |
| info.reset_state(); |
| } |
| } |
| |
| Type TypeOf(Node* node) { |
| Type type = GetInfo(node)->feedback_type(); |
| return type.IsInvalid() ? NodeProperties::GetType(node) : type; |
| } |
| |
| Type FeedbackTypeOf(Node* node) { |
| Type type = GetInfo(node)->feedback_type(); |
| return type.IsInvalid() ? Type::None() : type; |
| } |
| |
| Type TypePhi(Node* node) { |
| int arity = node->op()->ValueInputCount(); |
| Type type = FeedbackTypeOf(node->InputAt(0)); |
| for (int i = 1; i < arity; ++i) { |
| type = op_typer_.Merge(type, FeedbackTypeOf(node->InputAt(i))); |
| } |
| return type; |
| } |
| |
| Type TypeSelect(Node* node) { |
| return op_typer_.Merge(FeedbackTypeOf(node->InputAt(1)), |
| FeedbackTypeOf(node->InputAt(2))); |
| } |
| |
  // Recomputes the feedback type of {node} from its inputs' feedback types.
  // Returns true iff the stored feedback type changed, in which case users of
  // {node} may need to be revisited.
  bool UpdateFeedbackType(Node* node) {
    // Nodes without value output carry no feedback type.
    if (node->op()->ValueOutputCount() == 0) return false;
    if ((IrOpcode::IsMachineOpcode(node->opcode()) ||
         IrOpcode::IsMachineConstantOpcode(node->opcode())) &&
        node->opcode() != IrOpcode::kLoadFramePointer) {
      DCHECK(NodeProperties::GetType(node).Is(Type::Machine()));
    }

    // For any non-phi node just wait until we get all inputs typed. We only
    // allow untyped inputs for phi nodes because phis are the only places
    // where cycles need to be broken.
    if (node->opcode() != IrOpcode::kPhi) {
      for (int i = 0; i < node->op()->ValueInputCount(); i++) {
        if (GetInfo(node->InputAt(i))->feedback_type().IsInvalid()) {
          return false;
        }
      }
    }

    NodeInfo* info = GetInfo(node);
    Type type = info->feedback_type();
    Type new_type = NodeProperties::GetType(node);

    // We preload these values here to avoid increasing the binary size too
    // much, which happens if we inline the calls into the macros below.
    Type input0_type;
    if (node->InputCount() > 0) input0_type = FeedbackTypeOf(node->InputAt(0));
    Type input1_type;
    if (node->InputCount() > 1) input1_type = FeedbackTypeOf(node->InputAt(1));

    switch (node->opcode()) {
      // Pure number binops: retype directly via the operation typer.
#define DECLARE_CASE(Name)                               \
  case IrOpcode::k##Name: {                              \
    new_type = op_typer_.Name(input0_type, input1_type); \
    break;                                               \
  }
      SIMPLIFIED_NUMBER_BINOP_LIST(DECLARE_CASE)
      DECLARE_CASE(SameValue)
#undef DECLARE_CASE

      // Speculative binops: additionally intersect with the restriction type
      // recorded during representation selection.
#define DECLARE_CASE(Name)                                               \
  case IrOpcode::k##Name: {                                              \
    new_type = Type::Intersect(op_typer_.Name(input0_type, input1_type), \
                               info->restriction_type(), graph_zone());  \
    break;                                                               \
  }
      SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_CASE)
      SIMPLIFIED_SPECULATIVE_BIGINT_BINOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE

      // Pure number unops.
#define DECLARE_CASE(Name)                  \
  case IrOpcode::k##Name: {                 \
    new_type = op_typer_.Name(input0_type); \
    break;                                  \
  }
      SIMPLIFIED_NUMBER_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE

      // Speculative unops, again limited by the restriction type.
#define DECLARE_CASE(Name)                                              \
  case IrOpcode::k##Name: {                                             \
    new_type = Type::Intersect(op_typer_.Name(input0_type),             \
                               info->restriction_type(), graph_zone()); \
    break;                                                              \
  }
      SIMPLIFIED_SPECULATIVE_NUMBER_UNOP_LIST(DECLARE_CASE)
#undef DECLARE_CASE

      case IrOpcode::kConvertReceiver:
        new_type = op_typer_.ConvertReceiver(input0_type);
        break;

      case IrOpcode::kPlainPrimitiveToNumber:
        new_type = op_typer_.ToNumber(input0_type);
        break;

      case IrOpcode::kCheckBounds:
        new_type =
            Type::Intersect(op_typer_.CheckBounds(input0_type, input1_type),
                            info->restriction_type(), graph_zone());
        break;

      case IrOpcode::kCheckFloat64Hole:
        new_type = Type::Intersect(op_typer_.CheckFloat64Hole(input0_type),
                                   info->restriction_type(), graph_zone());
        break;

      case IrOpcode::kCheckNumber:
        new_type = Type::Intersect(op_typer_.CheckNumber(input0_type),
                                   info->restriction_type(), graph_zone());
        break;

      case IrOpcode::kCheckNumberFitsInt32:
        new_type = Type::Intersect(op_typer_.CheckNumberFitsInt32(input0_type),
                                   info->restriction_type(), graph_zone());
        break;

      case IrOpcode::kPhi: {
        new_type = TypePhi(node);
        if (!type.IsInvalid()) {
          // Widen the type to guarantee termination of the fixpoint on
          // loop phis.
          new_type = Weaken(node, type, new_type);
        }
        break;
      }

      case IrOpcode::kConvertTaggedHoleToUndefined:
        new_type = op_typer_.ConvertTaggedHoleToUndefined(
            FeedbackTypeOf(node->InputAt(0)));
        break;

      case IrOpcode::kTypeGuard: {
        new_type = op_typer_.TypeTypeGuard(node->op(),
                                           FeedbackTypeOf(node->InputAt(0)));
        break;
      }

      case IrOpcode::kSelect: {
        const auto& p = SelectParametersOf(node->op());
        if (p.semantics() == BranchSemantics::kMachine) {
          // Machine-level selects are not retyped; install the static type
          // once and report a change only on that first installation.
          if (type.IsInvalid()) {
            GetInfo(node)->set_feedback_type(NodeProperties::GetType(node));
            return true;
          }
          return false;
        }
        new_type = TypeSelect(node);
        break;
      }

      default:
        // Shortcut for operations that we do not handle.
        if (type.IsInvalid()) {
          GetInfo(node)->set_feedback_type(NodeProperties::GetType(node));
          return true;
        }
        return false;
    }
    // We need to guarantee that the feedback type is a subtype of the upper
    // bound. Naively that should hold, but weakening can actually produce
    // a bigger type if we are unlucky with ordering of phi typing. To be
    // really sure, just intersect the upper bound with the feedback type.
    new_type = Type::Intersect(GetUpperBound(node), new_type, graph_zone());

    if (!type.IsInvalid() && new_type.Is(type)) return false;
    GetInfo(node)->set_feedback_type(new_type);
    if (v8_flags.trace_representation) {
      PrintNodeFeedbackType(node);
    }
    return true;
  }
| |
| void PrintNodeFeedbackType(Node* n) { |
| StdoutStream os; |
| os << "#" << n->id() << ":" << *n->op() << "("; |
| int j = 0; |
| for (Node* const i : n->inputs()) { |
| if (j++ > 0) os << ", "; |
| os << "#" << i->id() << ":" << i->op()->mnemonic(); |
| } |
| os << ")"; |
| if (NodeProperties::IsTyped(n)) { |
| Type static_type = NodeProperties::GetType(n); |
| os << " [Static type: " << static_type; |
| Type feedback_type = GetInfo(n)->feedback_type(); |
| if (!feedback_type.IsInvalid() && feedback_type != static_type) { |
| os << ", Feedback type: " << feedback_type; |
| } |
| os << "]"; |
| } |
| os << std::endl; |
| } |
| |
  // Widens ("weakens") {current_type} relative to {previous_type} for a phi
  // node so that the retyping fixpoint converges instead of creeping through
  // ever-growing integer ranges.
  Type Weaken(Node* node, Type previous_type, Type current_type) {
    // If the types have nothing to do with integers, return the types.
    Type const integer = type_cache_->kInteger;
    if (!previous_type.Maybe(integer)) {
      return current_type;
    }
    DCHECK(current_type.Maybe(integer));

    Type current_integer = Type::Intersect(current_type, integer, graph_zone());
    DCHECK(!current_integer.IsNone());
    Type previous_integer =
        Type::Intersect(previous_type, integer, graph_zone());
    DCHECK(!previous_integer.IsNone());

    // Once we start weakening a node, we should always weaken.
    if (!GetInfo(node)->weakened()) {
      // Only weaken if there is range involved; we should converge quickly
      // for all other types (the exception is a union of many constants,
      // but we currently do not increase the number of constants in unions).
      Type previous = previous_integer.GetRange();
      Type current = current_integer.GetRange();
      if (current.IsInvalid() || previous.IsInvalid()) {
        return current_type;
      }
      // Range is involved => we are weakening.
      GetInfo(node)->set_weakened();
    }

    // Union in the widened range computed by the operation typer.
    return Type::Union(current_type,
                       op_typer_.WeakenRange(previous_integer, current_integer),
                       graph_zone());
  }
| |
| // Generates a pre-order traversal of the nodes, starting with End. |
  void GenerateTraversal() {
    // Reset previous state.
    ResetNodeInfoState();
    traversal_nodes_.clear();
    count_ = graph()->NodeCount();
    info_.resize(count_);

    // Iterative DFS from End over input edges; a node is appended to
    // {traversal_nodes_} only after all of its inputs have been appended.
    ZoneStack<NodeState> stack(zone_);

    stack.push({graph()->end(), 0});
    GetInfo(graph()->end())->set_pushed();
    while (!stack.empty()) {
      NodeState& current = stack.top();
      Node* node = current.node;
      // If there is an unvisited input, push it and continue with that node.
      bool pushed_unvisited = false;
      while (current.input_index < node->InputCount()) {
        Node* input = node->InputAt(current.input_index);
        NodeInfo* input_info = GetInfo(input);
        current.input_index++;
        if (input_info->unvisited()) {
          input_info->set_pushed();
          stack.push({input, 0});
          pushed_unvisited = true;
          break;
        } else if (input_info->pushed()) {
          // Optimization for the Retype phase.
          // If we had already pushed (and not visited) an input, it means that
          // the current node will be visited in the Retype phase before one of
          // its inputs. If this happens, the current node might need to be
          // revisited.
          MarkAsPossibleRevisit(node, input);
        }
      }

      if (pushed_unvisited) continue;

      // All inputs of {node} are handled; emit it.
      stack.pop();
      NodeInfo* info = GetInfo(node);
      info->set_visited();

      // Generate the traversal
      traversal_nodes_.push_back(node);
    }
  }
| |
| void PushNodeToRevisitIfVisited(Node* node) { |
| NodeInfo* info = GetInfo(node); |
| if (info->visited()) { |
| TRACE(" QUEUEING #%d: %s\n", node->id(), node->op()->mnemonic()); |
| info->set_queued(); |
| revisit_queue_.push(node); |
| } |
| } |
| |
| // Tries to update the feedback type of the node, as well as setting its |
| // machine representation (in VisitNode). Returns true iff updating the |
| // feedback type is successful. |
| bool RetypeNode(Node* node) { |
| NodeInfo* info = GetInfo(node); |
| info->set_visited(); |
| bool updated = UpdateFeedbackType(node); |
| TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic()); |
| VisitNode<RETYPE>(node, info->truncation(), nullptr); |
| TRACE(" ==> output %s\n", MachineReprToString(info->representation())); |
| return updated; |
| } |
| |
| // Visits the node and marks it as visited. Inside of VisitNode, we might |
| // change the truncation of one of our inputs (see EnqueueInput<PROPAGATE> for |
| // this). If we change the truncation of an already visited node, we will add |
| // it to the revisit queue. |
| void PropagateTruncation(Node* node) { |
| NodeInfo* info = GetInfo(node); |
| info->set_visited(); |
| TRACE(" visit #%d: %s (trunc: %s)\n", node->id(), node->op()->mnemonic(), |
| info->truncation().description()); |
| VisitNode<PROPAGATE>(node, info->truncation(), nullptr); |
| } |
| |
| // Backward propagation of truncations to a fixpoint. |
| void RunPropagatePhase() { |
| TRACE("--{Propagate phase}--\n"); |
| ResetNodeInfoState(); |
| DCHECK(revisit_queue_.empty()); |
| |
| // Process nodes in reverse post order, with End as the root. |
| for (auto it = traversal_nodes_.crbegin(); it != traversal_nodes_.crend(); |
| ++it) { |
| PropagateTruncation(*it); |
| |
| while (!revisit_queue_.empty()) { |
| Node* node = revisit_queue_.front(); |
| revisit_queue_.pop(); |
| PropagateTruncation(node); |
| } |
| } |
| } |
| |
| // Forward propagation of types from type feedback to a fixpoint. |
| void RunRetypePhase() { |
| TRACE("--{Retype phase}--\n"); |
| ResetNodeInfoState(); |
| DCHECK(revisit_queue_.empty()); |
| |
| for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend(); |
| ++it) { |
| Node* node = *it; |
| if (!RetypeNode(node)) continue; |
| |
| auto revisit_it = might_need_revisit_.find(node); |
| if (revisit_it == might_need_revisit_.end()) continue; |
| |
| for (Node* const user : revisit_it->second) { |
| PushNodeToRevisitIfVisited(user); |
| } |
| |
| // Process the revisit queue. |
| while (!revisit_queue_.empty()) { |
| Node* revisit_node = revisit_queue_.front(); |
| revisit_queue_.pop(); |
| if (!RetypeNode(revisit_node)) continue; |
| // Here we need to check all uses since we can't easily know which |
| // nodes will need to be revisited due to having an input which was |
| // a revisited node. |
| for (Node* const user : revisit_node->uses()) { |
| PushNodeToRevisitIfVisited(user); |
| } |
| } |
| } |
| } |
| |
  // Lowering and change insertion phase.
  void RunLowerPhase(SimplifiedLowering* lowering) {
    TRACE("--{Lower phase}--\n");
    for (auto it = traversal_nodes_.cbegin(); it != traversal_nodes_.cend();
         ++it) {
      Node* node = *it;
      NodeInfo* info = GetInfo(node);
      TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
      // Reuse {VisitNode()} so the representation rules are in one place.
      SourcePositionTable::Scope scope(
          source_positions_, source_positions_->GetSourcePosition(node));
      NodeOriginTable::Scope origin_scope(node_origins_, "simplified lowering",
                                          node);
      VisitNode<LOWER>(node, info->truncation(), lowering);
    }

    // Perform the final replacements. {replacements_} holds flattened
    // (node, replacement) pairs, so the outer loop consumes two slots per
    // iteration (note the extra ++i in the body).
    for (NodeVector::iterator i = replacements_.begin();
         i != replacements_.end(); ++i) {
      Node* node = *i;
      Node* replacement = *(++i);
      node->ReplaceUses(replacement);
      node->Kill();
      // We also need to replace the node in the rest of the vector.
      // Only the replacement slots are inspected: the extra ++j in the loop
      // body skips over the to-be-replaced-node slots of each pair.
      for (NodeVector::iterator j = i + 1; j != replacements_.end(); ++j) {
        ++j;
        if (*j == node) *j = replacement;
      }
    }
  }
| |
  // Runs the SimplifiedLoweringVerifier over a freshly generated traversal of
  // the (now lowered) graph, optionally dumping JSON traces before and after.
  void RunVerifyPhase(OptimizedCompilationInfo* compilation_info) {
    DCHECK_NOT_NULL(verifier_);

    TRACE("--{Verify Phase}--\n");

    // Patch pending type overrides. Constants with recorded machine uses get
    // an explicit Machine-typed override node; only those uses are rewired.
    for (const auto& [constant, uses] :
         verifier_->machine_uses_of_constants()) {
      Node* typed_constant =
          InsertTypeOverrideForVerifier(Type::Machine(), constant);
      for (auto use : uses) {
        for (int i = 0; i < use->InputCount(); ++i) {
          if (use->InputAt(i) == constant) {
            use->ReplaceInput(i, typed_constant);
          }
        }
      }
    }

    // Generate a new traversal containing all the new nodes created during
    // lowering.
    GenerateTraversal();

    // Set node types to the refined types computed during retyping.
    for (Node* node : traversal_nodes_) {
      NodeInfo* info = GetInfo(node);
      if (!info->feedback_type().IsInvalid()) {
        NodeProperties::SetType(node, info->feedback_type());
      }
    }

    // Print graph.
    if (compilation_info != nullptr && compilation_info->trace_turbo_json()) {
      UnparkedScopeIfNeeded scope(broker_);
      AllowHandleDereference allow_deref;

      TurboJsonFile json_of(compilation_info, std::ios_base::app);
      JSONGraphWriter writer(json_of, graph(), source_positions_,
                             node_origins_);
      writer.PrintPhase("V8.TFSimplifiedLowering [after lower]");
    }

    // Verify all nodes.
    for (Node* node : traversal_nodes_) {
      verifier_->VisitNode(node, op_typer_);
    }

    // Print graph, this time annotated with the verifier's types.
    if (compilation_info != nullptr && compilation_info->trace_turbo_json()) {
      UnparkedScopeIfNeeded scope(broker_);
      AllowHandleDereference allow_deref;

      TurboJsonFile json_of(compilation_info, std::ios_base::app);
      JSONGraphWriterWithVerifierTypes writer(
          json_of, graph(), source_positions_, node_origins_, verifier_);
      writer.PrintPhase("V8.TFSimplifiedLowering [after verify]");
    }

    // Eliminate all introduced hints.
    for (Node* node : verifier_->inserted_hints()) {
      Node* input = node->InputAt(0);
      node->ReplaceUses(input);
      node->Kill();
    }
  }
| |
| void Run(SimplifiedLowering* lowering) { |
| GenerateTraversal(); |
| RunPropagatePhase(); |
| RunRetypePhase(); |
| RunLowerPhase(lowering); |
| if (verification_enabled()) { |
| RunVerifyPhase(lowering->info_); |
| } |
| } |
| |
  // Just assert for Retype and Lower. Propagate specialized below.
  template <Phase T>
  void EnqueueInput(Node* use_node, int index,
                    UseInfo use_info = UseInfo::None()) {
    static_assert(retype<T>() || lower<T>(),
                  "This version of EnqueueInput has to be called in "
                  "the Retype or Lower phase.");
  }

  // Compile-time phase predicates used to specialize the templated
  // visitation helpers.
  template <Phase T>
  static constexpr bool propagate() {
    return T == PROPAGATE;
  }

  template <Phase T>
  static constexpr bool retype() {
    return T == RETYPE;
  }

  template <Phase T>
  static constexpr bool lower() {
    return T == LOWER;
  }

  // Records the output representation (and optional restriction type) for
  // {node}. Declaration only; the phase-specific definitions follow.
  template <Phase T>
  void SetOutput(Node* node, MachineRepresentation representation,
                 Type restriction_type = Type::Any());
| |
| Type GetUpperBound(Node* node) { return NodeProperties::GetType(node); } |
| |
| bool InputCannotBe(Node* node, Type type) { |
| DCHECK_EQ(1, node->op()->ValueInputCount()); |
| return !GetUpperBound(node->InputAt(0)).Maybe(type); |
| } |
| |
| bool InputIs(Node* node, Type type) { |
| DCHECK_EQ(1, node->op()->ValueInputCount()); |
| return GetUpperBound(node->InputAt(0)).Is(type); |
| } |
| |
| bool BothInputsAreSigned32(Node* node) { |
| return BothInputsAre(node, Type::Signed32()); |
| } |
| |
| bool BothInputsAreUnsigned32(Node* node) { |
| return BothInputsAre(node, Type::Unsigned32()); |
| } |
| |
| bool BothInputsAre(Node* node, Type type) { |
| DCHECK_EQ(2, node->op()->ValueInputCount()); |
| return GetUpperBound(node->InputAt(0)).Is(type) && |
| GetUpperBound(node->InputAt(1)).Is(type); |
| } |
| |
  // Returns true if the representation already selected for {node} is one of
  // the tagged representations.
  bool IsNodeRepresentationTagged(Node* node) {
    MachineRepresentation representation = GetInfo(node)->representation();
    return IsAnyTagged(representation);
  }
| |
  // Returns true if at least one of the two value inputs of {node} can
  // statically never be of type {type}.
  bool OneInputCannotBe(Node* node, Type type) {
    DCHECK_EQ(2, node->op()->ValueInputCount());
    return !GetUpperBound(node->InputAt(0)).Maybe(type) ||
           !GetUpperBound(node->InputAt(1)).Maybe(type);
  }
| |
  // Replaces an unreachable {node} (its type must be None) by a DeadValue of
  // its previously selected representation, inserting an Unreachable node
  // into the effect chain at {effect}/{control}.
  void ChangeToDeadValue(Node* node, Node* effect, Node* control) {
    DCHECK(TypeOf(node).IsNone());
    // If the node is unreachable, insert an Unreachable node and mark the
    // value dead.
    // TODO(jarin,turbofan) Find a way to unify/merge this insertion with
    // InsertUnreachableIfNecessary.
    Node* unreachable = effect =
        graph()->NewNode(common()->Unreachable(), effect, control);
    const Operator* dead_value =
        common()->DeadValue(GetInfo(node)->representation());
    node->ReplaceInput(0, unreachable);
    node->TrimInputCount(dead_value->ValueInputCount());
    ReplaceEffectControlUses(node, effect, control);
    ChangeOp(node, dead_value);
  }
| |
  // This function is a generalization of ChangeToPureOp. It can be used to
  // replace a node that is part of the effect and control chain by a pure
  // node. Effect and control uses are rewired to the node's former inputs;
  // if the node's type is None it becomes a dead value instead.
  void ReplaceWithPureNode(Node* node, Node* pure_node) {
    DCHECK(pure_node->op()->HasProperty(Operator::kPure));
    if (node->op()->EffectInputCount() > 0) {
      DCHECK_LT(0, node->op()->ControlInputCount());
      Node* control = NodeProperties::GetControlInput(node);
      Node* effect = NodeProperties::GetEffectInput(node);
      if (TypeOf(node).IsNone()) {
        ChangeToDeadValue(node, effect, control);
        return;
      }
      // Rewire the effect and control chains.
      ReplaceEffectControlUses(node, effect, control);
    } else {
      DCHECK_EQ(0, node->op()->ControlInputCount());
    }
    DeferReplacement(node, pure_node);
  }
| |
  // Changes {node}'s operator to the pure {new_op}, removing {node} from the
  // effect and control chains if it was part of them. If {node}'s type is
  // None it is turned into a dead value instead.
  void ChangeToPureOp(Node* node, const Operator* new_op) {
    DCHECK(new_op->HasProperty(Operator::kPure));
    DCHECK_EQ(new_op->ValueInputCount(), node->op()->ValueInputCount());
    if (node->op()->EffectInputCount() > 0) {
      DCHECK_LT(0, node->op()->ControlInputCount());
      Node* control = NodeProperties::GetControlInput(node);
      Node* effect = NodeProperties::GetEffectInput(node);
      if (TypeOf(node).IsNone()) {
        ChangeToDeadValue(node, effect, control);
        return;
      }
      // Rewire the effect and control chains.
      node->TrimInputCount(new_op->ValueInputCount());
      ReplaceEffectControlUses(node, effect, control);
    } else {
      DCHECK_EQ(0, node->op()->ControlInputCount());
    }
    ChangeOp(node, new_op);
  }
| |
  // Changes the unary {node} into the pure binary operator {new_op},
  // inserting {new_input} as value input {new_input_index} (0 or 1) and
  // rewiring effect/control uses as in ChangeToPureOp.
  void ChangeUnaryToPureBinaryOp(Node* node, const Operator* new_op,
                                 int new_input_index, Node* new_input) {
    DCHECK(new_op->HasProperty(Operator::kPure));
    DCHECK_EQ(new_op->ValueInputCount(), 2);
    DCHECK_EQ(node->op()->ValueInputCount(), 1);
    DCHECK_LE(0, new_input_index);
    DCHECK_LE(new_input_index, 1);
    if (node->op()->EffectInputCount() > 0) {
      DCHECK_LT(0, node->op()->ControlInputCount());
      Node* control = NodeProperties::GetControlInput(node);
      Node* effect = NodeProperties::GetEffectInput(node);
      if (TypeOf(node).IsNone()) {
        ChangeToDeadValue(node, effect, control);
        return;
      }
      node->TrimInputCount(node->op()->ValueInputCount());
      ReplaceEffectControlUses(node, effect, control);
    } else {
      DCHECK_EQ(0, node->op()->ControlInputCount());
    }
    if (new_input_index == 0) {
      node->InsertInput(jsgraph_->zone(), 0, new_input);
    } else {
      DCHECK_EQ(new_input_index, 1);
      DCHECK_EQ(node->InputCount(), 1);
      node->AppendInput(jsgraph_->zone(), new_input);
    }
    ChangeOp(node, new_op);
  }
| |
  // Converts input {index} of {node} according to given UseInfo {use},
  // assuming the type of the input is {input_type}. If {input_type} is
  // invalid, the type is taken from the input node itself:
  // {TypeOf(node->InputAt(index))}. A representation change is inserted
  // whenever the input's representation does not match the use, or when the
  // use carries a type check.
  void ConvertInput(Node* node, int index, UseInfo use,
                    Type input_type = Type::Invalid()) {
    // In the change phase, insert a change before the use if necessary.
    if (use.representation() == MachineRepresentation::kNone)
      return;  // No input requirement on the use.
    Node* input = node->InputAt(index);
    DCHECK_NOT_NULL(input);
    NodeInfo* input_info = GetInfo(input);
    MachineRepresentation input_rep = input_info->representation();
    if (input_rep != use.representation() ||
        use.type_check() != TypeCheckKind::kNone) {
      // Output representation doesn't match usage.
      TRACE("  change: #%d:%s(@%d #%d:%s) ", node->id(), node->op()->mnemonic(),
            index, input->id(), input->op()->mnemonic());
      TRACE("from %s to %s:%s\n",
            MachineReprToString(input_info->representation()),
            MachineReprToString(use.representation()),
            use.truncation().description());
      if (input_type.IsInvalid()) {
        input_type = TypeOf(input);
      } else {
        // This case is reached when ConvertInput is called for TypeGuard nodes
        // which explicitly set the {input_type} for their input. In order to
        // correctly verify the resulting graph, we have to preserve this
        // forced type for the verifier.
        DCHECK_EQ(node->opcode(), IrOpcode::kTypeGuard);
        input = InsertTypeOverrideForVerifier(input_type, input);
      }
      Node* n = changer_->GetRepresentationFor(input, input_rep, input_type,
                                               node, use);
      node->ReplaceInput(index, n);
    }
  }
| |
  // Processes value input {index} of {node} according to {use}. Declaration
  // only; per-phase specializations are outside this view.
  template <Phase T>
  void ProcessInput(Node* node, int index, UseInfo use);
| |
  // Just assert for Retype and Lower. Propagate specialized below.
  // The DCHECKs verify that {index} points past all value and context
  // inputs, i.e. only effect and control inputs remain.
  template <Phase T>
  void ProcessRemainingInputs(Node* node, int index) {
    static_assert(retype<T>() || lower<T>(),
                  "This version of ProcessRemainingInputs has to be called in "
                  "the Retype or Lower phase.");
    DCHECK_GE(index, NodeProperties::PastValueIndex(node));
    DCHECK_GE(index, NodeProperties::PastContextIndex(node));
  }
| |
  // Marks node as a possible revisit since it is a use of input that will be
  // visited before input is visited. The mapping {input -> [uses]} is stored
  // in {might_need_revisit_}, creating the vector on first use.
  void MarkAsPossibleRevisit(Node* node, Node* input) {
    auto it = might_need_revisit_.find(input);
    if (it == might_need_revisit_.end()) {
      it = might_need_revisit_.insert({input, ZoneVector<Node*>(zone())}).first;
    }
    it->second.push_back(node);
    TRACE(" Marking #%d: %s as needing revisit due to #%d: %s\n", node->id(),
          node->op()->mnemonic(), input->id(), input->op()->mnemonic());
  }
| |
  // Just assert for Retype. Propagate and Lower specialized below.
  // In the Retype phase inputs have already been processed, so this overload
  // is an intentional no-op guarded by a static_assert.
  template <Phase T>
  void VisitInputs(Node* node) {
    static_assert(
        retype<T>(),
        "This version of VisitInputs has to be called in the Retype phase.");
  }
| |
  // Visits a Return node: input 0 is the integer pop count (word32), all
  // further value/context/frame-state inputs are tagged, and the remaining
  // effect/control inputs are only enqueued.
  template <Phase T>
  void VisitReturn(Node* node) {
    int first_effect_index = NodeProperties::FirstEffectIndex(node);
    // Visit integer slot count to pop
    ProcessInput<T>(node, 0, UseInfo::TruncatingWord32());

    // Visit value, context and frame state inputs as tagged.
    for (int i = 1; i < first_effect_index; i++) {
      ProcessInput<T>(node, i, UseInfo::AnyTagged());
    }
    // Only enqueue other inputs (effects, control).
    for (int i = first_effect_index; i < node->InputCount(); i++) {
      EnqueueInput<T>(node, i);
    }
  }
| |
  // Helper for an unused node: all value inputs get a None use, and in the
  // Lower phase the node is disconnected from the effect/control chains and
  // replaced by a Plug node.
  template <Phase T>
  void VisitUnused(Node* node) {
    int first_effect_index = NodeProperties::FirstEffectIndex(node);
    for (int i = 0; i < first_effect_index; i++) {
      ProcessInput<T>(node, i, UseInfo::None());
    }
    ProcessRemainingInputs<T>(node, first_effect_index);

    if (lower<T>()) {
      TRACE("disconnecting unused #%d:%s\n", node->id(),
            node->op()->mnemonic());
      DisconnectFromEffectAndControl(node);
      node->NullAllInputs();  // Node is now dead.
      DeferReplacement(node, graph()->NewNode(common()->Plug()));
    }
  }
| |
  // Helper for no-op node: passes the value through with a phi-style
  // representation, and in the Lower phase replaces the node by its input.
  template <Phase T>
  void VisitNoop(Node* node, Truncation truncation) {
    if (truncation.IsUnused()) return VisitUnused<T>(node);
    MachineRepresentation representation =
        GetOutputInfoForPhi(TypeOf(node), truncation);
    VisitUnop<T>(node, UseInfo(representation, truncation), representation);
    if (lower<T>()) DeferReplacement(node, node->InputAt(0));
  }
| |
  // Helper for binops of the R x L -> O variety: processes the two value
  // inputs with their respective uses, enqueues any remaining inputs, and
  // records the output representation (with an optional restriction type).
  template <Phase T>
  void VisitBinop(Node* node, UseInfo left_use, UseInfo right_use,
                  MachineRepresentation output,
                  Type restriction_type = Type::Any()) {
    DCHECK_EQ(2, node->op()->ValueInputCount());
    ProcessInput<T>(node, 0, left_use);
    ProcessInput<T>(node, 1, right_use);
    for (int i = 2; i < node->InputCount(); i++) {
      EnqueueInput<T>(node, i);
    }
    SetOutput<T>(node, output, restriction_type);
  }
| |
  // Helper for binops of the I x I -> O variety: both inputs share the same
  // use.
  template <Phase T>
  void VisitBinop(Node* node, UseInfo input_use, MachineRepresentation output,
                  Type restriction_type = Type::Any()) {
    VisitBinop<T>(node, input_use, input_use, output, restriction_type);
  }
| |
  // Visits a speculative binop that produces word32: if both inputs are
  // already NumberOrOddball no checks are needed; otherwise checked word32
  // uses are derived from the operator's number-operation hint.
  template <Phase T>
  void VisitSpeculativeInt32Binop(Node* node) {
    DCHECK_EQ(2, node->op()->ValueInputCount());
    if (BothInputsAre(node, Type::NumberOrOddball())) {
      return VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                           MachineRepresentation::kWord32);
    }
    NumberOperationHint hint = NumberOperationHintOf(node->op());
    return VisitBinop<T>(node,
                         CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros),
                         MachineRepresentation::kWord32);
  }
| |
  // Helper for unops of the I -> O variety: processes the single value input
  // and records the output representation.
  template <Phase T>
  void VisitUnop(Node* node, UseInfo input_use, MachineRepresentation output,
                 Type restriction_type = Type::Any()) {
    DCHECK_EQ(1, node->op()->ValueInputCount());
    ProcessInput<T>(node, 0, input_use);
    ProcessRemainingInputs<T>(node, 1);
    SetOutput<T>(node, output, restriction_type);
  }
| |
  // Helper for leaf nodes (no inputs): only records the output
  // representation.
  template <Phase T>
  void VisitLeaf(Node* node, MachineRepresentation output) {
    DCHECK_EQ(0, node->InputCount());
    SetOutput<T>(node, output);
  }
| |
  // Helpers for specific types of binops: both inputs and the output use a
  // single fixed representation (float64, word64 or truncating word32).

  template <Phase T>
  void VisitFloat64Binop(Node* node) {
    VisitBinop<T>(node, UseInfo::TruncatingFloat64(),
                  MachineRepresentation::kFloat64);
  }

  template <Phase T>
  void VisitInt64Binop(Node* node) {
    VisitBinop<T>(node, UseInfo::Word64(), MachineRepresentation::kWord64);
  }

  template <Phase T>
  void VisitWord32TruncatingBinop(Node* node) {
    VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                  MachineRepresentation::kWord32);
  }
| |
  // Infer representation for phi-like nodes based on the node's static type
  // and the truncation demanded by its uses. The branches are ordered from
  // most to least specific; the fallback is tagged.
  MachineRepresentation GetOutputInfoForPhi(Type type, Truncation use) {
    // Compute the representation.
    if (type.Is(Type::None())) {
      return MachineRepresentation::kNone;
    } else if (type.Is(Type::Signed32()) || type.Is(Type::Unsigned32())) {
      return MachineRepresentation::kWord32;
    } else if (type.Is(Type::NumberOrOddball()) && use.IsUsedAsWord32()) {
      return MachineRepresentation::kWord32;
    } else if (type.Is(Type::Boolean())) {
      return MachineRepresentation::kBit;
    } else if (type.Is(Type::NumberOrOddball()) &&
               use.TruncatesOddballAndBigIntToNumber()) {
      return MachineRepresentation::kFloat64;
    } else if (type.Is(Type::Union(Type::SignedSmall(), Type::NaN(), zone()))) {
      // TODO(turbofan): For Phis that return either NaN or some Smi, it's
      // beneficial to not go all the way to double, unless the uses are
      // double uses. For tagging that just means some potentially expensive
      // allocation code; we might want to do the same for -0 as well?
      return MachineRepresentation::kTagged;
    } else if (type.Is(Type::Number())) {
      return MachineRepresentation::kFloat64;
    } else if (type.Is(Type::BigInt()) && Is64() && use.IsUsedAsWord64()) {
      return MachineRepresentation::kWord64;
    } else if (type.Is(Type::ExternalPointer()) ||
               type.Is(Type::SandboxedPointer())) {
      return MachineType::PointerRepresentation();
    }
    return MachineRepresentation::kTagged;
  }
| |
  // Helper for handling selects: the condition is used as a bit, the output
  // representation is inferred phi-style, and both value arms are converted
  // to that representation. The Select operator is updated in the Lower
  // phase if the representation changed.
  template <Phase T>
  void VisitSelect(Node* node, Truncation truncation,
                   SimplifiedLowering* lowering) {
    DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean()));
    ProcessInput<T>(node, 0, UseInfo::Bool());

    MachineRepresentation output =
        GetOutputInfoForPhi(TypeOf(node), truncation);
    SetOutput<T>(node, output);

    if (lower<T>()) {
      // Update the select operator.
      SelectParameters p = SelectParametersOf(node->op());
      if (output != p.representation()) {
        ChangeOp(node, lowering->common()->Select(output, p.hint()));
      }
    }
    // Convert inputs to the output representation of this phi, pass the
    // truncation along.
    UseInfo input_use(output, truncation);
    ProcessInput<T>(node, 1, input_use);
    ProcessInput<T>(node, 2, input_use);
  }
| |
  // Helper for handling phis: keeps a pre-set non-tagged representation
  // (introduced by earlier machine-level subgraphs), otherwise infers one
  // from the phi's type and truncation, then converts all value inputs to
  // that representation. Non-value inputs (the control input) get no use.
  template <Phase T>
  void VisitPhi(Node* node, Truncation truncation,
                SimplifiedLowering* lowering) {
    // If we already have a non-tagged representation set in the Phi node, it
    // does come from subgraphs using machine operators we introduced early in
    // the pipeline. In this case, we just keep the representation.
    MachineRepresentation output = PhiRepresentationOf(node->op());
    if (output == MachineRepresentation::kTagged) {
      output = GetOutputInfoForPhi(TypeOf(node), truncation);
    }
    // Only set the output representation if not running with type
    // feedback. (Feedback typing will set the representation.)
    SetOutput<T>(node, output);

    int values = node->op()->ValueInputCount();
    if (lower<T>()) {
      // Update the phi operator.
      if (output != PhiRepresentationOf(node->op())) {
        ChangeOp(node, lowering->common()->Phi(output, values));
      }
    }

    // Convert inputs to the output representation of this phi, pass the
    // truncation along.
    UseInfo input_use(output, truncation);
    for (int i = 0; i < node->InputCount(); i++) {
      ProcessInput<T>(node, i, i < values ? input_use : UseInfo::None());
    }
  }
| |
  // Visits an ObjectIs-style predicate against {type}: if the input is
  // statically known to be {type}, the node folds to the constant true bit;
  // if it can never be {type}, it folds to false; otherwise it remains a
  // runtime check producing a bit.
  template <Phase T>
  void VisitObjectIs(Node* node, Type type, SimplifiedLowering* lowering) {
    Type const input_type = TypeOf(node->InputAt(0));
    if (input_type.Is(type)) {
      VisitUnop<T>(node, UseInfo::None(), MachineRepresentation::kBit);
      if (lower<T>()) {
        DeferReplacement(
            node, InsertTypeOverrideForVerifier(
                      true_type(), lowering->jsgraph()->Int32Constant(1)));
      }
    } else {
      VisitUnop<T>(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
      if (lower<T>() && !input_type.Maybe(type)) {
        DeferReplacement(
            node, InsertTypeOverrideForVerifier(
                      false_type(), lowering->jsgraph()->Int32Constant(0)));
      }
    }
  }
| |
  // Visits a Check node against {type}: if the input is statically known to
  // satisfy {type}, the check is redundant and the node is replaced by its
  // input in the Lower phase; otherwise a checked heap-object use is used.
  template <Phase T>
  void VisitCheck(Node* node, Type type, SimplifiedLowering* lowering) {
    if (InputIs(node, type)) {
      VisitUnop<T>(node, UseInfo::AnyTagged(),
                   MachineRepresentation::kTaggedPointer);
      if (lower<T>()) DeferReplacement(node, node->InputAt(0));
    } else {
      VisitUnop<T>(node,
                   UseInfo::CheckedHeapObjectAsTaggedPointer(FeedbackSource()),
                   MachineRepresentation::kTaggedPointer);
    }
  }
| |
  // Visits a Call node: the target is used as-is, the first {params} value
  // inputs take the representations demanded by the call descriptor, any
  // remaining value inputs are tagged, and the output representation comes
  // from the descriptor's first return type (tagged if there is no return).
  template <Phase T>
  void VisitCall(Node* node, SimplifiedLowering* lowering) {
    auto call_descriptor = CallDescriptorOf(node->op());
    int params = static_cast<int>(call_descriptor->ParameterCount());
    int value_input_count = node->op()->ValueInputCount();

    DCHECK_GT(value_input_count, 0);
    DCHECK_GE(value_input_count, params);

    // The target of the call.
    ProcessInput<T>(node, 0, UseInfo::Any());

    // For the parameters (indexes [1, ..., params]), propagate representation
    // information from call descriptor.
    for (int i = 1; i <= params; i++) {
      ProcessInput<T>(node, i,
                      TruncatingUseInfoFromRepresentation(
                          call_descriptor->GetInputType(i).representation()));
    }

    // Rest of the value inputs.
    for (int i = params + 1; i < value_input_count; i++) {
      ProcessInput<T>(node, i, UseInfo::AnyTagged());
    }

    // Effect and Control.
    ProcessRemainingInputs<T>(node, value_input_count);

    if (call_descriptor->ReturnCount() > 0) {
      SetOutput<T>(node, call_descriptor->GetReturnType(0).representation());
    } else {
      SetOutput<T>(node, MachineRepresentation::kTagged);
    }
  }
| |
| void MaskShiftOperand(Node* node, Type rhs_type) { |
| if (!rhs_type.Is(type_cache_->kZeroToThirtyOne)) { |
| Node* const rhs = NodeProperties::GetValueInput(node, 1); |
| node->ReplaceInput(1, |
| graph()->NewNode(jsgraph_->machine()->Word32And(), rhs, |
| jsgraph_->Int32Constant(0x1F))); |
| } |
| } |
| |
| static MachineSemantic DeoptValueSemanticOf(Type type) { |
| // We only need signedness to do deopt correctly. |
| if (type.Is(Type::Signed32())) { |
| return MachineSemantic::kInt32; |
| } else if (type.Is(Type::Unsigned32())) { |
| return MachineSemantic::kUint32; |
| } else { |
| return MachineSemantic::kAny; |
| } |
| } |
| |
  // Computes the MachineType to record for a deopt input that has machine
  // representation {rep} and static type {type}. All tagged variations are
  // collapsed to AnyTagged; word64 values distinguish the BigInt64 variants
  // from (untagged) safe integers.
  static MachineType DeoptMachineTypeOf(MachineRepresentation rep, Type type) {
    if (type.IsNone()) {
      return MachineType::None();
    }
    // Do not distinguish between various Tagged variations.
    if (IsAnyTagged(rep)) {
      return MachineType::AnyTagged();
    }
    if (rep == MachineRepresentation::kWord64) {
      if (type.Is(Type::SignedBigInt64())) {
        return MachineType::SignedBigInt64();
      }

      if (type.Is(Type::UnsignedBigInt64())) {
        return MachineType::UnsignedBigInt64();
      }

      if (type.Is(Type::BigInt())) {
        return MachineType::AnyTagged();
      }

      DCHECK(type.Is(TypeCache::Get()->kSafeInteger));
      return MachineType(rep, MachineSemantic::kInt64);
    }
    MachineType machine_type(rep, DeoptValueSemanticOf(type));
    DCHECK_IMPLIES(
        machine_type.representation() == MachineRepresentation::kWord32,
        machine_type.semantic() == MachineSemantic::kInt32 ||
            machine_type.semantic() == MachineSemantic::kUint32);
    DCHECK_IMPLIES(machine_type.representation() == MachineRepresentation::kBit,
                   type.Is(Type::Boolean()));
    return machine_type;
  }
| |
  // Visits a StateValues node (deoptimization inputs). During Propagate,
  // uses are enqueued so each input stays rematerializable; during Lower,
  // the node is changed to TypedStateValues with each input's deopt machine
  // type recorded.
  template <Phase T>
  void VisitStateValues(Node* node) {
    if (propagate<T>()) {
      for (int i = 0; i < node->InputCount(); i++) {
        if (IsLargeBigInt(TypeOf(node->InputAt(i)))) {
          // BigInt64s are rematerialized in deoptimization. The other BigInts
          // must be rematerialized before deoptimization. By propagating an
          // AnyTagged use, the RepresentationChanger is going to insert the
          // necessary conversions.
          EnqueueInput<T>(node, i, UseInfo::AnyTagged());
        } else if (IsLoadFloat16ArrayElement(node->InputAt(i))) {
          // Loads from Float16Arrays are raw bits as word16s but have the
          // Number type, since not all archs have native float16
          // representation. Rematerialize them as float64s in deoptimization.
          EnqueueInput<T>(node, i, UseInfo::Float64());
        } else {
          EnqueueInput<T>(node, i, UseInfo::Any());
        }
      }
    } else if (lower<T>()) {
      Zone* zone = jsgraph_->zone();
      ZoneVector<MachineType>* types =
          zone->New<ZoneVector<MachineType>>(node->InputCount(), zone);
      for (int i = 0; i < node->InputCount(); i++) {
        Node* input = node->InputAt(i);
        MachineRepresentation input_rep = GetInfo(input)->representation();
        if (IsLargeBigInt(TypeOf(input))) {
          ConvertInput(node, i, UseInfo::AnyTagged());
        } else if (IsLoadFloat16ArrayElement(input)) {
          ConvertInput(node, i, UseInfo::Float64());
          input_rep = MachineRepresentation::kFloat64;
        }
        (*types)[i] = DeoptMachineTypeOf(input_rep, TypeOf(input));
      }
      SparseInputMask mask = SparseInputMaskOf(node->op());
      ChangeOp(node, common()->TypedStateValues(types, mask));
    }
    SetOutput<T>(node, MachineRepresentation::kTagged);
  }
| |
  // Visits a FrameState node: parameters, locals, context, function and the
  // outer frame state are all treated as tagged. The accumulator ("stack"
  // input) is special-cased: in the Lower phase it is wrapped into a
  // singleton TypedStateValues node so that its machine type is available
  // for deoptimization (or replaced by SingleDeadTypedStateValues when it is
  // the optimized-out sentinel).
  template <Phase T>
  void VisitFrameState(FrameState node) {
    DCHECK_EQ(5, node->op()->ValueInputCount());
    DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
    DCHECK_EQ(FrameState::kFrameStateInputCount, node->InputCount());

    ProcessInput<T>(node, FrameState::kFrameStateParametersInput,
                    UseInfo::AnyTagged());
    ProcessInput<T>(node, FrameState::kFrameStateLocalsInput,
                    UseInfo::AnyTagged());

    // Accumulator is a special flower - we need to remember its type in
    // a singleton typed-state-values node (as if it was a singleton
    // state-values node).
    Node* accumulator = node.stack();
    if (propagate<T>()) {
      if (IsLargeBigInt(TypeOf(accumulator))) {
        EnqueueInput<T>(node, FrameState::kFrameStateStackInput,
                        UseInfo::AnyTagged());
      } else if (IsLoadFloat16ArrayElement(accumulator)) {
        EnqueueInput<T>(node, FrameState::kFrameStateStackInput,
                        UseInfo::Float64());
      } else {
        EnqueueInput<T>(node, FrameState::kFrameStateStackInput,
                        UseInfo::Any());
      }
    } else if (lower<T>()) {
      MachineRepresentation accumulator_rep =
          GetInfo(accumulator)->representation();
      Type accumulator_type = TypeOf(accumulator);
      if (IsLargeBigInt(accumulator_type)) {
        ConvertInput(node, FrameState::kFrameStateStackInput,
                     UseInfo::AnyTagged());
        accumulator = node.stack();
      } else if (IsLoadFloat16ArrayElement(accumulator)) {
        ConvertInput(node, FrameState::kFrameStateStackInput,
                     UseInfo::Float64());
        accumulator = node.stack();
        accumulator_rep = MachineRepresentation::kFloat64;
      }
      Zone* zone = jsgraph_->zone();
      if (accumulator == jsgraph_->OptimizedOutConstant()) {
        node->ReplaceInput(FrameState::kFrameStateStackInput,
                           jsgraph_->SingleDeadTypedStateValues());
      } else {
        ZoneVector<MachineType>* types =
            zone->New<ZoneVector<MachineType>>(1, zone);
        (*types)[0] = DeoptMachineTypeOf(accumulator_rep, accumulator_type);

        node->ReplaceInput(
            FrameState::kFrameStateStackInput,
            jsgraph_->graph()->NewNode(
                common()->TypedStateValues(types, SparseInputMask::Dense()),
                node.stack()));
      }
    }

    ProcessInput<T>(node, FrameState::kFrameStateContextInput,
                    UseInfo::AnyTagged());
    ProcessInput<T>(node, FrameState::kFrameStateFunctionInput,
                    UseInfo::AnyTagged());
    ProcessInput<T>(node, FrameState::kFrameStateOuterStateInput,
                    UseInfo::AnyTagged());
    return SetOutput<T>(node, MachineRepresentation::kTagged);
  }
| |
  // Visits an ObjectState node (materialization info for escape-analyzed
  // objects). Mirrors VisitStateValues: enqueue uses in Propagate, and in
  // Lower change the node to TypedObjectState recording each input's deopt
  // machine type.
  template <Phase T>
  void VisitObjectState(Node* node) {
    if (propagate<T>()) {
      for (int i = 0; i < node->InputCount(); i++) {
        if (IsLargeBigInt(TypeOf(node->InputAt(i)))) {
          EnqueueInput<T>(node, i, UseInfo::AnyTagged());
        } else if (IsLoadFloat16ArrayElement(node->InputAt(i))) {
          EnqueueInput<T>(node, i, UseInfo::Float64());
        } else {
          EnqueueInput<T>(node, i, UseInfo::Any());
        }
      }
    } else if (lower<T>()) {
      Zone* zone = jsgraph_->zone();
      ZoneVector<MachineType>* types =
          zone->New<ZoneVector<MachineType>>(node->InputCount(), zone);
      for (int i = 0; i < node->InputCount(); i++) {
        Node* input = node->InputAt(i);
        MachineRepresentation input_rep = GetInfo(input)->representation();
        if (IsLargeBigInt(TypeOf(input))) {
          ConvertInput(node, i, UseInfo::AnyTagged());
        } else if (IsLoadFloat16ArrayElement(input)) {
          ConvertInput(node, i, UseInfo::Float64());
          input_rep = MachineRepresentation::kFloat64;
        }
        (*types)[i] = DeoptMachineTypeOf(input_rep, TypeOf(input));
      }
      ChangeOp(node, common()->TypedObjectState(ObjectIdOf(node->op()), types));
    }
    SetOutput<T>(node, MachineRepresentation::kTagged);
  }
| |
  // Thin wrappers over the RepresentationChanger's opcode translation: each
  // returns the machine-level operator corresponding to {node}'s opcode in
  // the respective numeric domain (with or without overflow checking).
  const Operator* Int32Op(Node* node) {
    return changer_->Int32OperatorFor(node->opcode());
  }

  const Operator* Int32OverflowOp(Node* node) {
    return changer_->Int32OverflowOperatorFor(node->opcode());
  }

  const Operator* AdditiveSafeIntegerOverflowOp(Node* node) {
    return changer_->AdditiveSafeIntegerOverflowOperatorFor(node->opcode());
  }

  const Operator* Int64Op(Node* node) {
    return changer_->Int64OperatorFor(node->opcode());
  }

  const Operator* Int64OverflowOp(Node* node) {
    return changer_->Int64OverflowOperatorFor(node->opcode());
  }

  const Operator* BigIntOp(Node* node) {
    return changer_->BigIntOperatorFor(node->opcode());
  }

  const Operator* Uint32Op(Node* node) {
    return changer_->Uint32OperatorFor(node->opcode());
  }

  const Operator* Uint32OverflowOp(Node* node) {
    return changer_->Uint32OverflowOperatorFor(node->opcode());
  }

  const Operator* Float64Op(Node* node) {
    return changer_->Float64OperatorFor(node->opcode());
  }
| |
  // Computes the write barrier needed to store {value} into a field of the
  // given representation and type. Barriers are elided when the store can
  // never create a tracked pointer: untagged bases/fields, smi-represented
  // values, always-in-root-set oddballs, and immortal immovable roots. A
  // cheaper pointer barrier is used when either side is known to be a heap
  // pointer; otherwise a full barrier is emitted.
  WriteBarrierKind WriteBarrierKindFor(
      BaseTaggedness base_taggedness,
      MachineRepresentation field_representation, Type field_type,
      MachineRepresentation value_representation, Node* value) {
    if (base_taggedness == kTaggedBase &&
        CanBeTaggedPointer(field_representation)) {
      Type value_type = NodeProperties::GetType(value);
      if (value_representation == MachineRepresentation::kTaggedSigned) {
        // Write barriers are only for stores of heap objects.
        return kNoWriteBarrier;
      }
      if (field_type.Is(Type::BooleanOrNullOrUndefined()) ||
          value_type.Is(Type::BooleanOrNullOrUndefined())) {
        // Write barriers are not necessary when storing true, false, null or
        // undefined, because these special oddballs are always in the root set.
        return kNoWriteBarrier;
      }
      if (value_type.IsHeapConstant()) {
        RootIndex root_index;
        const RootsTable& roots_table = jsgraph_->isolate()->roots_table();
        if (roots_table.IsRootHandle(value_type.AsHeapConstant()->Value(),
                                     &root_index)) {
          if (RootsTable::IsImmortalImmovable(root_index)) {
            // Write barriers are unnecessary for immortal immovable roots.
            return kNoWriteBarrier;
          }
        }
      }
      if (field_representation == MachineRepresentation::kTaggedPointer ||
          value_representation == MachineRepresentation::kTaggedPointer) {
        // Write barriers for heap objects are cheaper.
        return kPointerWriteBarrier;
      }
      NumberMatcher m(value);
      if (m.HasResolvedValue()) {
        if (IsSmiDouble(m.ResolvedValue())) {
          // Storing a smi doesn't need a write barrier.
          return kNoWriteBarrier;
        }
        // The NumberConstant will be represented as HeapNumber.
        return kPointerWriteBarrier;
      }
      return kFullWriteBarrier;
    }
    return kNoWriteBarrier;
  }
| |
| WriteBarrierKind WriteBarrierKindFor( |
| BaseTaggedness base_taggedness, |
| MachineRepresentation field_representation, int field_offset, |
| Type field_type, MachineRepresentation value_representation, |
| Node* value) { |
| WriteBarrierKind write_barrier_kind = |
| WriteBarrierKindFor(base_taggedness, field_representation, field_type, |
| value_representation, value); |
| if (write_barrier_kind != kNoWriteBarrier) { |
| if (base_taggedness == kTaggedBase && |
| field_offset == HeapObject::kMapOffset) { |
| write_barrier_kind = kMapWriteBarrier; |
| } |
| } |
| return write_barrier_kind; |
| } |
| |
  // Convenience accessors for the graph and the operator builders owned by
  // the JSGraph.
  Graph* graph() const { return jsgraph_->graph(); }
  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
  SimplifiedOperatorBuilder* simplified() const {
    return jsgraph_->simplified();
  }
| |
  // Lowers a SpeculativeNumberMultiply to CheckedInt32Mul, choosing whether
  // a minus-zero check is needed: it can be skipped when one input is a
  // positive ordered number (result can't be -0) or when uses identify 0 and
  // -0 (in which case -0 is admitted into the restriction type instead).
  template <Phase T>
  void VisitForCheckedInt32Mul(Node* node, Truncation truncation,
                               Type input0_type, Type input1_type,
                               UseInfo input_use) {
    DCHECK_EQ(node->opcode(), IrOpcode::kSpeculativeNumberMultiply);
    // A -0 input is impossible or will cause a deopt.
    DCHECK(BothInputsAre(node, Type::Signed32()) ||
           !input_use.truncation().IdentifiesZeroAndMinusZero());

    CheckForMinusZeroMode mz_mode;
    Type restriction;
    if (IsSomePositiveOrderedNumber(input0_type) ||
        IsSomePositiveOrderedNumber(input1_type)) {
      mz_mode = CheckForMinusZeroMode::kDontCheckForMinusZero;
      restriction = Type::Signed32();
    } else if (truncation.IdentifiesZeroAndMinusZero()) {
      mz_mode = CheckForMinusZeroMode::kDontCheckForMinusZero;
      restriction = Type::Signed32OrMinusZero();
    } else {
      mz_mode = CheckForMinusZeroMode::kCheckForMinusZero;
      restriction = Type::Signed32();
    }

    VisitBinop<T>(node, input_use, MachineRepresentation::kWord32, restriction);
    if (lower<T>()) ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode));
  }
| |
  // Replace {node}'s operator by the overflow-checking int32/uint32 machine
  // operator corresponding to its opcode.
  void ChangeToInt32OverflowOp(Node* node) {
    ChangeOp(node, Int32OverflowOp(node));
  }

  void ChangeToUint32OverflowOp(Node* node) {
    ChangeOp(node, Uint32OverflowOp(node));
  }
| |
  // Lowers SpeculativeSmallIntegerAdd/Subtract. If both inputs are additive
  // safe integers and the result is used as word32 (or provably fits it), a
  // plain wrapping Int32 op suffices; otherwise checked word32 inputs and/or
  // an overflow-checking op are selected based on the SignedSmall feedback
  // hint, with careful -0 handling.
  template <Phase T>
  void VisitSpeculativeSmallIntegerAdditiveOp(Node* node, Truncation truncation,
                                              SimplifiedLowering* lowering) {
    Type left_upper = GetUpperBound(node->InputAt(0));
    Type right_upper = GetUpperBound(node->InputAt(1));

    if (left_upper.Is(type_cache_->kAdditiveSafeIntegerOrMinusZero) &&
        right_upper.Is(type_cache_->kAdditiveSafeIntegerOrMinusZero)) {
      // Only eliminate the node if its typing rule can be satisfied, namely
      // that a safe integer is produced.
      if (truncation.IsUnused()) return VisitUnused<T>(node);

      // If we know how to interpret the result or if the users only care
      // about the low 32-bits, we can truncate to Word32 and do a wrapping
      // addition.
      if (GetUpperBound(node).Is(Type::Signed32()) ||
          GetUpperBound(node).Is(Type::Unsigned32()) ||
          truncation.IsUsedAsWord32()) {
        // => Int32Add/Sub
        VisitWord32TruncatingBinop<T>(node);
        if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
        return;
      }
    }

    // Try to use type feedback.
    NumberOperationHint const hint = NumberOperationHint::kSignedSmall;
    DCHECK_EQ(hint, NumberOperationHintOf(node->op()));

    Type left_feedback_type = TypeOf(node->InputAt(0));
    Type right_feedback_type = TypeOf(node->InputAt(1));

    // Using Signed32 as restriction type amounts to promising there won't be
    // signed overflow. This is incompatible with relying on a Word32 truncation
    // in order to skip the overflow check. Similarly, we must not drop -0 from
    // the result type unless we deopt for -0 inputs.
    Type const restriction =
        truncation.IsUsedAsWord32()
            ? Type::Any()
            : (truncation.identify_zeros() == kIdentifyZeros)
                  ? Type::Signed32OrMinusZero()
                  : Type::Signed32();

    // Handle the case when no int32 checks on inputs are necessary (but
    // an overflow check is needed on the output). Note that we do not
    // have to do any check if at most one side can be minus zero. For
    // subtraction we need to handle the case of -0 - 0 properly, since
    // that can produce -0.
    Type left_constraint_type =
        node->opcode() == IrOpcode::kSpeculativeSmallIntegerAdd
            ? Type::Signed32OrMinusZero()
            : Type::Signed32();
    if (left_upper.Is(left_constraint_type) &&
        right_upper.Is(Type::Signed32OrMinusZero()) &&
        (left_upper.Is(Type::Signed32()) || right_upper.Is(Type::Signed32()))) {
      VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                    MachineRepresentation::kWord32, restriction);
    } else {
      // If the output's truncation is identify-zeros, we can pass it
      // along. Moreover, if the operation is addition and we know the
      // right-hand side is not minus zero, we do not have to distinguish
      // between 0 and -0.
      IdentifyZeros left_identify_zeros = truncation.identify_zeros();
      if (node->opcode() == IrOpcode::kSpeculativeSmallIntegerAdd &&
          !right_feedback_type.Maybe(Type::MinusZero())) {
        left_identify_zeros = kIdentifyZeros;
      }
      UseInfo left_use =
          CheckedUseInfoAsWord32FromHint(hint, left_identify_zeros);
      // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
      // a minus zero check for the right hand side, since we already
      // know that the left hand side is a proper Signed32 value,
      // potentially guarded by a check.
      UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
      VisitBinop<T>(node, left_use, right_use, MachineRepresentation::kWord32,
                    restriction);
    }

    if (lower<T>()) {
      if (truncation.IsUsedAsWord32() ||
          !CanOverflowSigned32(node->op(), left_feedback_type,
                               right_feedback_type, type_cache_,
                               graph_zone())) {
        ChangeToPureOp(node, Int32Op(node));
      } else {
        ChangeToInt32OverflowOp(node);
      }
    }
  }
| |
| bool CanSpeculateAdditiveSafeInteger(Node* node) { |
| if (!v8_flags.additive_safe_int_feedback) return false; |
| if (NumberOperationHintOf(node->op()) != |
| NumberOperationHint::kAdditiveSafeInteger) { |
| return false; |
| } |
| DCHECK_EQ(2, node->op()->ValueInputCount()); |
| Node* lhs = node->InputAt(0); |
| auto lhs_restriction_type = GetInfo(lhs)->restriction_type(); |
| Node* rhs = node->InputAt(1); |
| auto rhs_restriction_type = GetInfo(rhs)->restriction_type(); |
| // Only speculate AdditiveSafeInteger if one of the sides are already known |
| // to be in the AdditiveSafeInteger range, since the check is relatively |
| // expensive. |
| return GetUpperBound(lhs).Is(type_cache_->kAdditiveSafeInteger) || |
| GetUpperBound(rhs).Is(type_cache_->kAdditiveSafeInteger) || |
| lhs_restriction_type.Is(type_cache_->kAdditiveSafeInteger) || |
| rhs_restriction_type.Is(type_cache_->kAdditiveSafeInteger); |
| } |
| |
  // Selects representations for a speculative additive binary operation.
  // If the result statically fits Signed32/Unsigned32 or is only used as
  // word32, the node becomes a pure Int32 op (inputs known additive-safe)
  // or a checked truncating word32 op (AdditiveSafeInteger speculation).
  // Otherwise, safe-integer speculation yields a checked word64 overflow
  // op, and the fallback is a checked Float64 op.
  template <Phase T>
  void VisitSpeculativeAdditiveOp(Node* node, Truncation truncation,
                                  SimplifiedLowering* lowering) {
    if (GetUpperBound(node).Is(Type::Signed32()) ||
        GetUpperBound(node).Is(Type::Unsigned32()) ||
        truncation.IsUsedAsWord32()) {
      // Both inputs are in the additive safe range, so 32-bit wrapping
      // arithmetic produces the correct truncated result with no checks.
      if (BothInputsAre(node, type_cache_->kAdditiveSafeIntegerOrMinusZero)) {
        // => Int32Add/Sub
        VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                      MachineRepresentation::kWord32);
        if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
        return;
      }

      if (CanSpeculateAdditiveSafeInteger(node)) {
        // This case handles addition where the result might be truncated to
        // word32. Even if the inputs might be larger than 2^32, we can safely
        // perform 32-bit addition *here* if the inputs are in the additive safe
        // range. We *must* propagate the CheckedSafeIntTruncatingWord32
        // information. This is because we need to ensure that we deoptimize if
        // either input is not an integer, or not in the range.
        // => Int32Add/Sub
        VisitBinop<T>(node,
                      UseInfo::CheckedSafeIntTruncatingWord32(FeedbackSource{}),
                      MachineRepresentation::kWord32, type_cache_->kInt32);
        if (lower<T>()) ChangeToPureOp(node, Int32Op(node));
        return;
      }
    } else if (CanSpeculateAdditiveSafeInteger(node)) {
      // Result does not fit word32: use a 64-bit op that deopts on
      // overflow out of the additive safe range.
      // => AdditiveSafeIntegerAdd/Sub
      VisitBinop<T>(node, UseInfo::CheckedSafeIntAsWord64(FeedbackSource{}),
                    MachineRepresentation::kWord64,
                    type_cache_->kAdditiveSafeInteger);
      if (lower<T>()) ChangeOp(node, AdditiveSafeIntegerOverflowOp(node));
      return;
    }

    // Default case => Float64Add/Sub
    VisitBinop<T>(node,
                  UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros,
                                                           FeedbackSource()),
                  MachineRepresentation::kFloat64, Type::Number());
    if (lower<T>()) {
      ChangeToPureOp(node, Float64Op(node));
    }
  }
| |
  // Selects representations for a speculative number modulus. Tries, in
  // order: unchecked Uint32Mod/Int32Mod justified purely by input types,
  // checked word32 ops driven by SignedSmall feedback, unchecked word32
  // ops for statically typed inputs whose result is truncated, and
  // finally a checked Float64Mod as the general fallback.
  template <Phase T>
  void VisitSpeculativeNumberModulus(Node* node, Truncation truncation,
                                     SimplifiedLowering* lowering) {
    if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN()) &&
        (truncation.IsUsedAsWord32() ||
         NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
      // => unsigned Uint32Mod
      VisitWord32TruncatingBinop<T>(node);
      if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node));
      return;
    }
    if (BothInputsAre(node, Type::Signed32OrMinusZeroOrNaN()) &&
        (truncation.IsUsedAsWord32() ||
         NodeProperties::GetType(node).Is(Type::Signed32()))) {
      // => signed Int32Mod
      VisitWord32TruncatingBinop<T>(node);
      if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
      return;
    }

    // Try to use type feedback.
    NumberOperationHint hint = NumberOperationHintOf(node->op());

    // Handle the case when no uint32 checks on inputs are necessary
    // (but an overflow check is needed on the output).
    if (BothInputsAreUnsigned32(node)) {
      if (hint == NumberOperationHint::kSignedSmall) {
        VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                      MachineRepresentation::kWord32, Type::Unsigned32());
        if (lower<T>()) ChangeToUint32OverflowOp(node);
        return;
      }
    }

    // Handle the case when no int32 checks on inputs are necessary
    // (but an overflow check is needed on the output).
    if (BothInputsAre(node, Type::Signed32())) {
      // If both the inputs the feedback are int32, use the overflow op.
      if (hint == NumberOperationHint::kSignedSmall) {
        VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                      MachineRepresentation::kWord32, Type::Signed32());
        if (lower<T>()) ChangeToInt32OverflowOp(node);
        return;
      }
    }

    if (hint == NumberOperationHint::kSignedSmall) {
      // If the result is truncated, we only need to check the inputs.
      // For the left hand side we just propagate the identify zeros
      // mode of the {truncation}; and for modulus the sign of the
      // right hand side doesn't matter anyways, so in particular there's
      // no observable difference between a 0 and a -0 then.
      UseInfo const lhs_use =
          CheckedUseInfoAsWord32FromHint(hint, truncation.identify_zeros());
      UseInfo const rhs_use =
          CheckedUseInfoAsWord32FromHint(hint, kIdentifyZeros);
      if (truncation.IsUsedAsWord32()) {
        // Result is truncated to word32 anyway, no restriction needed.
        VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32);
        if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
      } else if (BothInputsAre(node, Type::Unsigned32OrMinusZeroOrNaN())) {
        // Keep -0 in the restriction type only if the use cannot tell 0
        // and -0 apart and the lhs may actually produce a -0.
        Type const restriction =
            truncation.IdentifiesZeroAndMinusZero() &&
                    TypeOf(node->InputAt(0)).Maybe(Type::MinusZero())
                ? Type::Unsigned32OrMinusZero()
                : Type::Unsigned32();
        VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
                      restriction);
        if (lower<T>()) ChangeToUint32OverflowOp(node);
      } else {
        // Same -0 reasoning as above, but for the signed case.
        Type const restriction =
            truncation.IdentifiesZeroAndMinusZero() &&
                    TypeOf(node->InputAt(0)).Maybe(Type::MinusZero())
                ? Type::Signed32OrMinusZero()
                : Type::Signed32();
        VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kWord32,
                      restriction);
        if (lower<T>()) ChangeToInt32OverflowOp(node);
      }
      return;
    }

    if (TypeOf(node->InputAt(0)).Is(Type::Unsigned32()) &&
        TypeOf(node->InputAt(1)).Is(Type::Unsigned32()) &&
        (truncation.IsUsedAsWord32() ||
         NodeProperties::GetType(node).Is(Type::Unsigned32()))) {
      VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                    MachineRepresentation::kWord32, Type::Number());
      if (lower<T>()) DeferReplacement(node, lowering->Uint32Mod(node));
      return;
    }
    if (TypeOf(node->InputAt(0)).Is(Type::Signed32()) &&
        TypeOf(node->InputAt(1)).Is(Type::Signed32()) &&
        (truncation.IsUsedAsWord32() ||
         NodeProperties::GetType(node).Is(Type::Signed32()))) {
      VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                    MachineRepresentation::kWord32, Type::Number());
      if (lower<T>()) DeferReplacement(node, lowering->Int32Mod(node));
      return;
    }

    // default case => Float64Mod
    // For the left hand side we just propagate the identify zeros
    // mode of the {truncation}; and for modulus the sign of the
    // right hand side doesn't matter anyways, so in particular there's
    // no observable difference between a 0 and a -0 then.
    UseInfo const lhs_use = UseInfo::CheckedNumberOrOddballAsFloat64(
        truncation.identify_zeros(), FeedbackSource());
    UseInfo const rhs_use = UseInfo::CheckedNumberOrOddballAsFloat64(
        kIdentifyZeros, FeedbackSource());
    VisitBinop<T>(node, lhs_use, rhs_use, MachineRepresentation::kFloat64,
                  Type::Number());
    if (lower<T>()) ChangeToPureOp(node, Float64Op(node));
  }
| |
| // Just assert for Propagate and Retype. Lower specialized below. |
| template <Phase T> |
| void InsertUnreachableIfNecessary(Node* node) { |
| static_assert(propagate<T>() || retype<T>(), |
| "This version of InsertUnreachableIfNecessary has to be " |
| "called in the Propagate or Retype phase."); |
| } |
| |
  // Lowers CheckBounds to CheckedUint32Bounds/CheckedUint64Bounds based on
  // the static index and length types. Any requested string/-0 conversion
  // is performed by the representation changer (via the chosen UseInfo), so
  // the kConvertStringAndMinusZero flag is stripped from the flags handed
  // to the lower-level Checked*Bounds operator.
  template <Phase T>
  void VisitCheckBounds(Node* node, SimplifiedLowering* lowering) {
    CheckBoundsParameters const& p = CheckBoundsParametersOf(node->op());
    FeedbackSource const& feedback = p.check_parameters().feedback();
    Type const index_type = TypeOf(node->InputAt(0));
    Type const length_type = TypeOf(node->InputAt(1));

    // Conversions, if requested and needed, will be handled by the
    // representation changer, not by the lower-level Checked*Bounds operators.
    CheckBoundsFlags new_flags =
        p.flags().without(CheckBoundsFlag::kConvertStringAndMinusZero);

    if (length_type.Is(Type::Unsigned31())) {
      if (index_type.Is(Type::Integral32()) ||
          (index_type.Is(Type::Integral32OrMinusZero()) &&
           p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero)) {
        // Map the values in the [-2^31,-1] range to the [2^31,2^32-1] range,
        // which will be considered out-of-bounds because the {length_type} is
        // limited to Unsigned31. This also converts -0 to 0.
        VisitBinop<T>(node, UseInfo::TruncatingWord32(),
                      MachineRepresentation::kWord32);
        if (lower<T>()) {
          if (index_type.IsNone() || length_type.IsNone() ||
              (index_type.Min() >= 0.0 &&
               index_type.Max() < length_type.Min())) {
            // The bounds check is redundant if we already know that
            // the index is within the bounds of [0.0, length[.
            // TODO(neis): Move this into TypedOptimization?
            if (v8_flags.turbo_typer_hardening) {
              // Keep the check but make a failure abort, to guard against
              // a wrong type rather than deopting.
              new_flags |= CheckBoundsFlag::kAbortOnOutOfBounds;
            } else {
              DeferReplacement(node, NodeProperties::GetValueInput(node, 0));
              return;
            }
          }
          ChangeOp(node,
                   simplified()->CheckedUint32Bounds(feedback, new_flags));
        }
      } else if (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero) {
        // The index may be a string (or -0) that requires conversion, so
        // take it as a checked array index at pointer width.
        VisitBinop<T>(node, UseInfo::CheckedTaggedAsArrayIndex(feedback),
                      UseInfo::Word(), MachineType::PointerRepresentation());
        if (lower<T>()) {
          if (jsgraph_->machine()->Is64()) {
            ChangeOp(node,
                     simplified()->CheckedUint64Bounds(feedback, new_flags));
          } else {
            ChangeOp(node,
                     simplified()->CheckedUint32Bounds(feedback, new_flags));
          }
        }
      } else {
        // Index must be a proper Signed32 (distinguishing -0), checked.
        VisitBinop<T>(
            node, UseInfo::CheckedSigned32AsWord32(kDistinguishZeros, feedback),
            UseInfo::TruncatingWord32(), MachineRepresentation::kWord32);
        if (lower<T>()) {
          ChangeOp(node,
                   simplified()->CheckedUint32Bounds(feedback, new_flags));
        }
      }
    } else {
      // Length exceeds Unsigned31: fall back to a 64-bit bounds check.
      CHECK(length_type.Is(type_cache_->kPositiveSafeInteger));
      IdentifyZeros zero_handling =
          (p.flags() & CheckBoundsFlag::kConvertStringAndMinusZero)
              ? kIdentifyZeros
              : kDistinguishZeros;
      VisitBinop<T>(node,
                    UseInfo::CheckedSigned64AsWord64(zero_handling, feedback),
                    UseInfo::Word64(), MachineRepresentation::kWord64);
      if (lower<T>()) {
        ChangeOp(node, simplified()->CheckedUint64Bounds(feedback, new_flags));
      }
    }
  }
| |
  // Computes the UseInfo for a single fast API call argument from its C
  // type {type}; {repr} selects whether 64-bit integer arguments arrive as
  // BigInts or as Numbers. Sequence arguments (and anything tagged) fall
  // back to UseInfo::AnyTagged().
  UseInfo UseInfoForFastApiCallArgument(CTypeInfo type,
                                        CFunctionInfo::Int64Representation repr,
                                        FeedbackSource const& feedback) {
    START_ALLOW_USE_DEPRECATED()
    switch (type.GetSequenceType()) {
      case CTypeInfo::SequenceType::kScalar: {
        uint8_t flags = uint8_t(type.GetFlags());
        if (flags & uint8_t(CTypeInfo::Flags::kEnforceRangeBit) ||
            flags & uint8_t(CTypeInfo::Flags::kClampBit)) {
          DCHECK(repr != CFunctionInfo::Int64Representation::kBigInt);
          // If the parameter is marked as `kEnforceRange` or `kClampBit`, then
          // special type conversion gets added explicitly to the generated
          // code. Therefore it is sufficient to only require here that the
          // value is a Float64, even though the C++ signature actually asks
          // for an `int32_t`.
          return UseInfo::CheckedNumberAsFloat64(kIdentifyZeros, feedback);
        }
        switch (type.GetType()) {
          case CTypeInfo::Type::kVoid:
          case CTypeInfo::Type::kUint8:
            UNREACHABLE();
          case CTypeInfo::Type::kBool:
            return UseInfo::Bool();
          case CTypeInfo::Type::kInt32:
          case CTypeInfo::Type::kUint32:
            return UseInfo::CheckedNumberAsWord32(feedback);
          // TODO(mslekova): We deopt for unsafe integers, but ultimately we
          // want to make this less restrictive in order to stay on the fast
          // path.
          case CTypeInfo::Type::kInt64:
          case CTypeInfo::Type::kUint64:
            if (repr == CFunctionInfo::Int64Representation::kBigInt) {
              return UseInfo::CheckedBigIntTruncatingWord64(feedback);
            } else if (repr == CFunctionInfo::Int64Representation::kNumber) {
              return UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback);
            } else {
              UNREACHABLE();
            }
          case CTypeInfo::Type::kAny:
            return UseInfo::CheckedSigned64AsWord64(kIdentifyZeros, feedback);
          case CTypeInfo::Type::kFloat32:
          case CTypeInfo::Type::kFloat64:
            return UseInfo::CheckedNumberAsFloat64(kDistinguishZeros, feedback);
          case CTypeInfo::Type::kPointer:
          case CTypeInfo::Type::kV8Value:
          case CTypeInfo::Type::kSeqOneByteString:
          case CTypeInfo::Type::kApiObject:
            return UseInfo::AnyTagged();
        }
      }
      case CTypeInfo::SequenceType::kIsSequence: {
        // Only void sequences are supported; passed through as tagged.
        CHECK_EQ(type.GetType(), CTypeInfo::Type::kVoid);
        return UseInfo::AnyTagged();
      }
      default: {
        UNREACHABLE();  // TODO(mslekova): Implement array buffers.
      }
    }
    END_ALLOW_USE_DEPRECATED()
  }
| |
  // Inline capacity for the per-argument UseInfo SmallVectors used when
  // visiting fast API and JS-to-Wasm calls; larger argument counts spill
  // to the heap.
  static constexpr int kInitialArgumentsCount = 10;
| |
  // Propagates representation information through a FastApiCall node:
  // fast-call arguments get UseInfos derived from the C signature, the
  // slow-call inputs get UseInfos from the call descriptor, and the output
  // representation is derived from the C return type.
  template <Phase T>
  void VisitFastApiCall(Node* node, SimplifiedLowering* lowering) {
    FastApiCallParameters const& op_params =
        FastApiCallParametersOf(node->op());
    // We only consider the first function signature here. In case of function
    // overloads, we only support the case of two functions that differ for one
    // argument, which must be a JSArray in one function and a TypedArray in the
    // other function, and both JSArrays and TypedArrays have the same UseInfo
    // UseInfo::AnyTagged(). All the other argument types must match.
    const CFunctionInfo* c_signature = op_params.c_function().signature;
    const int c_arg_count = c_signature->ArgumentCount();
    CallDescriptor* call_descriptor = op_params.descriptor();
    // Arguments for CallApiCallbackOptimizedXXX builtin (including context)
    // plus JS arguments (including receiver).
    int slow_arg_count = static_cast<int>(call_descriptor->ParameterCount());
    const int value_input_count = node->op()->ValueInputCount();
    CHECK_EQ(FastApiCallNode::ArityForArgc(c_arg_count, slow_arg_count),
             value_input_count);

    FastApiCallNode n(node);

    base::SmallVector<UseInfo, kInitialArgumentsCount> arg_use_info(
        c_arg_count);
    // Propagate representation information from TypeInfo.
    // {cursor} walks the node's value inputs in order: fast-call args,
    // callback data, slow-call target+args, frame state.
    int cursor = 0;
    for (int i = 0; i < c_arg_count; i++) {
      arg_use_info[i] = UseInfoForFastApiCallArgument(
          c_signature->ArgumentInfo(i), c_signature->GetInt64Representation(),
          op_params.feedback());
      ProcessInput<T>(node, cursor++, arg_use_info[i]);
    }
    // Callback data for fast call.
    DCHECK_EQ(n.CallbackDataIndex(), cursor);
    ProcessInput<T>(node, cursor++, UseInfo::AnyTagged());

    // The call code for the slow call.
    ProcessInput<T>(node, cursor++, UseInfo::AnyTagged());
    // For the slow builtin parameters (indexes [1, ..., params]), propagate
    // representation information from call descriptor.
    for (int i = 1; i <= slow_arg_count; i++) {
      ProcessInput<T>(node, cursor++,
                      TruncatingUseInfoFromRepresentation(
                          call_descriptor->GetInputType(i).representation()));
    }
    // Visit frame state input as tagged.
    DCHECK_EQ(n.FrameStateIndex(), cursor);
    ProcessInput<T>(node, cursor++, UseInfo::AnyTagged());
    DCHECK_EQ(cursor, value_input_count);

    // Effect and Control.
    ProcessRemainingInputs<T>(node, value_input_count);

    // Pick the output representation from the C return type.
    CTypeInfo return_type = op_params.c_function().signature->ReturnInfo();
    switch (return_type.GetType()) {
      case CTypeInfo::Type::kBool:
        SetOutput<T>(node, MachineRepresentation::kBit);
        return;
      case CTypeInfo::Type::kFloat32:
        SetOutput<T>(node, MachineRepresentation::kFloat32);
        return;
      case CTypeInfo::Type::kFloat64:
        SetOutput<T>(node, MachineRepresentation::kFloat64);
        return;
      case CTypeInfo::Type::kInt32:
        SetOutput<T>(node, MachineRepresentation::kWord32);
        return;
      case CTypeInfo::Type::kInt64:
      case CTypeInfo::Type::kUint64:
        // 64-bit results come back as BigInts (word64) or Numbers
        // (float64), depending on the signature's Int64Representation.
        if (c_signature->GetInt64Representation() ==
            CFunctionInfo::Int64Representation::kBigInt) {
          SetOutput<T>(node, MachineRepresentation::kWord64);
          return;
        }
        DCHECK_EQ(c_signature->GetInt64Representation(),
                  CFunctionInfo::Int64Representation::kNumber);
        SetOutput<T>(node, MachineRepresentation::kFloat64);
        return;
      case CTypeInfo::Type::kSeqOneByteString:
        SetOutput<T>(node, MachineRepresentation::kTagged);
        return;
      case CTypeInfo::Type::kUint32:
        SetOutput<T>(node, MachineRepresentation::kWord32);
        return;
      case CTypeInfo::Type::kUint8:
        SetOutput<T>(node, MachineRepresentation::kWord8);
        return;
      case CTypeInfo::Type::kAny:
        // This type is only supposed to be used for parameters, not returns.
        UNREACHABLE();
      case CTypeInfo::Type::kPointer:
      case CTypeInfo::Type::kApiObject:
      case CTypeInfo::Type::kV8Value:
      case CTypeInfo::Type::kVoid:
        SetOutput<T>(node, MachineRepresentation::kTagged);
        return;
    }
  }
| |
  // Tries to replace a speculative BigInt shift whose shift amount is a
  // BigInt heap constant with a plain 64-bit machine shift (or a
  // constant). Returns false — leaving the node untouched — when the
  // result is not truncated to word64, the shift amount is not losslessly
  // representable as int64, or a right shift's input is not known to be a
  // signed/unsigned 64-bit BigInt.
  template <Phase T>
  bool TryOptimizeBigInt64Shift(Node* node, const Truncation& truncation,
                                SimplifiedLowering* lowering) {
    DCHECK(Is64());
    if (!truncation.IsUsedAsWord64()) return false;

    Type input_type = GetUpperBound(node->InputAt(0));
    Type shift_amount_type = GetUpperBound(node->InputAt(1));

    if (!shift_amount_type.IsHeapConstant()) return false;
    HeapObjectRef ref = shift_amount_type.AsHeapConstant()->Ref();
    if (!ref.IsBigInt()) return false;
    BigIntRef bigint = ref.AsBigInt();
    bool lossless = false;
    int64_t shift_amount = bigint.AsInt64(&lossless);
    // We bail out if we cannot represent the shift amount correctly.
    if (!lossless) return false;

    // Canonicalize {shift_amount}: a shift by a negative amount is the
    // opposite-direction shift by the absolute amount.
    bool is_shift_left =
        node->opcode() == IrOpcode::kSpeculativeBigIntShiftLeft;
    if (shift_amount < 0) {
      // A shift amount of abs(std::numeric_limits<int64_t>::min()) is not
      // representable.
      if (shift_amount == std::numeric_limits<int64_t>::min()) return false;
      is_shift_left = !is_shift_left;
      shift_amount = -shift_amount;
      DCHECK_GT(shift_amount, 0);
    }
    DCHECK_GE(shift_amount, 0);

    // If the operation is a *real* left shift, propagate truncation.
    // If it is a *real* right shift, the output representation is
    // word64 only if we know the input type is BigInt64.
    // Otherwise, fall through to using BigIntOperationHint.
    if (is_shift_left) {
      VisitBinop<T>(node,
                    UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
                    UseInfo::Any(), MachineRepresentation::kWord64);
      if (lower<T>()) {
        if (shift_amount > 63) {
          // Left-shifting by >= 64 leaves no bits of the truncated result.
          DeferReplacement(node, jsgraph_->Int64Constant(0));
        } else if (shift_amount == 0) {
          DeferReplacement(node, node->InputAt(0));
        } else {
          DCHECK_GE(shift_amount, 1);
          DCHECK_LE(shift_amount, 63);
          ReplaceWithPureNode(
              node, graph()->NewNode(lowering->machine()->Word64Shl(),
                                     node->InputAt(0),
                                     jsgraph_->Int64Constant(shift_amount)));
        }
      }
      return true;
    } else if (input_type.Is(Type::SignedBigInt64())) {
      // Signed input: arithmetic right shift.
      VisitBinop<T>(node,
                    UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
                    UseInfo::Any(), MachineRepresentation::kWord64);
      if (lower<T>()) {
        if (shift_amount > 63) {
          // Shifting out all bits leaves only the sign, i.e. x >> 63.
          ReplaceWithPureNode(
              node,
              graph()->NewNode(lowering->machine()->Word64Sar(),
                               node->InputAt(0), jsgraph_->Int64Constant(63)));
        } else if (shift_amount == 0) {
          DeferReplacement(node, node->InputAt(0));
        } else {
          DCHECK_GE(shift_amount, 1);
          DCHECK_LE(shift_amount, 63);
          ReplaceWithPureNode(
              node, graph()->NewNode(lowering->machine()->Word64Sar(),
                                     node->InputAt(0),
                                     jsgraph_->Int64Constant(shift_amount)));
        }
      }
      return true;
    } else if (input_type.Is(Type::UnsignedBigInt64())) {
      // Unsigned input: logical right shift.
      VisitBinop<T>(node,
                    UseInfo::CheckedBigIntTruncatingWord64(FeedbackSource{}),
                    UseInfo::Any(), MachineRepresentation::kWord64);
      if (lower<T>()) {
        if (shift_amount > 63) {
          DeferReplacement(node, jsgraph_->Int64Constant(0));
        } else if (shift_amount == 0) {
          DeferReplacement(node, node->InputAt(0));
        } else {
          DCHECK_GE(shift_amount, 1);
          DCHECK_LE(shift_amount, 63);
          ReplaceWithPureNode(
              node, graph()->NewNode(lowering->machine()->Word64Shr(),
                                     node->InputAt(0),
                                     jsgraph_->Int64Constant(shift_amount)));
        }
      }
      return true;
    }

    // None of the cases we can optimize here.
    return false;
  }
| |
| #if V8_ENABLE_WEBASSEMBLY |
| static MachineType MachineTypeForWasmReturnType( |
| wasm::CanonicalValueType type) { |
| switch (type.kind()) { |
| case wasm::kI32: |
| return MachineType::Int32(); |
| case wasm::kI64: |
| return MachineType::Int64(); |
| case wasm::kF32: |
| return MachineType::Float32(); |
| case wasm::kF64: |
| return MachineType::Float64(); |
| case wasm::kRef: |
| case wasm::kRefNull: |
| return MachineType::AnyTagged(); |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| UseInfo UseInfoForJSWasmCallArgument(Node* input, |
| wasm::CanonicalValueType type, |
| FeedbackSource const& feedback) { |
| // If the input type is a Number or Oddball, we can directly convert the |
| // input into the Wasm native type of the argument. If not, we return |
| // UseInfo::AnyTagged to signal that WasmWrapperGraphBuilder will need to |
| // add Nodes to perform the conversion (in WasmWrapperGraphBuilder::FromJS). |
| switch (type.kind()) { |
| case wasm::kI32: |
| return UseInfo::CheckedNumberOrOddballAsWord32(feedback); |
| case wasm::kI64: |
| return UseInfo::CheckedBigIntTruncatingWord64(feedback); |
| case wasm::kF32: |
| case wasm::kF64: |
| // For Float32, TruncateFloat64ToFloat32 will be inserted later in |
| // WasmWrapperGraphBuilder::BuildJSToWasmWrapper. |
| return UseInfo::CheckedNumberOrOddballAsFloat64(kDistinguishZeros, |
| feedback); |
| case wasm::kRef: |
| case wasm::kRefNull: |
| return UseInfo::AnyTagged(); |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
  // Propagates representation information into a JSWasmCall: Wasm
  // arguments take their native representations where possible, the
  // remaining value/context/frame-state inputs stay tagged, and the output
  // representation is derived from the Wasm return type. The node itself
  // is lowered later, in the wasm-inlining phase.
  template <Phase T>
  void VisitJSWasmCall(Node* node, SimplifiedLowering* lowering) {
    DCHECK_EQ(JSWasmCallNode::TargetIndex(), 0);
    DCHECK_EQ(JSWasmCallNode::ReceiverIndex(), 1);
    DCHECK_EQ(JSWasmCallNode::FirstArgumentIndex(), 2);

    JSWasmCallNode n(node);

    JSWasmCallParameters const& params = n.Parameters();
    const wasm::CanonicalSig* wasm_signature = params.signature();
    int wasm_arg_count = static_cast<int>(wasm_signature->parameter_count());
    DCHECK_EQ(wasm_arg_count, n.ArgumentCount());

    base::SmallVector<UseInfo, kInitialArgumentsCount> arg_use_info(
        wasm_arg_count);

    // Visit JSFunction and Receiver nodes.
    ProcessInput<T>(node, JSWasmCallNode::TargetIndex(), UseInfo::Any());
    ProcessInput<T>(node, JSWasmCallNode::ReceiverIndex(), UseInfo::Any());

    // Propagate representation information from TypeInfo.
    for (int i = 0; i < wasm_arg_count; i++) {
      TNode<Object> input = n.Argument(i);
      DCHECK_NOT_NULL(input);
      arg_use_info[i] = UseInfoForJSWasmCallArgument(
          input, wasm_signature->GetParam(i), params.feedback());
      ProcessInput<T>(node, JSWasmCallNode::ArgumentIndex(i), arg_use_info[i]);
    }

    // Visit value, context and frame state inputs as tagged.
    int first_effect_index = NodeProperties::FirstEffectIndex(node);
    DCHECK(first_effect_index >
           JSWasmCallNode::FirstArgumentIndex() + wasm_arg_count);
    for (int i = JSWasmCallNode::FirstArgumentIndex() + wasm_arg_count;
         i < first_effect_index; i++) {
      ProcessInput<T>(node, i, UseInfo::AnyTagged());
    }

    // Effect and Control.
    ProcessRemainingInputs<T>(node, NodeProperties::FirstEffectIndex(node));

    if (wasm_signature->return_count() == 1) {
      // Single return: output representation and type come from the Wasm
      // return type.
      MachineType return_type =
          MachineTypeForWasmReturnType(wasm_signature->GetReturn());
      SetOutput<T>(
          node, return_type.representation(),
          JSWasmCallNode::TypeForWasmReturnType(wasm_signature->GetReturn()));
    } else {
      DCHECK_EQ(wasm_signature->return_count(), 0);
      SetOutput<T>(node, MachineRepresentation::kTagged);
    }

    // The actual lowering of JSWasmCall nodes happens later, in the subsequent
    // "wasm-inlining" phase.
  }
| #endif // V8_ENABLE_WEBASSEMBLY |
| |
| // Dispatching routine for visiting the node {node} with the usage {use}. |
| // Depending on the operator, propagate new usage info to the inputs. |
| template <Phase T> |
| void VisitNode(Node* node, Truncation truncation, |
| SimplifiedLowering* lowering) { |
| tick_counter_->TickAndMaybeEnterSafepoint(); |
| |
| if (lower<T>()) { |
| // Kill non-effectful operations that have a None-type input and are thus |
| // dead code. Otherwise we might end up lowering the operation in a way, |
| // e.g. by replacing it with a constant, that cuts the dependency on a |
| // deopting operation (the producer of the None type), possibly resulting |
| // in a nonsense schedule. |
| if (node->op()->EffectOutputCount() == 0 && |
| node->op()->ControlOutputCount() == 0 && |
| node->opcode() != IrOpcode::kDeadValue && |
| node->opcode() != IrOpcode::kStateValues && |
| node->opcode() != IrOpcode::kFrameState && |
| node->opcode() != IrOpcode::kPhi) { |
| for (int i = 0; i < node->op()->ValueInputCount(); i++) { |
| Node* input = node->InputAt(i); |
| if (TypeOf(input).IsNone()) { |
| node->ReplaceInput(0, input); |
| node->TrimInputCount(1); |
| ChangeOp(node, |
| common()->DeadValue(GetInfo(node)->representation())); |
| return; |
| } |
| } |
| } else { |
| InsertUnreachableIfNecessary<T>(node); |
| } |
| } |
| |
| // Unconditionally eliminate unused pure nodes (only relevant if there's |
| // a pure operation in between two effectful ones, where the last one |
| // is unused). |
| // Note: We must not do this for constants, as they are cached and we |
| // would thus kill the cached {node} during lowering (i.e. replace all |
| // uses with Dead), but at that point some node lowering might have |
| // already taken the constant {node} from the cache (while it was not |
| // yet killed) and we would afterwards replace that use with Dead as well. |
| if (node->op()->ValueInputCount() > 0 && |
| node->op()->HasProperty(Operator::kPure) && truncation.IsUnused()) { |
| return VisitUnused<T>(node); |
| } |
| |
| switch (node->opcode()) { |
| //------------------------------------------------------------------ |
| // Common operators. |
| //------------------------------------------------------------------ |
| case IrOpcode::kStart: |
| // We use Start as a terminator for the frame state chain, so even |
| // tho Start doesn't really produce a value, we have to say Tagged |
| // here, otherwise the input conversion will fail. |
| return VisitLeaf<T>(node, MachineRepresentation::kTagged); |
| case IrOpcode::kParameter: |
| return VisitUnop<T>(node, UseInfo::None(), |
| linkage() |
| ->GetParameterType(ParameterIndexOf(node->op())) |
| .representation()); |
| case IrOpcode::kInt32Constant: |
| DCHECK_EQ(0, node->InputCount()); |
| SetOutput<T>(node, MachineRepresentation::kWord32); |
| DCHECK(NodeProperties::GetType(node).Is(Type::Machine())); |
| if (V8_UNLIKELY(verification_enabled())) { |
| // During lowering, SimplifiedLowering generates Int32Constants which |
| // need to be treated differently by the verifier than the |
| // Int32Constants introduced explicitly in machine graphs. To be able |
| // to distinguish them, we record those that are being visited here |
| // because they were generated before SimplifiedLowering. |
| if (propagate<T>()) { |
| verifier_->RecordMachineUsesOfConstant(node, node->uses()); |
| } |
| } |
| return; |
| case IrOpcode::kInt64Constant: |
| return VisitLeaf<T>(node, MachineRepresentation::kWord64); |
| case IrOpcode::kExternalConstant: |
| return VisitLeaf<T>(node, MachineType::PointerRepresentation()); |
| case IrOpcode::kNumberConstant: { |
| double const value = OpParameter<double>(node->op()); |
| int value_as_int; |
| if (DoubleToSmiInteger(value, &value_as_int)) { |
| VisitLeaf<T>(node, MachineRepresentation::kTaggedSigned); |
| if (lower<T>()) { |
| intptr_t smi = base::bit_cast<intptr_t>(Smi::FromInt(value_as_int)); |
| Node* constant = InsertTypeOverrideForVerifier( |
| NodeProperties::GetType(node), |
| lowering->jsgraph()->IntPtrConstant(smi)); |
| DeferReplacement(node, constant); |
| } |
| return; |
| } |
| VisitLeaf<T>(node, MachineRepresentation::kTagged); |
| return; |
| } |
| case IrOpcode::kHeapConstant: |
| return VisitLeaf<T>(node, MachineRepresentation::kTaggedPointer); |
| case IrOpcode::kTrustedHeapConstant: |
| return VisitLeaf<T>(node, MachineRepresentation::kTaggedPointer); |
| case IrOpcode::kPointerConstant: { |
| VisitLeaf<T>(node, MachineType::PointerRepresentation()); |
| if (lower<T>()) { |
| intptr_t const value = OpParameter<intptr_t>(node->op()); |
| DeferReplacement(node, lowering->jsgraph()->IntPtrConstant(value)); |
| } |
| return; |
| } |
| |
| case IrOpcode::kBranch: { |
| const auto& p = BranchParametersOf(node->op()); |
| if (p.semantics() == BranchSemantics::kMachine) { |
| // If this is a machine branch, the condition is a machine operator, |
| // so we enter machine branch here. |
| ProcessInput<T>(node, 0, UseInfo::Any()); |
| } else { |
| DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean())); |
| ProcessInput<T>(node, 0, UseInfo::Bool()); |
| if (lower<T>()) { |
| ChangeOp(node, |
| common()->Branch(p.hint(), BranchSemantics::kMachine)); |
| } |
| } |
| EnqueueInput<T>(node, NodeProperties::FirstControlIndex(node)); |
| return; |
| } |
| case IrOpcode::kSwitch: |
| ProcessInput<T>(node, 0, UseInfo::TruncatingWord32()); |
| EnqueueInput<T>(node, NodeProperties::FirstControlIndex(node)); |
| return; |
| case IrOpcode::kSelect: { |
| const auto& p = SelectParametersOf(node->op()); |
| if (p.semantics() == BranchSemantics::kMachine) { |
| // If this is a machine select, all inputs are machine operators. |
| ProcessInput<T>(node, 0, UseInfo::Any()); |
| ProcessInput<T>(node, 1, UseInfo::Any()); |
| ProcessInput<T>(node, 2, UseInfo::Any()); |
| SetOutput<T>(node, p.representation()); |
| } else { |
| VisitSelect<T>(node, truncation, lowering); |
| } |
| return; |
| } |
| case IrOpcode::kPhi: |
| return VisitPhi<T>(node, truncation, lowering); |
| case IrOpcode::kCall: |
| return VisitCall<T>(node, lowering); |
| case IrOpcode::kAssert: { |
| const auto& p = AssertParametersOf(node->op()); |
| if (p.semantics() == BranchSemantics::kMachine) { |
| // If this is a machine condition already, we don't need to do |
| // anything. |
| ProcessInput<T>(node, 0, UseInfo::Any()); |
| } else { |
| DCHECK(TypeOf(node->InputAt(0)).Is(Type::Boolean())); |
| ProcessInput<T>(node, 0, UseInfo::Bool()); |
| if (lower<T>()) { |
| ChangeOp(node, common()->Assert(BranchSemantics::kMachine, |
| p.condition_string(), p.file(), |
| p.line())); |
| } |
| } |
| EnqueueInput<T>(node, NodeProperties::FirstControlIndex(node)); |
| return; |
| } |
| |
| //------------------------------------------------------------------ |
| // JavaScript operators. |
| //------------------------------------------------------------------ |
| case IrOpcode::kJSToNumber: |
| case IrOpcode::kJSToNumberConvertBigInt: |
| case IrOpcode::kJSToNumeric: { |
| DCHECK(NodeProperties::GetType(node).Is(Type::Union( |
| Type::BigInt(), Type::NumberOrOddball(), graph()->zone()))); |
| VisitInputs<T>(node); |
| // TODO(bmeurer): Optimize somewhat based on input type? |
| if (truncation.IsUsedAsWord32()) { |
| SetOutput<T>(node, MachineRepresentation::kWord32); |
| if (lower<T>()) |
| lowering->DoJSToNumberOrNumericTruncatesToWord32(node, this); |
| } else if (truncation.TruncatesOddballAndBigIntToNumber()) { |
| SetOutput<T>(node, MachineRepresentation::kFloat64); |
| if (lower<T>()) |
| lowering->DoJSToNumberOrNumericTruncatesToFloat64(node, this); |
| } else { |
| SetOutput<T>(node, MachineRepresentation::kTagged); |
| } |
| return; |
| } |
| case IrOpcode::kJSToBigInt: |
| case IrOpcode::kJSToBigIntConvertNumber: { |
| VisitInputs<T>(node); |
| SetOutput<T>(node, MachineRepresentation::kTaggedPointer); |
| return; |
| } |
| |
| //------------------------------------------------------------------ |
| // Simplified operators. |
| //------------------------------------------------------------------ |
| case IrOpcode::kToBoolean: { |
| if (truncation.IsUsedAsBool()) { |
| ProcessInput<T>(node, 0, UseInfo::Bool()); |
| SetOutput<T>(node, MachineRepresentation::kBit); |
| if (lower<T>()) DeferReplacement(node, node->InputAt(0)); |
| } else { |
| VisitInputs<T>(node); |
| SetOutput<T>(node, MachineRepresentation::kTaggedPointer); |
| } |
| return; |
| } |
| case IrOpcode::kBooleanNot: { |
| if (lower<T>()) { |
| NodeInfo* input_info = GetInfo(node->InputAt(0)); |
| if (input_info->representation() == MachineRepresentation::kBit) { |
| // BooleanNot(x: kRepBit) => Word32Equal(x, #0) |
| node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0)); |
| ChangeOp(node, lowering->machine()->Word32Equal()); |
| } else if (CanBeTaggedPointer(input_info->representation())) { |
| // BooleanNot(x: kRepTagged) => TaggedEqual(x, #false) |
| node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant()); |
| ChangeOp(node, lowering->machine()->TaggedEqual()); |
| } else { |
| DCHECK(TypeOf(node->InputAt(0)).IsNone()); |
| DeferReplacement(node, lowering->jsgraph()->Int32Constant(0)); |
| } |
|