[TurboProp] Move MidTierRegisterAllocator out of header.
Only expose top-level functions for DefineOutputs and AllocateRegisters in
the mid-tier register allocator, rather than exposing the MidTierRegisterAllocator
object, to be in line with AllocateSpillSlots and PopulateReferenceMaps.
BUG=v8:9684
Change-Id: I93dcff77f5e50dab9b373b4415029361078d58e1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2323361
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69226}
diff --git a/src/compiler/backend/mid-tier-register-allocator.cc b/src/compiler/backend/mid-tier-register-allocator.cc
index 4a071c0..e033799 100644
--- a/src/compiler/backend/mid-tier-register-allocator.cc
+++ b/src/compiler/backend/mid-tier-register-allocator.cc
@@ -1977,27 +1977,33 @@
}
}
-MidTierRegisterAllocator::MidTierRegisterAllocator(
- MidTierRegisterAllocationData* data)
- : data_(data),
- general_reg_allocator_(
- new SinglePassRegisterAllocator(RegisterKind::kGeneral, data)),
- double_reg_allocator_(
- new SinglePassRegisterAllocator(RegisterKind::kDouble, data)) {}
+class MidTierOutputProcessor final {
+ public:
+ explicit MidTierOutputProcessor(MidTierRegisterAllocationData* data);
-MidTierRegisterAllocator::~MidTierRegisterAllocator() = default;
+ void InitializeBlockState(const InstructionBlock* block);
+ void DefineOutputs(const InstructionBlock* block);
-void MidTierRegisterAllocator::DefineOutputs() {
- for (const InstructionBlock* block :
- base::Reversed(code()->instruction_blocks())) {
- data_->tick_counter()->TickAndMaybeEnterSafepoint();
-
- InitializeBlockState(block);
- DefineOutputs(block);
+ private:
+ VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
+ return data()->VirtualRegisterDataFor(virtual_register);
}
-}
+ MachineRepresentation RepresentationFor(int virtual_register) const {
+ return data()->RepresentationFor(virtual_register);
+ }
-void MidTierRegisterAllocator::InitializeBlockState(
+ MidTierRegisterAllocationData* data() const { return data_; }
+ InstructionSequence* code() const { return data()->code(); }
+ Zone* allocation_zone() const { return data()->allocation_zone(); }
+
+ MidTierRegisterAllocationData* const data_;
+};
+
+MidTierOutputProcessor::MidTierOutputProcessor(
+ MidTierRegisterAllocationData* data)
+ : data_(data) {}
+
+void MidTierOutputProcessor::InitializeBlockState(
const InstructionBlock* block) {
// Update our predecessor blocks with their successors_phi_index if we have
// phis.
@@ -2022,7 +2028,7 @@
}
}
-void MidTierRegisterAllocator::DefineOutputs(const InstructionBlock* block) {
+void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
int block_start = block->first_instruction_index();
for (int index = block->last_instruction_index(); index >= block_start;
index--) {
@@ -2074,19 +2080,65 @@
}
}
-void MidTierRegisterAllocator::AllocateRegisters() {
- for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
- data_->tick_counter()->TickAndMaybeEnterSafepoint();
- AllocateRegisters(block);
+void DefineOutputs(MidTierRegisterAllocationData* data) {
+ MidTierOutputProcessor processor(data);
+
+ for (const InstructionBlock* block :
+ base::Reversed(data->code()->instruction_blocks())) {
+ data->tick_counter()->TickAndMaybeEnterSafepoint();
+
+ processor.InitializeBlockState(block);
+ processor.DefineOutputs(block);
+ }
+}
+
+class MidTierRegisterAllocator final {
+ public:
+ explicit MidTierRegisterAllocator(MidTierRegisterAllocationData* data);
+
+ void AllocateRegisters(const InstructionBlock* block);
+ void UpdateSpillRangesForLoops();
+
+ SinglePassRegisterAllocator& general_reg_allocator() {
+ return general_reg_allocator_;
+ }
+ SinglePassRegisterAllocator& double_reg_allocator() {
+ return double_reg_allocator_;
}
- UpdateSpillRangesForLoops();
+ private:
+ void AllocatePhis(const InstructionBlock* block);
+ void AllocatePhiGapMoves(const InstructionBlock* block);
- data()->frame()->SetAllocatedRegisters(
- general_reg_allocator().assigned_registers());
- data()->frame()->SetAllocatedDoubleRegisters(
- double_reg_allocator().assigned_registers());
-}
+ bool IsFixedRegisterPolicy(const UnallocatedOperand* operand);
+ void ReserveFixedRegisters(int instr_index);
+
+ SinglePassRegisterAllocator& AllocatorFor(MachineRepresentation rep);
+ SinglePassRegisterAllocator& AllocatorFor(const UnallocatedOperand* operand);
+ SinglePassRegisterAllocator& AllocatorFor(const ConstantOperand* operand);
+
+ VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
+ return data()->VirtualRegisterDataFor(virtual_register);
+ }
+ MachineRepresentation RepresentationFor(int virtual_register) const {
+ return data()->RepresentationFor(virtual_register);
+ }
+ MidTierRegisterAllocationData* data() const { return data_; }
+ InstructionSequence* code() const { return data()->code(); }
+ Zone* allocation_zone() const { return data()->allocation_zone(); }
+
+ MidTierRegisterAllocationData* const data_;
+ SinglePassRegisterAllocator general_reg_allocator_;
+ SinglePassRegisterAllocator double_reg_allocator_;
+
+ DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocator);
+};
+
+MidTierRegisterAllocator::MidTierRegisterAllocator(
+ MidTierRegisterAllocationData* data)
+ : data_(data),
+ general_reg_allocator_(RegisterKind::kGeneral, data),
+ double_reg_allocator_(RegisterKind::kDouble, data) {}
void MidTierRegisterAllocator::AllocateRegisters(
const InstructionBlock* block) {
@@ -2313,6 +2365,22 @@
}
}
+void AllocateRegisters(MidTierRegisterAllocationData* data) {
+ MidTierRegisterAllocator allocator(data);
+ for (InstructionBlock* block :
+ base::Reversed(data->code()->instruction_blocks())) {
+ data->tick_counter()->TickAndMaybeEnterSafepoint();
+ allocator.AllocateRegisters(block);
+ }
+
+ allocator.UpdateSpillRangesForLoops();
+
+ data->frame()->SetAllocatedRegisters(
+ allocator.general_reg_allocator().assigned_registers());
+ data->frame()->SetAllocatedDoubleRegisters(
+ allocator.double_reg_allocator().assigned_registers());
+}
+
// Spill slot allocator for mid-tier register allocation.
class MidTierSpillSlotAllocator final {
public:
@@ -2335,7 +2403,7 @@
bool operator()(const SpillSlot* a, const SpillSlot* b) const;
};
- MidTierRegisterAllocationData* data_;
+ MidTierRegisterAllocationData* const data_;
ZonePriorityQueue<SpillSlot*, OrderByLastUse> allocated_slots_;
ZoneLinkedList<SpillSlot*> free_slots_;
int position_;
@@ -2464,7 +2532,7 @@
MidTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
- MidTierRegisterAllocationData* data_;
+ MidTierRegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(MidTierReferenceMapPopulator);
};
diff --git a/src/compiler/backend/mid-tier-register-allocator.h b/src/compiler/backend/mid-tier-register-allocator.h
index 9012b03..6d8006b 100644
--- a/src/compiler/backend/mid-tier-register-allocator.h
+++ b/src/compiler/backend/mid-tier-register-allocator.h
@@ -22,7 +22,6 @@
namespace compiler {
class BlockState;
-class SinglePassRegisterAllocator;
class VirtualRegisterData;
// The MidTierRegisterAllocator is a register allocator specifically designed to
@@ -102,59 +101,12 @@
DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocationData);
};
-class MidTierRegisterAllocator final {
- public:
- explicit MidTierRegisterAllocator(MidTierRegisterAllocationData* data);
- ~MidTierRegisterAllocator();
+// Phase 1: Process instruction outputs to determine how each virtual register
+// is defined.
+void DefineOutputs(MidTierRegisterAllocationData* data);
- // Phase 1: Process instruction outputs to determine how each virtual register
- // is defined.
- void DefineOutputs();
-
- // Phase 2: Allocate registers to instructions.
- void AllocateRegisters();
-
- private:
- // Define outputs operations.
- void InitializeBlockState(const InstructionBlock* block);
- void DefineOutputs(const InstructionBlock* block);
-
- // Allocate registers operations.
- void AllocateRegisters(const InstructionBlock* block);
- void AllocatePhis(const InstructionBlock* block);
- void AllocatePhiGapMoves(const InstructionBlock* block);
- void UpdateSpillRangesForLoops();
-
- bool IsFixedRegisterPolicy(const UnallocatedOperand* operand);
- void ReserveFixedRegisters(int instr_index);
-
- SinglePassRegisterAllocator& AllocatorFor(MachineRepresentation rep);
- SinglePassRegisterAllocator& AllocatorFor(const UnallocatedOperand* operand);
- SinglePassRegisterAllocator& AllocatorFor(const ConstantOperand* operand);
-
- SinglePassRegisterAllocator& general_reg_allocator() {
- return *general_reg_allocator_;
- }
- SinglePassRegisterAllocator& double_reg_allocator() {
- return *double_reg_allocator_;
- }
-
- VirtualRegisterData& VirtualRegisterDataFor(int virtual_register) const {
- return data()->VirtualRegisterDataFor(virtual_register);
- }
- MachineRepresentation RepresentationFor(int virtual_register) const {
- return data()->RepresentationFor(virtual_register);
- }
- MidTierRegisterAllocationData* data() const { return data_; }
- InstructionSequence* code() const { return data()->code(); }
- Zone* allocation_zone() const { return data()->allocation_zone(); }
-
- MidTierRegisterAllocationData* data_;
- std::unique_ptr<SinglePassRegisterAllocator> general_reg_allocator_;
- std::unique_ptr<SinglePassRegisterAllocator> double_reg_allocator_;
-
- DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocator);
-};
+// Phase 2: Allocate registers to instructions.
+void AllocateRegisters(MidTierRegisterAllocationData* data);
// Phase 3: assign spilled operands to specific spill slots.
void AllocateSpillSlots(MidTierRegisterAllocationData* data);
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 1818eb6..90911b6 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -2316,14 +2316,19 @@
}
};
+struct MidTierRegisterOutputDefinitionPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ DefineOutputs(data->mid_tier_register_allocator_data());
+ }
+};
+
struct MidTierRegisterAllocatorPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MidTierRegisterAllocator)
void Run(PipelineData* data, Zone* temp_zone) {
- MidTierRegisterAllocator allocator(
- data->mid_tier_register_allocator_data());
- allocator.DefineOutputs();
- allocator.AllocateRegisters();
+ AllocateRegisters(data->mid_tier_register_allocator_data());
}
};
@@ -3727,6 +3732,8 @@
TraceSequence(info(), data, "before register allocation");
+ Run<MidTierRegisterOutputDefinitionPhase>();
+
Run<MidTierRegisterAllocatorPhase>();
Run<MidTierSpillSlotAllocatorPhase>();
diff --git a/src/logging/counters.h b/src/logging/counters.h
index 6a284ca..de583dd 100644
--- a/src/logging/counters.h
+++ b/src/logging/counters.h
@@ -880,77 +880,78 @@
V(Prefix##Suffix) \
V(Prefix##Background##Suffix)
-#define FOR_EACH_THREAD_SPECIFIC_COUNTER(V) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Analyse) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Eval) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Function) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Ignition) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, IgnitionFinalization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, RewriteReturnResult) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \
- \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CommitAssignment) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ConnectRanges) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ControlFlowOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CSAEarlyOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CSAOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, DecideSpillingMode) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, DecompressionOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoadElimination) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LocateSpillSlots) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopExitElimination) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopPeeling) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MachineOperatorOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MeetRegisterConstraints) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MemoryOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MergeSplinteredRanges) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, OptimizeMoves) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PopulatePointerMaps) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PrintGraph) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ResolveControlFlow) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ResolvePhis) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, \
- ScheduledEffectControlLinearization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ScheduledMachineLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Scheduling) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SelectInstructions) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SimplifiedLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SplinterLiveRanges) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, StoreStoreElimination) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Typer) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Untyper) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmFullOptimization) \
- \
- ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \
- ADD_THREAD_SPECIFIC_COUNTER(V, Parse, Program) \
- ADD_THREAD_SPECIFIC_COUNTER(V, PreParse, ArrowFunctionLiteral) \
+#define FOR_EACH_THREAD_SPECIFIC_COUNTER(V) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Analyse) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Eval) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Function) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Ignition) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, IgnitionFinalization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, RewriteReturnResult) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \
+ \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRangeBundles) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BuildLiveRanges) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CommitAssignment) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ConnectRanges) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ControlFlowOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CSAEarlyOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, CSAOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, DecideSpillingMode) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, DecompressionOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EarlyTrimming) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterOutputDefinition) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, BytecodeGraphBuilder) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateGraphTrimming) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LateOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoadElimination) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LocateSpillSlots) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopExitElimination) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, LoopPeeling) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MachineOperatorOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MeetRegisterConstraints) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MemoryOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MergeSplinteredRanges) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, OptimizeMoves) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PopulatePointerMaps) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, PrintGraph) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ResolveControlFlow) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ResolvePhis) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, \
+ ScheduledEffectControlLinearization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, ScheduledMachineLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Scheduling) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SelectInstructions) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SimplifiedLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, SplinterLiveRanges) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, StoreStoreElimination) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypeAssertions) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, TypedLowering) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Typer) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Untyper) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, VerifyGraph) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmBaseOptimization) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, WasmFullOptimization) \
+ \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Parse, ArrowFunctionLiteral) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Parse, FunctionLiteral) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Parse, Program) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, PreParse, ArrowFunctionLiteral) \
ADD_THREAD_SPECIFIC_COUNTER(V, PreParse, WithVariableResolution)
#define FOR_EACH_MANUAL_COUNTER(V) \