//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
#include "Backend.h"
#define INLINEEMETAARG_COUNT 3
BackwardPass::BackwardPass(Func * func, GlobOpt * globOpt, Js::Phase tag)
: func(func), globOpt(globOpt), tag(tag), currentPrePassLoop(nullptr), tempAlloc(nullptr),
preOpBailOutInstrToProcess(nullptr),
considerSymAsRealUseInNoImplicitCallUses(nullptr),
isCollectionPass(false), currentRegion(nullptr),
collectionPassSubPhase(CollectionPassSubPhase::None),
isLoopPrepass(false)
{
// These are the only three phases the BackwardPass currently uses
Assert(tag == Js::BackwardPhase || tag == Js::DeadStorePhase || tag == Js::CaptureByteCodeRegUsePhase);
#if DBG
// The CaptureByteCodeRegUse phase is just a collection phase; no mutations should occur
this->isCollectionPass = tag == Js::CaptureByteCodeRegUsePhase;
#endif
this->implicitCallBailouts = 0;
this->fieldOpts = 0;
#if DBG_DUMP
this->numDeadStore = 0;
this->numMarkTempNumber = 0;
this->numMarkTempNumberTransferred = 0;
this->numMarkTempObject = 0;
#endif
}
void
BackwardPass::DoSetDead(IR::Opnd * opnd, bool isDead) const
{
// Note: Dead bit on the Opnd records flow-based liveness.
// This is distinct from isLastUse, which records lexical last-ness.
if (isDead && this->tag == Js::BackwardPhase && !this->IsPrePass())
{
opnd->SetIsDead();
}
else if (this->tag == Js::DeadStorePhase)
{
// Set or reset the bit in DeadStorePhase.
// CSE could make a previously dead operand no longer the last use, so reset it.
opnd->SetIsDead(isDead);
}
}
bool
BackwardPass::DoByteCodeUpwardExposedUsed() const
{
return (
(this->tag == Js::DeadStorePhase && this->func->hasBailout) ||
(this->tag == Js::BackwardPhase && this->func->HasTry() && this->func->DoOptimizeTry())
#if DBG
|| tag == Js::CaptureByteCodeRegUsePhase
#endif
);
}
bool BackwardPass::DoCaptureByteCodeUpwardExposedUsed() const
{
#if DBG
return (this->tag == Js::CaptureByteCodeRegUsePhase || this->tag == Js::DeadStorePhase) &&
this->DoByteCodeUpwardExposedUsed() &&
!func->IsJitInDebugMode() &&
!this->func->GetJITFunctionBody()->IsAsmJsMode() &&
this->func->DoGlobOpt();
#else
return false;
#endif
}
bool
BackwardPass::DoMarkTempNumbers() const
{
#if FLOATVAR
return false;
#else
// Only mark temp numbers in the dead store phase
return (tag == Js::DeadStorePhase) && !PHASE_OFF(Js::MarkTempPhase, this->func) &&
!PHASE_OFF(Js::MarkTempNumberPhase, this->func) && func->DoFastPaths() && (!this->func->HasTry());
#endif
}
bool
BackwardPass::DoMarkTempObjects() const
{
// Only mark temp objects in the backward phase
return (tag == Js::BackwardPhase) && !PHASE_OFF(Js::MarkTempPhase, this->func) &&
!PHASE_OFF(Js::MarkTempObjectPhase, this->func) && func->DoGlobOpt() && func->GetHasTempObjectProducingInstr() &&
!func->IsJitInDebugMode() &&
func->DoGlobOptsForGeneratorFunc();
// Why MarkTempObject is disabled under the debugger:
// In ProcessBailOutInfo we add the non-temp locals identified as dead so far to byteCodeUpwardExposedUsed.
// This may cause MarkTempObject to convert some temps back to non-temps when it sees a 'transferred exposed use'
// from a temp to a non-temp. That conversion is in general not supported (whereas non-temp -> temp is fine).
}
bool
BackwardPass::DoMarkTempNumbersOnTempObjects() const
{
return !PHASE_OFF(Js::MarkTempNumberOnTempObjectPhase, this->func) && DoMarkTempNumbers() && this->func->GetHasMarkTempObjects();
}
#if DBG
bool
BackwardPass::DoMarkTempObjectVerify() const
{
// Only verify marked temp objects in the dead store phase
return (tag == Js::DeadStorePhase) && !PHASE_OFF(Js::MarkTempPhase, this->func) &&
!PHASE_OFF(Js::MarkTempObjectPhase, this->func) && func->DoGlobOpt() && func->GetHasTempObjectProducingInstr();
}
#endif
// static
bool
BackwardPass::DoDeadStore(Func* func)
{
return
!PHASE_OFF(Js::DeadStorePhase, func) &&
(!func->HasTry() || func->DoOptimizeTry());
}
bool
BackwardPass::DoDeadStore() const
{
return
this->tag == Js::DeadStorePhase &&
DoDeadStore(this->func);
}
bool
BackwardPass::DoDeadStoreSlots() const
{
// only dead store fields if glob opt is on to generate the trackable fields bitvector
return (tag == Js::DeadStorePhase && this->func->DoGlobOpt()
&& (!this->func->HasTry()));
}
// Whether dead store is enabled for given func and sym.
// static
bool
BackwardPass::DoDeadStore(Func* func, StackSym* sym)
{
// Dead store is disabled under debugger for non-temp local vars.
return
DoDeadStore(func) &&
!(func->IsJitInDebugMode() && sym->HasByteCodeRegSlot() && func->IsNonTempLocalVar(sym->GetByteCodeRegSlot())) &&
func->DoGlobOptsForGeneratorFunc();
}
bool
BackwardPass::DoTrackNegativeZero() const
{
return
!PHASE_OFF(Js::TrackIntUsagePhase, func) &&
!PHASE_OFF(Js::TrackNegativeZeroPhase, func) &&
func->DoGlobOpt() &&
!IsPrePass() &&
!func->IsJitInDebugMode() &&
func->DoGlobOptsForGeneratorFunc();
}
bool
BackwardPass::DoTrackBitOpsOrNumber() const
{
#if defined(_WIN32) && defined(TARGET_64)
return
!PHASE_OFF1(Js::TypedArrayVirtualPhase) &&
tag == Js::BackwardPhase &&
func->DoGlobOpt() &&
!IsPrePass() &&
!func->IsJitInDebugMode() &&
func->DoGlobOptsForGeneratorFunc();
#else
return false;
#endif
}
bool
BackwardPass::DoTrackIntOverflow() const
{
return
!PHASE_OFF(Js::TrackIntUsagePhase, func) &&
!PHASE_OFF(Js::TrackIntOverflowPhase, func) &&
tag == Js::BackwardPhase &&
!IsPrePass() &&
globOpt->DoLossyIntTypeSpec() &&
!func->IsJitInDebugMode() &&
func->DoGlobOptsForGeneratorFunc();
}
bool
BackwardPass::DoTrackCompoundedIntOverflow() const
{
return
!PHASE_OFF(Js::TrackCompoundedIntOverflowPhase, func) &&
DoTrackIntOverflow() && !func->IsTrackCompoundedIntOverflowDisabled();
}
bool
BackwardPass::DoTrackNon32BitOverflow() const
{
// enabled only for IA
#if defined(_M_IX86) || defined(_M_X64)
return true;
#else
return false;
#endif
}
void
BackwardPass::CleanupBackwardPassInfoInFlowGraph()
{
if (!this->func->m_fg->hasBackwardPassInfo)
{
// No information to clean up
return;
}
// The backward pass temp arena has already been deleted, so we can just reset the data
FOREACH_BLOCK_IN_FUNC_DEAD_OR_ALIVE(block, this->func)
{
block->upwardExposedUses = nullptr;
block->upwardExposedFields = nullptr;
block->typesNeedingKnownObjectLayout = nullptr;
block->slotDeadStoreCandidates = nullptr;
block->byteCodeUpwardExposedUsed = nullptr;
block->liveFixedFields = nullptr;
#if DBG
block->byteCodeRestoreSyms = nullptr;
block->excludeByteCodeUpwardExposedTracking = nullptr;
#endif
block->tempNumberTracker = nullptr;
block->tempObjectTracker = nullptr;
#if DBG
block->tempObjectVerifyTracker = nullptr;
#endif
block->stackSymToFinalType = nullptr;
block->stackSymToGuardedProperties = nullptr;
block->stackSymToWriteGuardsMap = nullptr;
block->cloneStrCandidates = nullptr;
block->noImplicitCallUses = nullptr;
block->noImplicitCallNoMissingValuesUses = nullptr;
block->noImplicitCallNativeArrayUses = nullptr;
block->noImplicitCallJsArrayHeadSegmentSymUses = nullptr;
block->noImplicitCallArrayLengthSymUses = nullptr;
block->couldRemoveNegZeroBailoutForDef = nullptr;
if (block->loop != nullptr)
{
block->loop->hasDeadStoreCollectionPass = false;
block->loop->hasDeadStorePrepass = false;
}
}
NEXT_BLOCK_IN_FUNC_DEAD_OR_ALIVE;
}
/*
* We insert ArgIns at the start of the function for all the formals.
* Unused formals will be dead-stored during the dead-store pass.
* We need ArgIns only for the outermost function (the inliner).
*/
void
BackwardPass::InsertArgInsForFormals()
{
if (func->IsStackArgsEnabled() && !func->GetJITFunctionBody()->HasImplicitArgIns())
{
IR::Instr * insertAfterInstr = func->m_headInstr->m_next;
AssertMsg(insertAfterInstr->IsLabelInstr(), "First instr of the first block should always be a label");
Js::ArgSlot paramsCount = insertAfterInstr->m_func->GetJITFunctionBody()->GetInParamsCount() - 1;
IR::Instr * argInInstr = nullptr;
for (Js::ArgSlot argumentIndex = 1; argumentIndex <= paramsCount; argumentIndex++)
{
IR::SymOpnd * srcOpnd;
StackSym * symSrc = StackSym::NewParamSlotSym(argumentIndex + 1, func);
StackSym * symDst = StackSym::New(func);
IR::RegOpnd * dstOpnd = IR::RegOpnd::New(symDst, TyVar, func);
func->SetArgOffset(symSrc, (argumentIndex + LowererMD::GetFormalParamOffset()) * MachPtr);
srcOpnd = IR::SymOpnd::New(symSrc, TyVar, func);
argInInstr = IR::Instr::New(Js::OpCode::ArgIn_A, dstOpnd, srcOpnd, func);
insertAfterInstr->InsertAfter(argInInstr);
insertAfterInstr = argInInstr;
AssertMsg(!func->HasStackSymForFormal(argumentIndex - 1), "Already has a stack sym for this formal?");
this->func->TrackStackSymForFormalIndex(argumentIndex - 1, symDst);
}
if (PHASE_VERBOSE_TRACE1(Js::StackArgFormalsOptPhase) && paramsCount > 0)
{
Output::Print(_u("StackArgFormals : %s (%d) :Inserting ArgIn_A for LdSlot (formals) in the start of Deadstore pass. \n"), func->GetJITFunctionBody()->GetDisplayName(), func->GetFunctionNumber());
Output::Flush();
}
}
}
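// Record the function's scope object sym as a byte-code upward-exposed use when the stack args optimization
// applies to the current instruction (DeadStorePhase only).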
void
BackwardPass::MarkScopeObjSymUseForStackArgOpt()
{
IR::Instr * instr = this->currentInstr;
if (tag == Js::DeadStorePhase)
{
if (instr->DoStackArgsOpt() && instr->m_func->GetScopeObjSym() != nullptr && this->DoByteCodeUpwardExposedUsed())
{
this->currentBlock->byteCodeUpwardExposedUsed->Set(instr->m_func->GetScopeObjSym()->m_id);
}
}
}
void
BackwardPass::ProcessBailOnStackArgsOutOfActualsRange()
{
IR::Instr * instr = this->currentInstr;
if (tag == Js::DeadStorePhase &&
(instr->m_opcode == Js::OpCode::LdElemI_A || instr->m_opcode == Js::OpCode::TypeofElem) &&
instr->HasBailOutInfo() && !IsPrePass())
{
if (instr->DoStackArgsOpt())
{
AssertMsg(instr->GetBailOutKind() & IR::BailOnStackArgsOutOfActualsRange, "Stack args bail out is not set when the optimization is turned on? ");
if (instr->GetBailOutKind() & ~IR::BailOnStackArgsOutOfActualsRange)
{
// Make sure that, in the absence of a potential LazyBailOut and BailOutOnImplicitCallsPreOp, only the BailOnStackArgsOutOfActualsRange bit is set.
Assert((BailOutInfo::WithoutLazyBailOut(instr->GetBailOutKind() & ~IR::BailOutOnImplicitCallsPreOp)) == IR::BailOnStackArgsOutOfActualsRange);
// At this point we are sure that there will not be any implicit calls, as we would not have done this optimization in the first place.
instr->SetBailOutKind(IR::BailOnStackArgsOutOfActualsRange);
}
}
else if (instr->GetBailOutKind() & IR::BailOnStackArgsOutOfActualsRange)
{
// If we decided not to do the StackArgs optimization, remove the bailout at this point.
// The bailout was set optimistically in the forward pass, and by the end of the forward pass
// stack args were turned off for some reason, so we remove the bailout here in the dead-store pass.
IR::BailOutKind bailOutKind = instr->GetBailOutKind() & ~IR::BailOnStackArgsOutOfActualsRange;
if (bailOutKind == IR::BailOutInvalid)
{
instr->ClearBailOutInfo();
}
else
{
instr->SetBailOutKind(bailOutKind);
}
}
}
}
void
BackwardPass::Optimize()
{
if (tag == Js::BackwardPhase && PHASE_OFF(tag, this->func))
{
return;
}
if (tag == Js::CaptureByteCodeRegUsePhase && (!PHASE_ENABLED(CaptureByteCodeRegUsePhase, this->func) || !DoCaptureByteCodeUpwardExposedUsed()))
{
return;
}
if (tag == Js::DeadStorePhase)
{
if (!this->func->DoLoopFastPaths() || !this->func->DoFastPaths())
{
// arguments[] access is similar to the array fast path, hence disable it when the array fast path is disabled.
// loopFastPath is always true unless explicitly disabled.
// defaultDoFastPath can be false when the source code size is huge.
func->SetHasStackArgs(false);
}
InsertArgInsForFormals();
}
NoRecoverMemoryJitArenaAllocator localAlloc(tag == Js::BackwardPhase? _u("BE-Backward") : _u("BE-DeadStore"),
this->func->m_alloc->GetPageAllocator(), Js::Throw::OutOfMemory);
this->tempAlloc = &localAlloc;
#if DBG_DUMP
if (this->IsTraceEnabled())
{
this->func->DumpHeader();
}
#endif
this->CleanupBackwardPassInfoInFlowGraph();
// Info about whether a sym is used in a way in which -0 differs from +0, or whether the sym is used in a way in which an
// int32 overflow when generating the value of the sym matters, in the current block. The info is transferred to
// instructions that define the sym in the current block as they are encountered. The info in these bit vectors is discarded
// after optimizing each block, so the only info that remains for GlobOpt is that which is transferred to instructions.
BVSparse<JitArenaAllocator> localNegativeZeroDoesNotMatterBySymId(tempAlloc);
negativeZeroDoesNotMatterBySymId = &localNegativeZeroDoesNotMatterBySymId;
BVSparse<JitArenaAllocator> localSymUsedOnlyForBitOpsBySymId(tempAlloc);
symUsedOnlyForBitOpsBySymId = &localSymUsedOnlyForBitOpsBySymId;
BVSparse<JitArenaAllocator> localSymUsedOnlyForNumberBySymId(tempAlloc);
symUsedOnlyForNumberBySymId = &localSymUsedOnlyForNumberBySymId;
BVSparse<JitArenaAllocator> localIntOverflowDoesNotMatterBySymId(tempAlloc);
intOverflowDoesNotMatterBySymId = &localIntOverflowDoesNotMatterBySymId;
BVSparse<JitArenaAllocator> localIntOverflowDoesNotMatterInRangeBySymId(tempAlloc);
intOverflowDoesNotMatterInRangeBySymId = &localIntOverflowDoesNotMatterInRangeBySymId;
BVSparse<JitArenaAllocator> localCandidateSymsRequiredToBeInt(tempAlloc);
candidateSymsRequiredToBeInt = &localCandidateSymsRequiredToBeInt;
BVSparse<JitArenaAllocator> localCandidateSymsRequiredToBeLossyInt(tempAlloc);
candidateSymsRequiredToBeLossyInt = &localCandidateSymsRequiredToBeLossyInt;
intOverflowCurrentlyMattersInRange = true;
FloatSymEquivalenceMap localFloatSymEquivalenceMap(tempAlloc);
floatSymEquivalenceMap = &localFloatSymEquivalenceMap;
NumberTempRepresentativePropertySymMap localNumberTempRepresentativePropertySym(tempAlloc);
numberTempRepresentativePropertySym = &localNumberTempRepresentativePropertySym;
FOREACH_BLOCK_BACKWARD_IN_FUNC_DEAD_OR_ALIVE(block, this->func)
{
this->OptBlock(block);
}
NEXT_BLOCK_BACKWARD_IN_FUNC_DEAD_OR_ALIVE;
if (this->tag == Js::DeadStorePhase && !PHASE_OFF(Js::MemOpPhase, this->func))
{
this->RemoveEmptyLoops();
}
this->func->m_fg->hasBackwardPassInfo = true;
if(DoTrackCompoundedIntOverflow())
{
// Tracking int overflow makes use of a scratch field in stack syms, which needs to be cleared
func->m_symTable->ClearStackSymScratch();
}
#if DBG_DUMP
if (PHASE_STATS(this->tag, this->func))
{
this->func->DumpHeader();
Output::Print(this->tag == Js::BackwardPhase? _u("Backward Phase Stats:\n") : _u("Deadstore Phase Stats:\n"));
if (this->DoDeadStore())
{
Output::Print(_u(" Deadstore : %3d\n"), this->numDeadStore);
}
if (this->DoMarkTempNumbers())
{
Output::Print(_u(" Temp Number : %3d\n"), this->numMarkTempNumber);
Output::Print(_u(" Transferred Temp Number: %3d\n"), this->numMarkTempNumberTransferred);
}
if (this->DoMarkTempObjects())
{
Output::Print(_u(" Temp Object : %3d\n"), this->numMarkTempObject);
}
}
#endif
}
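// Merge the backward-pass data flow information from all of a block's successors into fresh structures for this
// block: upward-exposed uses/fields are unioned, slotDeadStoreCandidates and couldRemoveNegZeroBailoutForDef are
// intersected, byte-code uses from dead successors are also merged, and a successor's data is released once its
// use count drops to zero.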
void
BackwardPass::MergeSuccBlocksInfo(BasicBlock * block)
{
// Can't reuse the bv in the current block, because its successor can be itself.
TempNumberTracker * tempNumberTracker = nullptr;
TempObjectTracker * tempObjectTracker = nullptr;
#if DBG
TempObjectVerifyTracker * tempObjectVerifyTracker = nullptr;
#endif
HashTable<AddPropertyCacheBucket> * stackSymToFinalType = nullptr;
HashTable<ObjTypeGuardBucket> * stackSymToGuardedProperties = nullptr;
HashTable<ObjWriteGuardBucket> * stackSymToWriteGuardsMap = nullptr;
BVSparse<JitArenaAllocator> * cloneStrCandidates = nullptr;
BVSparse<JitArenaAllocator> * noImplicitCallUses = nullptr;
BVSparse<JitArenaAllocator> * noImplicitCallNoMissingValuesUses = nullptr;
BVSparse<JitArenaAllocator> * noImplicitCallNativeArrayUses = nullptr;
BVSparse<JitArenaAllocator> * noImplicitCallJsArrayHeadSegmentSymUses = nullptr;
BVSparse<JitArenaAllocator> * noImplicitCallArrayLengthSymUses = nullptr;
BVSparse<JitArenaAllocator> * upwardExposedUses = nullptr;
BVSparse<JitArenaAllocator> * upwardExposedFields = nullptr;
BVSparse<JitArenaAllocator> * typesNeedingKnownObjectLayout = nullptr;
BVSparse<JitArenaAllocator> * slotDeadStoreCandidates = nullptr;
BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed = nullptr;
BVSparse<JitArenaAllocator> * couldRemoveNegZeroBailoutForDef = nullptr;
BVSparse<JitArenaAllocator> * liveFixedFields = nullptr;
#if DBG
uint byteCodeLocalsCount = func->GetJITFunctionBody()->GetLocalsCount();
StackSym ** byteCodeRestoreSyms = nullptr;
BVSparse<JitArenaAllocator> * excludeByteCodeUpwardExposedTracking = nullptr;
#endif
Assert(!block->isDead || block->GetSuccList()->Empty());
if (this->DoByteCodeUpwardExposedUsed())
{
byteCodeUpwardExposedUsed = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
#if DBG
byteCodeRestoreSyms = JitAnewArrayZ(this->tempAlloc, StackSym *, byteCodeLocalsCount);
excludeByteCodeUpwardExposedTracking = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
#endif
}
#if DBG
if (!IsCollectionPass() && this->DoMarkTempObjectVerify())
{
tempObjectVerifyTracker = JitAnew(this->tempAlloc, TempObjectVerifyTracker, this->tempAlloc, block->loop != nullptr);
}
#endif
if (!block->isDead)
{
bool keepUpwardExposed = (this->tag == Js::BackwardPhase);
JitArenaAllocator *upwardExposedArena = nullptr;
if(!IsCollectionPass())
{
upwardExposedArena = keepUpwardExposed ? this->globOpt->alloc : this->tempAlloc;
upwardExposedUses = JitAnew(upwardExposedArena, BVSparse<JitArenaAllocator>, upwardExposedArena);
upwardExposedFields = JitAnew(upwardExposedArena, BVSparse<JitArenaAllocator>, upwardExposedArena);
if (this->tag == Js::DeadStorePhase)
{
liveFixedFields = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
typesNeedingKnownObjectLayout = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
}
if (this->DoDeadStoreSlots())
{
slotDeadStoreCandidates = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
}
if (this->DoMarkTempNumbers())
{
tempNumberTracker = JitAnew(this->tempAlloc, TempNumberTracker, this->tempAlloc, block->loop != nullptr);
}
if (this->DoMarkTempObjects())
{
tempObjectTracker = JitAnew(this->tempAlloc, TempObjectTracker, this->tempAlloc, block->loop != nullptr);
}
noImplicitCallUses = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
noImplicitCallNoMissingValuesUses = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
noImplicitCallNativeArrayUses = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
noImplicitCallJsArrayHeadSegmentSymUses = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
noImplicitCallArrayLengthSymUses = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
if (this->tag == Js::BackwardPhase)
{
cloneStrCandidates = JitAnew(this->globOpt->alloc, BVSparse<JitArenaAllocator>, this->globOpt->alloc);
}
else
{
couldRemoveNegZeroBailoutForDef = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
}
}
bool firstSucc = true;
FOREACH_SUCCESSOR_BLOCK(blockSucc, block)
{
#if defined(DBG_DUMP) || defined(ENABLE_DEBUG_CONFIG_OPTIONS)
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
#endif
// Save byteCodeUpwardExposedUsed from being deleted for the block right after the memop loop
if (this->tag == Js::DeadStorePhase && !this->IsPrePass() && globOpt->HasMemOp(block->loop) && blockSucc->loop != block->loop)
{
Assert(block->loop->memOpInfo->inductionVariablesUsedAfterLoop == nullptr);
block->loop->memOpInfo->inductionVariablesUsedAfterLoop = JitAnew(this->tempAlloc, BVSparse<JitArenaAllocator>, this->tempAlloc);
block->loop->memOpInfo->inductionVariablesUsedAfterLoop->Or(blockSucc->byteCodeUpwardExposedUsed);
block->loop->memOpInfo->inductionVariablesUsedAfterLoop->Or(blockSucc->upwardExposedUses);
}
bool deleteData = false;
if (!blockSucc->isLoopHeader && blockSucc->backwardPassCurrentLoop == this->currentPrePassLoop)
{
Assert(blockSucc->GetDataUseCount() != 0);
deleteData = (blockSucc->DecrementDataUseCount() == 0);
if (blockSucc->GetFirstInstr()->m_next->m_opcode == Js::OpCode::SpeculatedLoadFence)
{
// We hold on to data for these blocks until the arena gets cleared due to unusual data lifetimes.
deleteData = false;
blockSucc->IncrementDataUseCount();
}
}
#if DBG
if (excludeByteCodeUpwardExposedTracking && blockSucc->excludeByteCodeUpwardExposedTracking)
{
excludeByteCodeUpwardExposedTracking->Or(blockSucc->excludeByteCodeUpwardExposedTracking);
}
#endif
Assert((byteCodeUpwardExposedUsed == nullptr) == !this->DoByteCodeUpwardExposedUsed());
if (byteCodeUpwardExposedUsed && blockSucc->byteCodeUpwardExposedUsed)
{
byteCodeUpwardExposedUsed->Or(blockSucc->byteCodeUpwardExposedUsed);
if (this->tag == Js::DeadStorePhase)
{
#if DBG
for (uint i = 0; i < byteCodeLocalsCount; i++)
{
if (byteCodeRestoreSyms[i] == nullptr)
{
byteCodeRestoreSyms[i] = blockSucc->byteCodeRestoreSyms[i];
}
else
{
Assert(blockSucc->byteCodeRestoreSyms[i] == nullptr
|| byteCodeRestoreSyms[i] == blockSucc->byteCodeRestoreSyms[i]);
}
}
#endif
if (deleteData)
{
// byteCodeUpwardExposedUsed is required to populate the writeThroughSymbolsSet for the try region. So, don't delete it in the backwards pass.
JitAdelete(this->tempAlloc, blockSucc->byteCodeUpwardExposedUsed);
blockSucc->byteCodeUpwardExposedUsed = nullptr;
}
}
#if DBG
if (deleteData)
{
JitAdeleteArray(this->tempAlloc, byteCodeLocalsCount, blockSucc->byteCodeRestoreSyms);
blockSucc->byteCodeRestoreSyms = nullptr;
JitAdelete(this->tempAlloc, blockSucc->excludeByteCodeUpwardExposedTracking);
blockSucc->excludeByteCodeUpwardExposedTracking = nullptr;
}
#endif
}
else
{
Assert(blockSucc->byteCodeUpwardExposedUsed == nullptr);
Assert(blockSucc->byteCodeRestoreSyms == nullptr);
Assert(blockSucc->excludeByteCodeUpwardExposedTracking == nullptr);
}
if(IsCollectionPass())
{
continue;
}
Assert((blockSucc->upwardExposedUses != nullptr)
|| (blockSucc->isLoopHeader && (this->IsPrePass() || blockSucc->loop->IsDescendentOrSelf(block->loop))));
Assert((blockSucc->upwardExposedFields != nullptr)
|| (blockSucc->isLoopHeader && (this->IsPrePass() || blockSucc->loop->IsDescendentOrSelf(block->loop))));
Assert((blockSucc->typesNeedingKnownObjectLayout != nullptr)
|| (blockSucc->isLoopHeader && (this->IsPrePass() || blockSucc->loop->IsDescendentOrSelf(block->loop)))
|| this->tag != Js::DeadStorePhase);
Assert((blockSucc->slotDeadStoreCandidates != nullptr)
|| (blockSucc->isLoopHeader && (this->IsPrePass() || blockSucc->loop->IsDescendentOrSelf(block->loop)))
|| !this->DoDeadStoreSlots());
Assert((blockSucc->tempNumberTracker != nullptr)
|| (blockSucc->isLoopHeader && (this->IsPrePass() || blockSucc->loop->IsDescendentOrSelf(block->loop)))
|| !this->DoMarkTempNumbers());
Assert((blockSucc->tempObjectTracker != nullptr)
|| (blockSucc->isLoopHeader && (this->IsPrePass() || blockSucc->loop->IsDescendentOrSelf(block->loop)))
|| !this->DoMarkTempObjects());
Assert((blockSucc->tempObjectVerifyTracker != nullptr)
|| (blockSucc->isLoopHeader && (this->IsPrePass() || blockSucc->loop->IsDescendentOrSelf(block->loop)))
|| !this->DoMarkTempObjectVerify());
if (this->tag == Js::DeadStorePhase && blockSucc->liveFixedFields != nullptr)
{
liveFixedFields->Or(blockSucc->liveFixedFields);
JitAdelete(this->tempAlloc, blockSucc->liveFixedFields);
blockSucc->liveFixedFields = nullptr;
}
if (blockSucc->upwardExposedUses != nullptr)
{
upwardExposedUses->Or(blockSucc->upwardExposedUses);
if (deleteData && (!keepUpwardExposed
|| (this->IsPrePass() && blockSucc->backwardPassCurrentLoop == this->currentPrePassLoop)))
{
JitAdelete(upwardExposedArena, blockSucc->upwardExposedUses);
blockSucc->upwardExposedUses = nullptr;
}
}
if (blockSucc->upwardExposedFields != nullptr)
{
upwardExposedFields->Or(blockSucc->upwardExposedFields);
if (deleteData && (!keepUpwardExposed
|| (this->IsPrePass() && blockSucc->backwardPassCurrentLoop == this->currentPrePassLoop)))
{
JitAdelete(upwardExposedArena, blockSucc->upwardExposedFields);
blockSucc->upwardExposedFields = nullptr;
}
}
if (blockSucc->typesNeedingKnownObjectLayout != nullptr)
{
typesNeedingKnownObjectLayout->Or(blockSucc->typesNeedingKnownObjectLayout);
if (deleteData)
{
JitAdelete(this->tempAlloc, blockSucc->typesNeedingKnownObjectLayout);
blockSucc->typesNeedingKnownObjectLayout = nullptr;
}
}
if (blockSucc->slotDeadStoreCandidates != nullptr)
{
slotDeadStoreCandidates->And(blockSucc->slotDeadStoreCandidates);
if (deleteData)
{
JitAdelete(this->tempAlloc, blockSucc->slotDeadStoreCandidates);
blockSucc->slotDeadStoreCandidates = nullptr;
}
}
if (blockSucc->tempNumberTracker != nullptr)
{
Assert((blockSucc->loop != nullptr) == blockSucc->tempNumberTracker->HasTempTransferDependencies());
tempNumberTracker->MergeData(blockSucc->tempNumberTracker, deleteData);
if (deleteData)
{
blockSucc->tempNumberTracker = nullptr;
}
}
if (blockSucc->tempObjectTracker != nullptr)
{
Assert((blockSucc->loop != nullptr) == blockSucc->tempObjectTracker->HasTempTransferDependencies());
tempObjectTracker->MergeData(blockSucc->tempObjectTracker, deleteData);
if (deleteData)
{
blockSucc->tempObjectTracker = nullptr;
}
}
#if DBG
if (blockSucc->tempObjectVerifyTracker != nullptr)
{
Assert((blockSucc->loop != nullptr) == blockSucc->tempObjectVerifyTracker->HasTempTransferDependencies());
tempObjectVerifyTracker->MergeData(blockSucc->tempObjectVerifyTracker, deleteData);
if (deleteData)
{
blockSucc->tempObjectVerifyTracker = nullptr;
}
}
#endif
PHASE_PRINT_TRACE(Js::ObjTypeSpecStorePhase, this->func,
_u("ObjTypeSpecStore: func %s, edge %d => %d: "),
this->func->GetDebugNumberSet(debugStringBuffer),
block->GetBlockNum(), blockSucc->GetBlockNum());
auto fixupFrom = [block, blockSucc, upwardExposedUses, this](Bucket<AddPropertyCacheBucket> &bucket)
{
AddPropertyCacheBucket *fromData = &bucket.element;
if (fromData->GetInitialType() == nullptr ||
fromData->GetFinalType() == fromData->GetInitialType())
{
return;
}
this->InsertTypeTransitionsAtPriorSuccessors(block, blockSucc, bucket.value, fromData, upwardExposedUses);
};
auto fixupTo = [blockSucc, upwardExposedUses, this](Bucket<AddPropertyCacheBucket> &bucket)
{
AddPropertyCacheBucket *toData = &bucket.element;
if (toData->GetInitialType() == nullptr ||
toData->GetFinalType() == toData->GetInitialType())
{
return;
}
this->InsertTypeTransitionAtBlock(blockSucc, bucket.value, toData, upwardExposedUses);
};
if (blockSucc->stackSymToFinalType != nullptr)
{
#if DBG_DUMP
if (PHASE_TRACE(Js::ObjTypeSpecStorePhase, this->func))
{
blockSucc->stackSymToFinalType->Dump();
}
#endif
if (firstSucc)
{
stackSymToFinalType = blockSucc->stackSymToFinalType->Copy();
}
else if (stackSymToFinalType != nullptr)
{
if (this->IsPrePass())
{
stackSymToFinalType->And(blockSucc->stackSymToFinalType);
}
else
{
// Insert any type transitions that can't be merged past this point.
stackSymToFinalType->AndWithFixup(blockSucc->stackSymToFinalType, fixupFrom, fixupTo);
}
}
else if (!this->IsPrePass())
{
FOREACH_HASHTABLE_ENTRY(AddPropertyCacheBucket, bucket, blockSucc->stackSymToFinalType)
{
fixupTo(bucket);
}
NEXT_HASHTABLE_ENTRY;
}
if (deleteData)
{
blockSucc->stackSymToFinalType->Delete();
blockSucc->stackSymToFinalType = nullptr;
}
}
else
{
PHASE_PRINT_TRACE(Js::ObjTypeSpecStorePhase, this->func, _u("null\n"));
if (stackSymToFinalType)
{
if (!this->IsPrePass())
{
FOREACH_HASHTABLE_ENTRY(AddPropertyCacheBucket, bucket, stackSymToFinalType)
{
fixupFrom(bucket);
}
NEXT_HASHTABLE_ENTRY;
}
stackSymToFinalType->Delete();
stackSymToFinalType = nullptr;
}
}
if (tag == Js::BackwardPhase)
{
if (blockSucc->cloneStrCandidates != nullptr)
{
Assert(cloneStrCandidates != nullptr);
cloneStrCandidates->Or(blockSucc->cloneStrCandidates);
if (deleteData)
{
JitAdelete(this->globOpt->alloc, blockSucc->cloneStrCandidates);
blockSucc->cloneStrCandidates = nullptr;
}
}
#if DBG_DUMP
if (PHASE_VERBOSE_TRACE(Js::TraceObjTypeSpecWriteGuardsPhase, this->func))
{
char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
Output::Print(_u("ObjTypeSpec: top function %s (%s), function %s (%s), write guard symbols on edge %d => %d: "),
this->func->GetTopFunc()->GetJITFunctionBody()->GetDisplayName(),
this->func->GetTopFunc()->GetDebugNumberSet(debugStringBuffer),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer2), block->GetBlockNum(),
blockSucc->GetBlockNum());
}
#endif
if (blockSucc->stackSymToWriteGuardsMap != nullptr)
{
#if DBG_DUMP
if (PHASE_VERBOSE_TRACE(Js::TraceObjTypeSpecWriteGuardsPhase, this->func))
{
Output::Print(_u("\n"));
blockSucc->stackSymToWriteGuardsMap->Dump();
}
#endif
if (stackSymToWriteGuardsMap == nullptr)
{
stackSymToWriteGuardsMap = blockSucc->stackSymToWriteGuardsMap->Copy();
}
else
{
stackSymToWriteGuardsMap->Or(
blockSucc->stackSymToWriteGuardsMap, &BackwardPass::MergeWriteGuards);
}
if (deleteData)
{
blockSucc->stackSymToWriteGuardsMap->Delete();
blockSucc->stackSymToWriteGuardsMap = nullptr;
}
}
else
{
#if DBG_DUMP
if (PHASE_VERBOSE_TRACE(Js::TraceObjTypeSpecWriteGuardsPhase, this->func))
{
Output::Print(_u("null\n"));
}
#endif
}
}
else
{
#if DBG_DUMP
if (PHASE_VERBOSE_TRACE(Js::TraceObjTypeSpecTypeGuardsPhase, this->func))
{
char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
Output::Print(_u("ObjTypeSpec: top function %s (%s), function %s (%s), guarded property operations on edge %d => %d: \n"),
this->func->GetTopFunc()->GetJITFunctionBody()->GetDisplayName(),
this->func->GetTopFunc()->GetDebugNumberSet(debugStringBuffer),
this->func->GetJITFunctionBody()->GetDisplayName(),
this->func->GetDebugNumberSet(debugStringBuffer2),
block->GetBlockNum(), blockSucc->GetBlockNum());
}
#endif
if (blockSucc->stackSymToGuardedProperties != nullptr)
{
#if DBG_DUMP
if (PHASE_VERBOSE_TRACE(Js::TraceObjTypeSpecTypeGuardsPhase, this->func))
{
blockSucc->stackSymToGuardedProperties->Dump();
Output::Print(_u("\n"));
}
#endif
if (stackSymToGuardedProperties == nullptr)
{
stackSymToGuardedProperties = blockSucc->stackSymToGuardedProperties->Copy();
}
else
{
stackSymToGuardedProperties->Or(
blockSucc->stackSymToGuardedProperties, &BackwardPass::MergeGuardedProperties);
}
if (deleteData)
{
blockSucc->stackSymToGuardedProperties->Delete();
blockSucc->stackSymToGuardedProperties = nullptr;
}
}
else
{
#if DBG_DUMP
if (PHASE_VERBOSE_TRACE(Js::TraceObjTypeSpecTypeGuardsPhase, this->func))
{
Output::Print(_u("null\n"));
}
#endif
}
if (blockSucc->couldRemoveNegZeroBailoutForDef != nullptr)
{
couldRemoveNegZeroBailoutForDef->And(blockSucc->couldRemoveNegZeroBailoutForDef);
if (deleteData)
{
JitAdelete(this->tempAlloc, blockSucc->couldRemoveNegZeroBailoutForDef);
blockSucc->couldRemoveNegZeroBailoutForDef = nullptr;
}
}
}
if (blockSucc->noImplicitCallUses != nullptr)
{
noImplicitCallUses->Or(blockSucc->noImplicitCallUses);
if (deleteData)
{
JitAdelete(this->tempAlloc, blockSucc->noImplicitCallUses);
blockSucc->noImplicitCallUses = nullptr;
}
}
if (blockSucc->noImplicitCallNoMissingValuesUses != nullptr)
{
noImplicitCallNoMissingValuesUses->Or(blockSucc->noImplicitCallNoMissingValuesUses);
if (deleteData)
{
JitAdelete(this->tempAlloc, blockSucc->noImplicitCallNoMissingValuesUses);
blockSucc->noImplicitCallNoMissingValuesUses = nullptr;
}
}
if (blockSucc->noImplicitCallNativeArrayUses != nullptr)
{
noImplicitCallNativeArrayUses->Or(blockSucc->noImplicitCallNativeArrayUses);
if (deleteData)
{
JitAdelete(this->tempAlloc, blockSucc->noImplicitCallNativeArrayUses);
blockSucc->noImplicitCallNativeArrayUses = nullptr;
}
}
if (blockSucc->noImplicitCallJsArrayHeadSegmentSymUses != nullptr)
{
noImplicitCallJsArrayHeadSegmentSymUses->Or(blockSucc->noImplicitCallJsArrayHeadSegmentSymUses);
if (deleteData)
{
JitAdelete(this->tempAlloc, blockSucc->noImplicitCallJsArrayHeadSegmentSymUses);
blockSucc->noImplicitCallJsArrayHeadSegmentSymUses = nullptr;
}
}
if (blockSucc->noImplicitCallArrayLengthSymUses != nullptr)
{
noImplicitCallArrayLengthSymUses->Or(blockSucc->noImplicitCallArrayLengthSymUses);
if (deleteData)
{
JitAdelete(this->tempAlloc, blockSucc->noImplicitCallArrayLengthSymUses);
blockSucc->noImplicitCallArrayLengthSymUses = nullptr;
}
}
firstSucc = false;
}
NEXT_SUCCESSOR_BLOCK;
#if DBG_DUMP
char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
if (PHASE_TRACE(Js::ObjTypeSpecStorePhase, this->func))
{
Output::Print(_u("ObjTypeSpecStore: func %s, block %d: "),
this->func->GetDebugNumberSet(debugStringBuffer),
block->GetBlockNum());
if (stackSymToFinalType)
{
stackSymToFinalType->Dump();
}
else
{
Output::Print(_u("null\n"));
}
}
if (PHASE_TRACE(Js::TraceObjTypeSpecTypeGuardsPhase, this->func))
{
Output::Print(_u("ObjTypeSpec: func %s, block %d, guarded properties:\n"),
this->func->GetDebugNumberSet(debugStringBuffer), block->GetBlockNum());
if (stackSymToGuardedProperties)
{
stackSymToGuardedProperties->Dump();
Output::Print(_u("\n"));
}
else
{
Output::Print(_u("null\n"));
}
}
if (PHASE_TRACE(Js::TraceObjTypeSpecWriteGuardsPhase, this->func))
{
Output::Print(_u("ObjTypeSpec: func %s, block %d, write guards: "),
this->func->GetDebugNumberSet(debugStringBuffer), block->GetBlockNum());
if (stackSymToWriteGuardsMap)
{
Output::Print(_u("\n"));
stackSymToWriteGuardsMap->Dump();
Output::Print(_u("\n"));
}
else
{
Output::Print(_u("null\n"));
}
}
#endif
}
#if DBG
if (tempObjectVerifyTracker)
{
FOREACH_DEAD_SUCCESSOR_BLOCK(deadBlockSucc, block)
{
Assert(deadBlockSucc->tempObjectVerifyTracker || deadBlockSucc->isLoopHeader);
if (deadBlockSucc->tempObjectVerifyTracker != nullptr)
{
Assert((deadBlockSucc->loop != nullptr) == deadBlockSucc->tempObjectVerifyTracker->HasTempTransferDependencies());
// Dead blocks don't affect non-temp uses; we only need to carry the removed-use bit vector forward
// and add all the upward-exposed uses to the set of syms that we might find to be mark-temp
// after globopt
tempObjectVerifyTracker->MergeDeadData(deadBlockSucc);
}
if (!byteCodeUpwardExposedUsed)
{
if (!deadBlockSucc->isLoopHeader && deadBlockSucc->backwardPassCurrentLoop == this->currentPrePassLoop)
{
Assert(deadBlockSucc->GetDataUseCount() != 0);
if (deadBlockSucc->DecrementDataUseCount() == 0)
{
this->DeleteBlockData(deadBlockSucc);
}
}
}
}
NEXT_DEAD_SUCCESSOR_BLOCK;
}
#endif
if (byteCodeUpwardExposedUsed)
{
FOREACH_DEAD_SUCCESSOR_BLOCK(deadBlockSucc, block)
{
#if DBG
if (excludeByteCodeUpwardExposedTracking && deadBlockSucc->excludeByteCodeUpwardExposedTracking)
{
excludeByteCodeUpwardExposedTracking->Or(deadBlockSucc->excludeByteCodeUpwardExposedTracking);
}
#endif
Assert(deadBlockSucc->byteCodeUpwardExposedUsed || deadBlockSucc->isLoopHeader);
if (deadBlockSucc->byteCodeUpwardExposedUsed)
{
byteCodeUpwardExposedUsed->Or(deadBlockSucc->byteCodeUpwardExposedUsed);
if (this->tag == Js::DeadStorePhase)
{
#if DBG
for (uint i = 0; i < byteCodeLocalsCount; i++)
{
if (byteCodeRestoreSyms[i] == nullptr)
{
byteCodeRestoreSyms[i] = deadBlockSucc->byteCodeRestoreSyms[i];
}
else
{
Assert(deadBlockSucc->byteCodeRestoreSyms[i] == nullptr
|| byteCodeRestoreSyms[i] == deadBlockSucc->byteCodeRestoreSyms[i]);
}
}
#endif
}
}
if (!deadBlockSucc->isLoopHeader && deadBlockSucc->backwardPassCurrentLoop == this->currentPrePassLoop)
{
Assert(deadBlockSucc->GetDataUseCount() != 0);
if (deadBlockSucc->DecrementDataUseCount() == 0)
{
this->DeleteBlockData(deadBlockSucc);
}
}
}
NEXT_DEAD_SUCCESSOR_BLOCK;
}
if (block->isLoopHeader)
{
this->DeleteBlockData(block);
}
else
{
if(block->GetDataUseCount() == 0)
{
Assert(block->slotDeadStoreCandidates == nullptr);
Assert(block->tempNumberTracker == nullptr);
Assert(block->tempObjectTracker == nullptr);
Assert(block->tempObjectVerifyTracker == nullptr);
Assert(block->upwardExposedUses == nullptr);
Assert(block->upwardExposedFields == nullptr);
Assert(block->typesNeedingKnownObjectLayout == nullptr);
// byteCodeUpwardExposedUsed is required to populate the writeThroughSymbolsSet for the try region in the backwards pass
Assert(block->byteCodeUpwardExposedUsed == nullptr || (this->DoByteCodeUpwardExposedUsed()));
Assert(block->byteCodeRestoreSyms == nullptr);
Assert(block->excludeByteCodeUpwardExposedTracking == nullptr || (this->DoByteCodeUpwardExposedUsed()));
Assert(block->stackSymToFinalType == nullptr);
Assert(block->stackSymToGuardedProperties == nullptr);
Assert(block->stackSymToWriteGuardsMap == nullptr);
Assert(block->cloneStrCandidates == nullptr);
Assert(block->noImplicitCallUses == nullptr);
Assert(block->noImplicitCallNoMissingValuesUses == nullptr);
Assert(block->noImplicitCallNativeArrayUses == nullptr);
Assert(block->noImplicitCallJsArrayHeadSegmentSymUses == nullptr);
Assert(block->noImplicitCallArrayLengthSymUses == nullptr);
Assert(block->couldRemoveNegZeroBailoutForDef == nullptr);
}
else
{
// The collection pass sometimes does not know whether it can delete a successor block's data, so it may leave some
// blocks with data intact. Delete the block data now.
Assert(block->backwardPassCurrentLoop);
Assert(block->backwardPassCurrentLoop->hasDeadStoreCollectionPass);
// The two situations where we might be keeping data around are either before we do
// the prepass, or when we're storing the data because we have a speculation-cancel
// block, which has longer lifetimes for its data.
Assert(!block->backwardPassCurrentLoop->hasDeadStorePrepass || block->GetFirstInstr()->m_next->m_opcode == Js::OpCode::SpeculatedLoadFence);
DeleteBlockData(block);
}
block->backwardPassCurrentLoop = this->currentPrePassLoop;
if (this->DoByteCodeUpwardExposedUsed()
#if DBG
|| this->DoMarkTempObjectVerify()
#endif
)
{
block->SetDataUseCount(block->GetPredList()->Count() + block->GetDeadPredList()->Count());
}
else
{
block->SetDataUseCount(block->GetPredList()->Count());
}
}
block->upwardExposedUses = upwardExposedUses;
block->upwardExposedFields = upwardExposedFields;
block->typesNeedingKnownObjectLayout = typesNeedingKnownObjectLayout;
block->byteCodeUpwardExposedUsed = byteCodeUpwardExposedUsed;
#if DBG
block->byteCodeRestoreSyms = byteCodeRestoreSyms;
block->excludeByteCodeUpwardExposedTracking = excludeByteCodeUpwardExposedTracking;
#endif
block->slotDeadStoreCandidates = slotDeadStoreCandidates;
block->tempNumberTracker = tempNumberTracker;
block->tempObjectTracker = tempObjectTracker;
#if DBG
block->tempObjectVerifyTracker = tempObjectVerifyTracker;
#endif
block->stackSymToFinalType = stackSymToFinalType;
block->stackSymToGuardedProperties = stackSymToGuardedProperties;
block->stackSymToWriteGuardsMap = stackSymToWriteGuardsMap;
block->cloneStrCandidates = cloneStrCandidates;
block->noImplicitCallUses = noImplicitCallUses;
block->noImplicitCallNoMissingValuesUses = noImplicitCallNoMissingValuesUses;
block->noImplicitCallNativeArrayUses = noImplicitCallNativeArrayUses;
block->noImplicitCallJsArrayHeadSegmentSymUses = noImplicitCallJsArrayHeadSegmentSymUses;
block->noImplicitCallArrayLengthSymUses = noImplicitCallArrayLengthSymUses;
block->couldRemoveNegZeroBailoutForDef = couldRemoveNegZeroBailoutForDef;
block->liveFixedFields = liveFixedFields;
}
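// Merge two guarded-property buckets coming from different successor edges: the guarded property operations are
// unioned, and if either bucket requires a monomorphic type check, the merged bucket keeps that guard type
// (both buckets must agree on it if both need the check).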
ObjTypeGuardBucket
BackwardPass::MergeGuardedProperties(ObjTypeGuardBucket bucket1, ObjTypeGuardBucket bucket2)
{
BVSparse<JitArenaAllocator> *guardedPropertyOps1 = bucket1.GetGuardedPropertyOps();
BVSparse<JitArenaAllocator> *guardedPropertyOps2 = bucket2.GetGuardedPropertyOps();
Assert(guardedPropertyOps1 || guardedPropertyOps2);
BVSparse<JitArenaAllocator> *mergedPropertyOps;
if (guardedPropertyOps1)
{
mergedPropertyOps = guardedPropertyOps1->CopyNew();
if (guardedPropertyOps2)
{
mergedPropertyOps->Or(guardedPropertyOps2);
}
}
else
{
mergedPropertyOps = guardedPropertyOps2->CopyNew();
}
ObjTypeGuardBucket bucket;
bucket.SetGuardedPropertyOps(mergedPropertyOps);
JITTypeHolder monoGuardType = bucket1.GetMonoGuardType();
if (monoGuardType != nullptr)
{
Assert(!bucket2.NeedsMonoCheck() || monoGuardType == bucket2.GetMonoGuardType());
}
else
{
monoGuardType = bucket2.GetMonoGuardType();
}
bucket.SetMonoGuardType(monoGuardType);
return bucket;
}
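// Merge two write-guard buckets from different successor edges by unioning their write-guard sets.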
ObjWriteGuardBucket
BackwardPass::MergeWriteGuards(ObjWriteGuardBucket bucket1, ObjWriteGuardBucket bucket2)
{
BVSparse<JitArenaAllocator> *writeGuards1 = bucket1.GetWriteGuards();
BVSparse<JitArenaAllocator> *writeGuards2 = bucket2.GetWriteGuards();
Assert(writeGuards1 || writeGuards2);
BVSparse<JitArenaAllocator> *mergedWriteGuards;
if (writeGuards1)
{
mergedWriteGuards = writeGuards1->CopyNew();
if (writeGuards2)
{
mergedWriteGuards->Or(writeGuards2);
}
}
else
{
mergedWriteGuards = writeGuards2->CopyNew();
}
ObjWriteGuardBucket bucket;
bucket.SetWriteGuards(mergedWriteGuards);
return bucket;
}
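// Release all of a block's backward-pass data back to its owning arena. upwardExposedUses, upwardExposedFields
// and cloneStrCandidates are allocated from the GlobOpt arena during the Backward phase; everything else comes
// from this pass's temp arena.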
void
BackwardPass::DeleteBlockData(BasicBlock * block)
{
if (block->slotDeadStoreCandidates != nullptr)
{
JitAdelete(this->tempAlloc, block->slotDeadStoreCandidates);
block->slotDeadStoreCandidates = nullptr;
}
if (block->tempNumberTracker != nullptr)
{
JitAdelete(this->tempAlloc, block->tempNumberTracker);
block->tempNumberTracker = nullptr;
}
if (block->tempObjectTracker != nullptr)
{
JitAdelete(this->tempAlloc, block->tempObjectTracker);
block->tempObjectTracker = nullptr;
}
#if DBG
if (block->tempObjectVerifyTracker != nullptr)
{
JitAdelete(this->tempAlloc, block->tempObjectVerifyTracker);
block->tempObjectVerifyTracker = nullptr;
}
#endif
if (block->stackSymToFinalType != nullptr)
{
block->stackSymToFinalType->Delete();
block->stackSymToFinalType = nullptr;
}
if (block->stackSymToGuardedProperties != nullptr)
{
block->stackSymToGuardedProperties->Delete();
block->stackSymToGuardedProperties = nullptr;
}
if (block->stackSymToWriteGuardsMap != nullptr)
{
block->stackSymToWriteGuardsMap->Delete();
block->stackSymToWriteGuardsMap = nullptr;
}
if (block->cloneStrCandidates != nullptr)
{
Assert(this->tag == Js::BackwardPhase);
JitAdelete(this->globOpt->alloc, block->cloneStrCandidates);
block->cloneStrCandidates = nullptr;
}
if (block->noImplicitCallUses != nullptr)
{
JitAdelete(this->tempAlloc, block->noImplicitCallUses);
block->noImplicitCallUses = nullptr;
}
if (block->noImplicitCallNoMissingValuesUses != nullptr)
{
JitAdelete(this->tempAlloc, block->noImplicitCallNoMissingValuesUses);
block->noImplicitCallNoMissingValuesUses = nullptr;
}
if (block->noImplicitCallNativeArrayUses != nullptr)
{
JitAdelete(this->tempAlloc, block->noImplicitCallNativeArrayUses);
block->noImplicitCallNativeArrayUses = nullptr;
}
if (block->noImplicitCallJsArrayHeadSegmentSymUses != nullptr)
{
JitAdelete(this->tempAlloc, block->noImplicitCallJsArrayHeadSegmentSymUses);
block->noImplicitCallJsArrayHeadSegmentSymUses = nullptr;
}
if (block->noImplicitCallArrayLengthSymUses != nullptr)
{
JitAdelete(this->tempAlloc, block->noImplicitCallArrayLengthSymUses);
block->noImplicitCallArrayLengthSymUses = nullptr;
}
if (block->liveFixedFields != nullptr)
{
JitArenaAllocator *liveFixedFieldsArena = this->tempAlloc;
JitAdelete(liveFixedFieldsArena, block->liveFixedFields);
block->liveFixedFields = nullptr;
}
if (block->upwardExposedUses != nullptr)
{
JitArenaAllocator *upwardExposedArena = (this->tag == Js::BackwardPhase) ? this->globOpt->alloc : this->tempAlloc;
JitAdelete(upwardExposedArena, block->upwardExposedUses);
block->upwardExposedUses = nullptr;
}
if (block->upwardExposedFields != nullptr)
{
JitArenaAllocator *upwardExposedArena = (this->tag == Js::BackwardPhase) ? this->globOpt->alloc : this->tempAlloc;
JitAdelete(upwardExposedArena, block->upwardExposedFields);
block->upwardExposedFields = nullptr;
}
if (block->typesNeedingKnownObjectLayout != nullptr)
{
JitAdelete(this->tempAlloc, block->typesNeedingKnownObjectLayout);
block->typesNeedingKnownObjectLayout = nullptr;
}
if (block->byteCodeUpwardExposedUsed != nullptr)
{
JitAdelete(this->tempAlloc, block->byteCodeUpwardExposedUsed);
block->byteCodeUpwardExposedUsed = nullptr;
#if DBG
JitAdeleteArray(this->tempAlloc, func->GetJITFunctionBody()->GetLocalsCount(), block->byteCodeRestoreSyms);
block->byteCodeRestoreSyms = nullptr;
JitAdelete(this->tempAlloc, block->excludeByteCodeUpwardExposedTracking);
block->excludeByteCodeUpwardExposedTracking = nullptr;
#endif
}
if (block->couldRemoveNegZeroBailoutForDef != nullptr)
{
JitAdelete(this->tempAlloc, block->couldRemoveNegZeroBailoutForDef);
block->couldRemoveNegZeroBailoutForDef = nullptr;
}
}
void
BackwardPass::ProcessLoopCollectionPass(BasicBlock *const lastBlock)
{
// The collection pass is done before the prepass, to collect and propagate a minimal amount of information into nested
// loops, for cases where the information is needed to make appropriate decisions on changing other state. For instance,
// bailouts in nested loops need to be able to see all byte-code uses that are exposed to the bailout so that the
// appropriate syms can be made upwards-exposed during the prepass. Byte-code uses that occur before the bailout in the
// flow, or byte-code uses after the current loop, are not seen by bailouts inside the loop. The collection pass collects
// byte-code uses and propagates them at least into each loop's header such that when bailouts are processed in the prepass,
// they will have full visibility of byte-code upwards-exposed uses.
//
// For the collection pass, one pass is needed to collect all byte-code uses of a loop to the loop header. If the loop has
// inner loops, another pass is needed to propagate byte-code uses in the outer loop into the inner loop's header, since
// some byte-code uses may occur before the inner loop in the flow. The process continues recursively for inner loops. The
// second pass only needs to walk as far as the first inner loop's header, since the purpose of that pass is only to
// propagate collected information into the inner loops' headers.
//
// Consider the following case:
// (Block 1, Loop 1 header)
// ByteCodeUses s1
// (Block 2, Loop 2 header)
// (Block 3, Loop 3 header)
// (Block 4)
// BailOut
// (Block 5, Loop 3 back-edge)
// (Block 6, Loop 2 back-edge)
// (Block 7, Loop 1 back-edge)
//
// Assume that the exit branch in each of these loops is in the loop's header block, like a 'while' loop. For the byte-code
// use of 's1' to become visible to the bailout in the innermost loop, we need to walk the following blocks:
// - Collection pass
// - 7, 6, 5, 4, 3, 2, 1, 7 - block 1 is the first block in loop 1 that sees 's1', and since block 7 has block 1 as its
// successor, block 7 sees 's1' now as well
// - 6, 5, 4, 3, 2, 6 - block 2 is the first block in loop 2 that sees 's1', and since block 6 has block 2 as its
// successor, block 6 sees 's1' now as well
// - 5, 4, 3 - block 3 is the first block in loop 3 that sees 's1'
// - The collection pass does not have to do another pass through the innermost loop because it does not have any inner
// loops of its own. It's sufficient to propagate the byte-code uses up to the loop header of each loop, as the
// prepass will do the remaining propagation.
// - Prepass
// - 7, 6, 5, 4, ... - since block 5 has block 3 as its successor, block 5 sees 's1', and so does block 4. So, the bailout
// finally sees 's1' as a byte-code upwards-exposed use.
//
// The collection pass walks as described above, and consists of one pass, followed by another pass if there are inner
// loops. The second pass only walks up to the first inner loop's header block, and during this pass upon reaching an inner
// loop, the algorithm goes recursively for that inner loop, and once it returns, the second pass continues from above that
// inner loop. Each bullet of the walk in the example above is a recursive call to ProcessLoopCollectionPass, except the
// first line, which is the initial call.
//
// Imagine the whole example above is inside another loop, and at the bottom of that loop there is an assignment to 's1'. If
// the bailout is the only use of 's1', then it needs to register 's1' as a use in the prepass to prevent treating the
// assignment to 's1' as a dead store.
Assert(tag == Js::DeadStorePhase);
Assert(IsCollectionPass());
Assert(lastBlock);
Loop *const collectionPassLoop = lastBlock->loop;
Assert(collectionPassLoop);
Assert(!collectionPassLoop->hasDeadStoreCollectionPass);
collectionPassLoop->hasDeadStoreCollectionPass = true;
Loop *const previousPrepassLoop = currentPrePassLoop;
currentPrePassLoop = collectionPassLoop;
Assert(IsPrePass());
// This is also the location where we do the additional step of tracking what opnds
// are used inside the loop in memory dereferences, and thus need masking for cache
// attacks (Spectre). This is a fairly conservative approach, where we just track a
// set of symbols which are determined by each other inside the loop. This lets the
// second pass later on determine if a particular operation generating a symbol can
// avoid the Spectre masking overhead, since a symbol not dereferenced in the loops
// can be masked on the out-edge of the loop, which should be significantly cheaper
// than masking it every iteration.
AssertMsg(collectionPassLoop->symClusterList == nullptr, "clusterList should not have been initialized yet!");
// This is needed to work around tokenization issues with preprocessor macros which
// present themselves when using multiple template parameters.
#ifndef _M_ARM
typedef SegmentClusterList<SymID, JitArenaAllocator> symClusterListType;
collectionPassLoop->symClusterList = JitAnew(this->func->m_fg->alloc, symClusterListType, this->func->m_fg->alloc, 256);
collectionPassLoop->internallyDereferencedSyms = JitAnew(this->func->m_fg->alloc, BVSparse<JitArenaAllocator>, this->func->m_fg->alloc);
#endif
// First pass
BasicBlock *firstInnerLoopHeader = nullptr;
{
#if DBG_DUMP
if(IsTraceEnabled())
{
Output::Print(_u("******* COLLECTION PASS 1 START: Loop %u ********\n"), collectionPassLoop->GetLoopTopInstr()->m_id);
}
#endif
// We want to be able to disambiguate this in ProcessBlock
CollectionPassSubPhase prevCollectionPassSubPhase = this->collectionPassSubPhase;
this->collectionPassSubPhase = CollectionPassSubPhase::FirstPass;
FOREACH_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE(block, lastBlock, nullptr)
{
ProcessBlock(block);
if(block->isLoopHeader)
{
if(block->loop == collectionPassLoop)
{
break;
}
// Keep track of the first inner loop's header for the second pass, which need only walk up to that block
firstInnerLoopHeader = block;
}
} NEXT_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE;
this->collectionPassSubPhase = prevCollectionPassSubPhase;
#if DBG_DUMP
if(IsTraceEnabled())
{
Output::Print(_u("******** COLLECTION PASS 1 END: Loop %u *********\n"), collectionPassLoop->GetLoopTopInstr()->m_id);
}
#endif
}
#ifndef _M_ARM
// Since we generated the base data structures for the spectre handling, we can now
// cross-reference them to get the full set of what may be dereferenced in the loop
// and what is safe in speculation.
#if DBG_DUMP
if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, this->func))
{
Output::Print(_u("Analysis Results for loop %u:\n"), collectionPassLoop->GetLoopNumber());
Output::Print(_u("ClusterList pre-consolidation: "));
collectionPassLoop->symClusterList->Dump();
}
#endif // DBG_DUMP
collectionPassLoop->symClusterList->Consolidate();
#if DBG_DUMP
if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, this->func))
{
Output::Print(_u("ClusterList post-consolidation: "));
collectionPassLoop->symClusterList->Dump();
Output::Print(_u("Internally dereferenced syms pre-propagation: "));
collectionPassLoop->internallyDereferencedSyms->Dump();
}
#endif // DBG_DUMP
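// Propagate the dereferenced bit through each symbol cluster: first mark a cluster's root if any member of the
// cluster was dereferenced, then mark every member whose cluster root is dereferenced, so the bit vector covers
// whole clusters.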
collectionPassLoop->symClusterList->Map<BVSparse<JitArenaAllocator>*, true>([](SymID index, SymID containingSetRoot, BVSparse<JitArenaAllocator>* bv){
if (bv->Test(index))
{
bv->Set(containingSetRoot);
}
}, collectionPassLoop->internallyDereferencedSyms);
collectionPassLoop->symClusterList->Map<BVSparse<JitArenaAllocator>*, true>([](SymID index, SymID containingSetRoot, BVSparse<JitArenaAllocator>* bv){
if (bv->Test(containingSetRoot))
{
bv->Set(index);
}
}, collectionPassLoop->internallyDereferencedSyms);
#if DBG_DUMP
if (PHASE_TRACE(Js::SpeculationPropagationAnalysisPhase, this->func))
{
Output::Print(_u("Internally dereferenced syms post-propagation: "));
collectionPassLoop->internallyDereferencedSyms->Dump();
}
#endif // DBG_DUMP
#endif // !defined(_M_ARM)
// Second pass, only needs to run if there are any inner loops, to propagate collected information into those loops
if(firstInnerLoopHeader)
{
#if DBG_DUMP
if(IsTraceEnabled())
{
Output::Print(_u("******* COLLECTION PASS 2 START: Loop %u ********\n"), collectionPassLoop->GetLoopTopInstr()->m_id);
}
#endif
// We want to be able to disambiguate this in ProcessBlock
CollectionPassSubPhase prevCollectionPassSubPhase = this->collectionPassSubPhase;
this->collectionPassSubPhase = CollectionPassSubPhase::SecondPass;
FOREACH_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE(block, lastBlock, firstInnerLoopHeader)
{
Loop *const loop = block->loop;
if(loop && loop != collectionPassLoop && !loop->hasDeadStoreCollectionPass)
{
// About to make a recursive call, so when jitting in the foreground, probe the stack
if(!func->IsBackgroundJIT())
{
PROBE_STACK_NO_DISPOSE(func->GetScriptContext(), Js::Constants::MinStackDefault);
}
ProcessLoopCollectionPass(block);
// The inner loop's collection pass would have propagated collected information to its header block. Skip to the
// inner loop's header block and continue from the block before it.
block = loop->GetHeadBlock();
Assert(block->isLoopHeader);
continue;
}
ProcessBlock(block);
} NEXT_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE;
this->collectionPassSubPhase = prevCollectionPassSubPhase;
#if DBG_DUMP
if(IsTraceEnabled())
{
Output::Print(_u("******** COLLECTION PASS 2 END: Loop %u *********\n"), collectionPassLoop->GetLoopTopInstr()->m_id);
}
#endif
}
currentPrePassLoop = previousPrepassLoop;
}
void
BackwardPass::ProcessLoop(BasicBlock * lastBlock)
{
#if DBG_DUMP
if (this->IsTraceEnabled())
{
Output::Print(_u("******* PREPASS START ********\n"));
}
#endif
Loop *loop = lastBlock->loop;
bool prevIsLoopPrepass = this->isLoopPrepass;
this->isLoopPrepass = true;
// This code doesn't work quite as intended. It is meant to capture fields that are live out of a loop to limit the
// number of implicit call bailouts the forward pass must create (only compiler throughput optimization, no impact
// on emitted code), but because it looks only at the lexically last block in the loop, it does the right thing only
// for do-while loops. For other loops (for and while) the last block does not exit the loop. Even for do-while loops
// this tracking can have the adverse effect of killing fields that should stay live after copy prop. Disabled by default.
// Left in under a flag, in case we find compiler throughput issues and want to do additional experiments.
if (PHASE_ON(Js::LiveOutFieldsPhase, this->func))
{
if (this->globOpt->DoFieldOpts(loop) || this->globOpt->DoFieldRefOpts(loop))
{
// Get the live-out set at the loop bottom.
// This may not be the only loop exit, but all loop exits either leave the function or pass through here.
// In the forward pass, we'll use this set to trim the live fields on exit from the loop
// in order to limit the number of bailout points following the loop.
BVSparse<JitArenaAllocator> *bv = JitAnew(this->func->m_fg->alloc, BVSparse<JitArenaAllocator>, this->func->m_fg->alloc);
FOREACH_SUCCESSOR_BLOCK(blockSucc, lastBlock)
{
if (blockSucc->loop != loop)
{
// Would like to assert this, but in strange exprgen cases involving "break LABEL" in nested
// loops the loop graph seems to get confused.
//Assert(!blockSucc->loop || blockSucc->loop->IsDescendentOrSelf(loop));
Assert(!blockSucc->loop || blockSucc->loop->hasDeadStorePrepass);
bv->Or(blockSucc->upwardExposedFields);
}
}
NEXT_SUCCESSOR_BLOCK;
lastBlock->loop->liveOutFields = bv;
}
}
if(tag == Js::DeadStorePhase && !loop->hasDeadStoreCollectionPass)
{
Assert(!IsCollectionPass());
Assert(!IsPrePass());
isCollectionPass = true;
ProcessLoopCollectionPass(lastBlock);
isCollectionPass = false;
}
Assert(!this->IsPrePass());
this->currentPrePassLoop = loop;
if (tag == Js::BackwardPhase)
{
Assert(loop->symsAssignedToInLoop == nullptr);
loop->symsAssignedToInLoop = JitAnew(this->globOpt->alloc, BVSparse<JitArenaAllocator>, this->globOpt->alloc);
}
FOREACH_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE(block, lastBlock, nullptr)
{
this->ProcessBlock(block);
if (block->isLoopHeader && block->loop == lastBlock->loop)
{
break;
}
}
NEXT_BLOCK_BACKWARD_IN_RANGE_DEAD_OR_ALIVE;
this->currentPrePassLoop = nullptr;
Assert(lastBlock);
__analysis_assume(lastBlock);
lastBlock->loop->hasDeadStorePrepass = true;
this->isLoopPrepass = prevIsLoopPrepass;
#if DBG_DUMP
if (this->IsTraceEnabled())
{
Output::Print(_u("******** PREPASS END *********\n"));
}
#endif
}
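// Optimize a single block. If the block belongs to a loop whose dead-store prepass has not run yet, run the loop
// prepass (and, in the DeadStore phase, the collection pass) first; afterwards clear the per-block negative-zero,
// bit-op, and int-overflow tracking bit vectors.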
void
BackwardPass::OptBlock(BasicBlock * block)
{
this->func->ThrowIfScriptClosed();
if (block->loop && !block->loop->hasDeadStorePrepass)
{
ProcessLoop(block);
}
this->ProcessBlock(block);
if(DoTrackNegativeZero())
{
negativeZeroDoesNotMatterBySymId->ClearAll();
}
if (DoTrackBitOpsOrNumber())
{
symUsedOnlyForBitOpsBySymId->ClearAll();
symUsedOnlyForNumberBySymId->ClearAll();
}
if(DoTrackIntOverflow())
{
intOverflowDoesNotMatterBySymId->ClearAll();
if(DoTrackCompoundedIntOverflow())
{
intOverflowDoesNotMatterInRangeBySymId->ClearAll();
}
}
}
void
BackwardPass::ProcessBailOutArgObj(BailOutInfo * bailOutInfo, BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed)
{
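// Syms whose values are tracked as the arguments object are restored from the captured argObj info rather than
// from their byte-code registers, so move them from byteCodeUpwardExposedUsed into usedCapturedValues->argObjSyms.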
Assert(this->tag != Js::BackwardPhase);
if (this->globOpt->TrackArgumentsObject() && bailOutInfo->capturedValues->argObjSyms)
{
FOREACH_BITSET_IN_SPARSEBV(symId, bailOutInfo->capturedValues->argObjSyms)
{
if (byteCodeUpwardExposedUsed->TestAndClear(symId))
{
if (bailOutInfo->usedCapturedValues->argObjSyms == nullptr)
{
bailOutInfo->usedCapturedValues->argObjSyms = JitAnew(this->func->m_alloc,
BVSparse<JitArenaAllocator>, this->func->m_alloc);
}
bailOutInfo->usedCapturedValues->argObjSyms->Set(symId);
}
}
NEXT_BITSET_IN_SPARSEBV;
}
if (bailOutInfo->usedCapturedValues->argObjSyms)
{
byteCodeUpwardExposedUsed->Minus(bailOutInfo->usedCapturedValues->argObjSyms);
}
}
void
BackwardPass::ProcessBailOutConstants(BailOutInfo * bailOutInfo, BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed, BVSparse<JitArenaAllocator>* bailoutReferencedArgSymsBv)
{
Assert(this->tag != Js::BackwardPhase);
// Remove constants that we are already going to restore
SListBase<ConstantStackSymValue> * usedConstantValues = &bailOutInfo->usedCapturedValues->constantValues;
FOREACH_SLISTBASE_ENTRY(ConstantStackSymValue, value, usedConstantValues)
{
byteCodeUpwardExposedUsed->Clear(value.Key()->m_id);
bailoutReferencedArgSymsBv->Clear(value.Key()->m_id);
}
NEXT_SLISTBASE_ENTRY;
// Find other constants that we need to restore
FOREACH_SLISTBASE_ENTRY_EDITING(ConstantStackSymValue, value, &bailOutInfo->capturedValues->constantValues, iter)
{
if (byteCodeUpwardExposedUsed->TestAndClear(value.Key()->m_id) || bailoutReferencedArgSymsBv->TestAndClear(value.Key()->m_id))
{
// This constant needs to be restored; move it to the restore list
iter.MoveCurrentTo(usedConstantValues);
}
else if (!this->IsPrePass())
{
// This constant doesn't need to be restored; delete it
iter.RemoveCurrent(this->func->m_alloc);
}
}
NEXT_SLISTBASE_ENTRY_EDITING;
}
void
BackwardPass::ProcessBailOutCopyProps(BailOutInfo * bailOutInfo, BVSparse<JitArenaAllocator> * byteCodeUpwardExposedUsed, BVSparse<JitArenaAllocator>* bailoutReferencedArgSymsBv)
{
Assert(this->tag != Js::BackwardPhase);
Assert(!this->func->GetJITFunctionBody()->IsAsmJsMode());
// Remove copy prop that we were already going to restore
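// (and keep the copy-prop syms that will do the restoring upward-exposed so their defs aren't dead-stored)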
SListBase<CopyPropSyms> * usedCopyPropSyms = &bailOutInfo->usedCapturedValues->copyPropSyms;
FOREACH_SLISTBASE_ENTRY(CopyPropSyms, copyPropSyms, usedCopyPropSyms)
{
byteCodeUpwardExposedUsed->Clear(copyPropSyms.Key()->m_id);
this->currentBlock->upwardExposedUses->Set(copyPropSyms.Value()->m_id);
}
NEXT_SLISTBASE_ENTRY;
JitArenaAllocator * allocator = this->func->m_alloc;
BasicBlock * block = this->currentBlock;
BVSparse<JitArenaAllocator> * upwardExposedUses = block->upwardExposedUses;
// Find other copy prop that we need to restore
FOREACH_SLISTBASE_ENTRY_EDITING(CopyPropSyms, copyPropSyms, &bailOutInfo->capturedValues->copyPropSyms, iter)
{
// Copy prop syms should be vars
Assert(!copyPropSyms.Key()->IsTypeSpec());
Assert(!copyPropSyms.Value()->IsTypeSpec());
if (byteCodeUpwardExposedUsed->TestAndClear(copyPropSyms.Key()->m_id) || bailoutReferencedArgSymsBv->TestAndClear(copyPropSyms.Key()->m_id))
{
// This copy-prop sym needs to be restored; add it to the restore list.
/*
- copyPropSyms.Key() - original sym that is byte-code upwards-exposed, its corresponding byte-code register needs
to be restored
- copyPropSyms.Value() - copy-prop sym whose value the original sym has at the point of this instruction
Heuristic:
- By default, use the copy-prop sym to restore its corresponding byte code register
- This is typically better because it allows the value of the original sym, if it's not used after the copy-prop
sym is changed, to be discarded, leaving only one lifetime (the copy-prop sym's lifetime) to deal with for
register allocation
- Additionally, if the transferring store, which caused the original sym to have the same value as the copy-prop
sym, becomes a dead store, the original sym won't actually attain the value of the copy-prop sym. In that case,
the copy-prop sym must be used to restore the byte code register corresponding to original sym.
Special case for functional correctness:
- Consider that we always use the copy-prop sym to restore, and consider the following case:
b = a
a = c * d <Pre-op bail-out>
= b
- This is rewritten by the lowerer as follows:
b = a
a = c
a = a * d <Pre-op bail-out> (to make dst and src1 the same)
= b
- The problem here is that at the point of the bail-out instruction, 'a' would be used to restore the value of 'b',
but the value of 'a' has changed before the bail-out (at 'a = c').
- In this case, we need to use 'b' (the original sym) to restore the value of 'b'. Because 'b' is upwards-exposed,
'b = a' cannot be a dead store, therefore making it valid to use 'b' to restore.
- Use the original sym to restore when all of the following are true:
- The bailout is a pre-op bailout, and the bailout check is done after overwriting the destination
- It's an int-specialized unary or binary operation that produces a value
- The copy-prop sym is the destination of this instruction
- None of the sources are the copy-prop sym. Otherwise, the value of the copy-prop sym will be saved as
necessary by the bailout code.
*/
StackSym * stackSym = copyPropSyms.Key(); // assume that we'll use the original sym to restore
SymID symId = stackSym->m_id;
// Prefer to restore from type-specialized versions of the sym, as that will reduce the need for potentially
// expensive ToVars that can more easily be eliminated due to being dead stores
StackSym * int32StackSym = nullptr;
StackSym * float64StackSym = nullptr;
StackSym * simd128StackSym = nullptr;
// If the sym is type specialized, we need to check for upward-exposed uses of the specialized sym and not the equivalent var sym. If there are no
// such uses and we use the copy-prop sym to restore, we'll need to find the type-specialized sym for that sym as well.
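// (Illustrative: if only the int32 copy of the sym is live at this bailout, restore the byte-code register from the
// int32 sym; this avoids keeping a potentially expensive ToVar of the var sym alive just for the restore.)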
StackSym * typeSpecSym = nullptr;
auto findTypeSpecSym = [&]()
{
if (bailOutInfo->liveLosslessInt32Syms->Test(symId))
{
// Var version of the sym is not live, use the int32 version
int32StackSym = stackSym->GetInt32EquivSym(nullptr);
typeSpecSym = int32StackSym;
Assert(int32StackSym);
}
else if(bailOutInfo->liveFloat64Syms->Test(symId))
{
// Var/int32 version of the sym is not live, use the float64 version
float64StackSym = stackSym->GetFloat64EquivSym(nullptr);
typeSpecSym = float64StackSym;
Assert(float64StackSym);
}
else
{
Assert(bailOutInfo->liveVarSyms->Test(symId));
typeSpecSym = stackSym;
}
};
findTypeSpecSym();
Assert(typeSpecSym != nullptr);
IR::Instr *const instr = bailOutInfo->bailOutInstr;
StackSym *const dstSym = IR::RegOpnd::TryGetStackSym(instr->GetDst());
if(instr->GetBailOutKind() & IR::BailOutOnResultConditions &&
instr->GetByteCodeOffset() != Js::Constants::NoByteCodeOffset &&
bailOutInfo->bailOutOffset <= instr->GetByteCodeOffset() &&
dstSym &&
dstSym->IsInt32() &&
dstSym->IsTypeSpec() &&
dstSym->GetVarEquivSym(nullptr) == copyPropSyms.Value() &&
instr->GetSrc1() &&
!instr->GetDst()->IsEqual(instr->GetSrc1()) &&
!(instr->GetSrc2() && instr->GetDst()->IsEqual(instr->GetSrc2())))
{
Assert(bailOutInfo->bailOutOffset == instr->GetByteCodeOffset());
// Need to use the original sym to restore. The original sym is byte-code upwards-exposed, which is why it needs
// to be restored. Because the original sym needs to be restored and the copy-prop sym is changing here, the
// original sym must be live in some fashion at the point of this instruction, that will be verified below. The
// original sym will also be made upwards-exposed from here, so the aforementioned transferring store of the
// copy-prop sym to the original sym will not be a dead store.
}
else if (block->upwardExposedUses->Test(typeSpecSym->m_id) && !block->upwardExposedUses->Test(copyPropSyms.Value()->m_id))
{
// Don't use the copy prop sym if it is not used and the orig sym still has uses.
// No point in extending the lifetime of the copy prop sym unnecessarily.
}
else
{
// Need to use the copy-prop sym to restore
stackSym = copyPropSyms.Value();
symId = stackSym->m_id;
int32StackSym = nullptr;
float64StackSym = nullptr;
simd128StackSym = nullptr;
findTypeSpecSym();
}
// If we did not end up using the copy-prop sym, make sure the bailout's use of the original sym is captured.
if (stackSym != copyPropSyms.Value() && stackSym->HasArgSlotNum())
{
bailoutReferencedArgSymsBv->Set(stackSym->m_id);
}
if (int32StackSym != nullptr)
{
Assert(float64StackSym == nullptr);
usedCopyPropSyms->PrependNode(allocator, copyPropSyms.Key(), int32StackSym);
iter.RemoveCurrent(allocator);
upwardExposedUses->Set(int32StackSym->m_id);
}
else if (float64StackSym != nullptr)
{
// This float-specialized sym is going to be used to restore the corresponding byte-code register. Need to
// ensure that the float value can be precisely coerced back to the original Var value by requiring that it is
// specialized using BailOutNumberOnly.
float64StackSym->m_requiresBailOnNotNumber = true;
usedCopyPropSyms->PrependNode(allocator, copyPropSyms.Key(), float64StackSym);
iter.RemoveCurrent(allocator);
upwardExposedUses->Set(float64StackSym->m_id);
}
// SIMD_JS
else if (simd128StackSym != nullptr)
{
usedCopyPropSyms->PrependNode(allocator, copyPropSyms.Key(), simd128StackSym);
iter.RemoveCurrent(allocator);
upwardExposedUses->Set(simd128StackSym->m_id);
}
else
{
usedCopyPropSyms->PrependNode(allocator, copyPropSyms.Key(), stackSym);
iter.RemoveCurrent(allocator);
upwardExposedUses->Set(symId);
}
}
else if (!this->IsPrePass())
{
// Copy prop sym doesn't need to be restored, delete.
iter.RemoveCurrent(allocator);
}
}
NEXT_SLISTBASE_ENTRY_EDITING;
}
StackSym*
BackwardPass::ProcessByteCodeUsesDst(IR::ByteCodeUsesInstr * byteCodeUsesInstr)
{
Assert(this->DoByteCodeUpwardExposedUsed());
IR::Opnd * dst = byteCodeUsesInstr->GetDst();
if (dst)
{
IR::RegOpnd * dstRegOpnd = dst->AsRegOpnd();
StackSym * dstStackSym = dstRegOpnd->m_sym->AsStackSym();
Assert(!dstRegOpnd->GetIsJITOptimizedReg());
Assert(dstStackSym->GetByteCodeRegSlot() != Js::Constants::NoRegister);
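// byteCodeUpwardExposedUsed is keyed by var syms (byte-code registers hold var values), so map a type-specialized
// dst back to its var equivalent before clearing it from the set.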
if (dstStackSym->GetType() != TyVar)
{
dstStackSym = dstStackSym->GetVarEquivSym(nullptr);
}
// If the current region is a Try, symbols in its write-through set shouldn't be cleared.
// Otherwise, symbols in the write-through set of the first try ancestor shouldn't be cleared.
if (!this->currentRegion ||
!this->CheckWriteThroughSymInRegion(this->currentRegion, dstStackSym))
{
this->currentBlock->byteCodeUpwardExposedUsed->Clear(dstStackSym->m_id);
return dstStackSym;
}
}
return nullptr;
}
const BVSparse<JitArenaAllocator>*
BackwardPass::ProcessByteCodeUsesSrcs(IR::ByteCodeUsesInstr * byteCodeUsesInstr)
{
Assert(this->DoByteCodeUpwardExposedUsed() || tag == Js::BackwardPhase);
const BVSparse<JitArenaAllocator>* byteCodeUpwardExposedUsed = byteCodeUsesInstr->GetByteCodeUpwardExposedUsed();
if (byteCodeUpwardExposedUsed && this->DoByteCodeUpwardExposedUsed())
{
this->currentBlock->byteCodeUpwardExposedUsed->Or(byteCodeUpwardExposedUsed);
}
return byteCodeUpwardExposedUsed;
}
bool
BackwardPass::ProcessByteCodeUsesInstr(IR::Instr * instr)
{
if (!instr->IsByteCodeUsesInstr())
{
return false;
}
IR::ByteCodeUsesInstr * byteCodeUsesInstr = instr->AsByteCodeUsesInstr();
if (this->tag == Js::BackwardPhase)
{
// FGPeeps inserts bytecodeuses instrs with srcs. We need to look at them to set the proper
// UpwardExposedUsed info and keep the defs alive.
// The inliner inserts bytecodeuses instrs with dsts, but we don't want to look at them for upwardExposedUsed
// as it would cause real defs to look dead. We use these for bytecodeUpwardExposedUsed info only, which is needed
// in the dead-store pass only.
//
// Handle the source side.
const BVSparse<JitArenaAllocator>* byteCodeUpwardExposedUsed = ProcessByteCodeUsesSrcs(byteCodeUsesInstr);
if (byteCodeUpwardExposedUsed != nullptr)
{
this->currentBlock->upwardExposedUses->Or(byteCodeUpwardExposedUsed);
}
}
#if DBG
else if (tag == Js::CaptureByteCodeRegUsePhase)
{
ProcessByteCodeUsesDst(byteCodeUsesInstr);
ProcessByteCodeUsesSrcs(byteCodeUsesInstr);
}
#endif
else
{
Assert(tag == Js::DeadStorePhase);
Assert(instr->m_opcode == Js::OpCode::ByteCodeUses);
#if DBG
if (this->DoMarkTempObjectVerify() && (this->currentBlock->isDead || !this->func->hasBailout))
{
if (IsCollectionPass())
{
if (!this->func->hasBailout)
{
// Prevent byte code uses from being removed during the collection pass for mark temp object verification
// if we don't have any bailouts
return true;
}
}
else
{
this->currentBlock->tempObjectVerifyTracker->NotifyDeadByteCodeUses(instr);
}
}
#endif
if (this->func->hasBailout)
{
// Just collect the byte code uses, and remove the instruction.
// Since we are walking backward, process the dst first and then the srcs.
StackSym *dstStackSym = ProcessByteCodeUsesDst(byteCodeUsesInstr);
#if DBG
// We can only track first level function stack syms right now
if (dstStackSym && dstStackSym->GetByteCodeFunc() == this->func)
{
this->currentBlock->byteCodeRestoreSyms[dstStackSym->GetByteCodeRegSlot()] = nullptr;
}
#endif
const BVSparse<JitArenaAllocator>* byteCodeUpwardExposedUsed = ProcessByteCodeUsesSrcs(byteCodeUsesInstr);
#if DBG
if (byteCodeUpwardExposedUsed)
{
FOREACH_BITSET_IN_SPARSEBV(symId, byteCodeUpwardExposedUsed)
{
StackSym * stackSym = this->func->m_symTable->FindStackSym(symId);
Assert(!stackSym->IsTypeSpec());
// We can only track first level function stack syms right now
if (stackSym->GetByteCodeFunc() == this->func)
{
Js::RegSlot byteCodeRegSlot = stackSym->GetByteCodeRegSlot();
Assert(byteCodeRegSlot != Js::Constants::NoRegister);
if (this->currentBlock->byteCodeRestoreSyms[byteCodeRegSlot] != stackSym)
{
AssertMsg(this->currentBlock->byteCodeRestoreSyms[byteCodeRegSlot] == nullptr,
"Can't have two active lifetime for the same byte code register");
this->currentBlock->byteCodeRestoreSyms[byteCodeRegSlot] = stackSym;
}
}
}
NEXT_BITSET_IN_SPARSEBV;
}
#endif
if (IsCollectionPass())
{
return true;
}
PropertySym *propertySymUse = byteCodeUsesInstr->propertySymUse;
if (propertySymUse && !this->currentBlock->isDead)
{
this->currentBlock->upwardExposedFields->Set(propertySymUse->m_id);
}
if (this->IsPrePass())
{
// Don't remove the instruction yet if we are in the prepass
// But tell the caller we don't need to process the instruction any more
return true;
}
}
this->currentBlock->RemoveInstr(instr);
}
return true;
}
bool
BackwardPass::ProcessBailOutInfo(IR::Instr * instr)
{
Assert(!instr->IsByteCodeUsesInstr());
if (this->tag == Js::BackwardPhase)
{
// We don't need to fill in the bailout instruction in backward pass
Assert(this->func->hasBailout || !instr->HasBailOutInfo());
Assert(!instr->HasBailOutInfo() || instr->GetBailOutInfo()->byteCodeUpwardExposedUsed == nullptr || (this->func->HasTry() && this->func->DoOptimizeTry()));
return false;
}
if(IsCollectionPass())
{
return false;
}
Assert(tag == Js::DeadStorePhase);
if (instr->HasBailOutInfo())
{
Assert(this->func->hasBailout);
Assert(this->DoByteCodeUpwardExposedUsed());
BailOutInfo * bailOutInfo = instr->GetBailOutInfo();
// Only process the bailout info if this is the main bailout point (instead of shared)
if (bailOutInfo->bailOutInstr == instr)
{
if(instr->GetByteCodeOffset() == Js::Constants::NoByteCodeOffset ||
bailOutInfo->bailOutOffset > instr->GetByteCodeOffset())
{
// Currently, we only have post-op bailouts with BailOutOnImplicitCalls,
// LazyBailOut, or JIT-inserted operations (which have no byte code offsets).
// If there are other bailouts where we want to bail out after the operation,
// we have to make sure that the operation still doesn't make an implicit call
// when it is performed on a stack object.
// Otherwise, the stack object would be passed to the implicit call functions.
Assert(instr->GetByteCodeOffset() == Js::Constants::NoByteCodeOffset
|| (instr->GetBailOutKind() & ~IR::BailOutKindBits) == IR::BailOutOnImplicitCalls
|| (instr->GetBailOutKind() & ~IR::BailOutKindBits) == IR::LazyBailOut
|| (instr->GetBailOutKind() & ~IR::BailOutKindBits) == IR::BailOutInvalid);
// This instruction bails out to a later byte-code instruction, so process the bailout info now
this->ProcessBailOutInfo(instr, bailOutInfo);
if (instr->HasLazyBailOut())
{
this->ClearDstUseForPostOpLazyBailOut(instr);
}
}
else
{
// This instruction bails out to the equivalent byte code instruction. This instruction and ByteCodeUses
// instructions relevant to this instruction need to be processed before the bailout info for this instruction
// can be processed, so that it can be determined what byte code registers are used by the equivalent byte code
// instruction and need to be restored. Save the instruction for bailout info processing later.
Assert(bailOutInfo->bailOutOffset == instr->GetByteCodeOffset());
Assert(!preOpBailOutInstrToProcess);
preOpBailOutInstrToProcess = instr;
}
}
}
return false;
}
bool
BackwardPass::IsLazyBailOutCurrentlyNeeeded(IR::Instr * instr) const
{
if (!this->func->ShouldDoLazyBailOut())
{
return false;
}
Assert(this->tag == Js::DeadStorePhase);
// We insert potential lazy bailout points in the forward pass, so if the instruction doesn't
// have bailout info at this point, we know for sure lazy bailout is not needed.
if (!instr->HasLazyBailOut() || this->currentBlock->isDead)
{
return false;
}
AssertMsg(
this->currentBlock->liveFixedFields != nullptr,
"liveFixedField is null, MergeSuccBlocksInfo might have not initialized it?"
);
if (instr->IsStFldVariant())
{
Assert(instr->GetDst());
Js::PropertyId id = instr->GetDst()->GetSym()->AsPropertySym()->m_propertyId;
// We only need to protect against a StFld if it is storing to one of the live fixed fields
return this->currentBlock->liveFixedFields->Test(id);
}
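// For any other instruction carrying a lazy bailout point, keep it as long as some fixed field is still live
// downstream; a side effect here could invalidate the assumptions made for those fixed fields.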
return !this->currentBlock->liveFixedFields->IsEmpty();
}
bool
BackwardPass::IsImplicitCallBailOutCurrentlyNeeded(IR::Instr * instr, bool mayNeedImplicitCallBailOut, bool needLazyBailOut, bool hasLiveFields)
{
return this->globOpt->IsImplicitCallBailOutCurrentlyNeeded(
instr, nullptr /* src1Val */, nullptr /* src2Val */,
this->currentBlock, hasLiveFields, mayNeedImplicitCallBailOut, false /* isForwardPass */, needLazyBailOut
) ||
this->NeedBailOutOnImplicitCallsForTypedArrayStore(instr);
}
void
BackwardPass::DeadStoreTypeCheckBailOut(IR::Instr * instr)
{
// Good news: There are cases where the forward pass installs BailOutFailedTypeCheck, but the dead store pass
// discovers that the checked type is dead.
// Bad news: We may still need implicit call bailout, and it's up to the dead store pass to figure this out.
// Worse news: BailOutFailedTypeCheck is pre-op, and BailOutOnImplicitCall is post-op. We'll use a special
// bailout kind to indicate implicit call bailout that targets its own instruction. The lowerer will emit
// code to disable/re-enable implicit calls around the operation.
Assert(this->tag == Js::DeadStorePhase);
if (this->IsPrePass() || !instr->HasBailOutInfo())
{
return;
}
// By default, do not do this for stores, as it makes the presence of type checks unpredictable in the forward pass.
// For instance, we can't predict which stores may cause reallocation of aux slots.
if (!PHASE_ON(Js::DeadStoreTypeChecksOnStoresPhase, this->func) && instr->GetDst() && instr->GetDst()->IsSymOpnd())
{
return;
}
const IR::BailOutKind oldBailOutKind = instr->GetBailOutKind();
if (!IR::IsTypeCheckBailOutKind(oldBailOutKind))
{
return;
}
// Either src1 or dst must be a property sym operand
Assert((instr->GetSrc1() && instr->GetSrc1()->IsSymOpnd() && instr->GetSrc1()->AsSymOpnd()->IsPropertySymOpnd()) ||
(instr->GetDst() && instr->GetDst()->IsSymOpnd() && instr->GetDst()->AsSymOpnd()->IsPropertySymOpnd()));
IR::PropertySymOpnd *propertySymOpnd =
(instr->GetDst() && instr->GetDst()->IsSymOpnd()) ? instr->GetDst()->AsPropertySymOpnd() : instr->GetSrc1()->AsPropertySymOpnd();
if (propertySymOpnd->TypeCheckRequired())
{
return;
}
bool isTypeCheckProtected = false;
IR::BailOutKind bailOutKind;
if (GlobOpt::NeedsTypeCheckBailOut(instr, propertySymOpnd, propertySymOpnd == instr->GetDst(), &isTypeCheckProtected, &bailOutKind))
{
// If we installed a failed type check bailout in the forward pass, but we are now discovering that the checked
// type is dead, we may still need a bailout on failed fixed field type check. These type checks are required
// regardless of whether the checked type is dead. Hence, the bailout kind may change here.
Assert((oldBailOutKind & ~IR::BailOutKindBits) == bailOutKind ||
bailOutKind == IR::BailOutFailedFixedFieldTypeCheck || bailOutKind == IR::BailOutFailedEquivalentFixedFieldTypeCheck);
instr->SetBailOutKind(bailOutKind);
return;
}
else if (isTypeCheckProtected)
{
instr->ClearBailOutInfo();
if (preOpBailOutInstrToProcess == instr)
{
preOpBailOutInstrToProcess = nullptr;
}
return;
}
Assert(!propertySymOpnd->IsTypeCheckProtected());
// If all we're doing here is checking the type (e.g. because we've hoisted a field load or store out of the loop, but needed
// the type check to remain in the loop), and now it turns out we don't need the type checked, we can simply turn this into
// a NOP and remove the bailout.
if (instr->m_opcode == Js::OpCode::CheckObjType)
{
Assert(instr->GetDst() == nullptr && instr->GetSrc1() != nullptr && instr->GetSrc2() == nullptr);
instr->m_opcode = Js::OpCode::Nop;
instr->FreeSrc1();
instr->ClearBailOutInfo();
if (this->preOpBailOutInstrToProcess == instr)
{
this->preOpBailOutInstrToProcess = nullptr;
}
return;
}
// We don't need BailOutFailedTypeCheck but may need BailOutOnImplicitCall.
// Consider: are we in the loop landing pad? If so, no bailout, since implicit calls will be checked at
// the end of the block.
if (this->currentBlock->IsLandingPad())
{
// We're in the landing pad.
if (preOpBailOutInstrToProcess == instr)
{
preOpBailOutInstrToProcess = nullptr;
}
instr->UnlinkBailOutInfo();
return;
}
// If bailOutKind is an equivalent type check, leave the bailout alone
if (bailOutKind == IR::BailOutFailedEquivalentTypeCheck ||
bailOutKind == IR::BailOutFailedEquivalentFixedFieldTypeCheck)
{
return;
}
// We're not checking for polymorphism, so don't let the bailout indicate that we
// detected polymorphism.
instr->GetBailOutInfo()->polymorphicCacheIndex = (uint)-1;
// Keep the mark temp object bit if it is there so that we will not remove the implicit call check
IR::BailOutKind newBailOutKind = IR::BailOutOnImplicitCallsPreOp | (oldBailOutKind & IR::BailOutMarkTempObject);
if (BailOutInfo::HasLazyBailOut(oldBailOutKind))
{
instr->SetBailOutKind(BailOutInfo::WithLazyBailOut(newBailOutKind));
}
else
{
instr->SetBailOutKind(newBailOutKind);
}
}
void
BackwardPass::DeadStoreLazyBailOut(IR::Instr * instr, bool needsLazyBailOut)
{
if (!this->IsPrePass() && !needsLazyBailOut && instr->HasLazyBailOut())
{
instr->ClearLazyBailOut();
if (!instr->HasBailOutInfo())
{
if (this->preOpBailOutInstrToProcess == instr)
{
this->preOpBailOutInstrToProcess = nullptr;
}
}
}
}
void
BackwardPass::DeadStoreImplicitCallBailOut(IR::Instr * instr, bool hasLiveFields, bool needsLazyBailOut)
{
Assert(this->tag == Js::DeadStorePhase);
if (this->IsPrePass() || !instr->HasBailOutInfo())
{
// Don't do this in the pre-pass, because, for instance, we don't have live-on-back-edge fields yet.
return;
}
if (OpCodeAttr::BailOutRec(instr->m_opcode))
{
// This is something like OpCode::BailOutOnNotEqual. Assume it needs what it's got.
return;
}
UpdateArrayBailOutKind(instr);
// Install the implicit call PreOp for mark temp object if we need one.
if ((instr->GetBailOutKind() & IR::BailOutMarkTempObject) != 0 && instr->GetBailOutKindNoBits() != IR::BailOutOnImplicitCallsPreOp)
{
IR::BailOutKind kind = instr->GetBailOutKind();
const IR::BailOutKind kindNoBits = instr->GetBailOutKindNoBits();
Assert(kindNoBits != IR::BailOutOnImplicitCalls);
if (kindNoBits == IR::BailOutInvalid)
{
// We should only have combined with array bits or lazy bailout
Assert(BailOutInfo::WithoutLazyBailOut(kind & ~IR::BailOutForArrayBits) == IR::BailOutMarkTempObject);
// No need to install it if we are not going to make helper calls,
// or if we are in the landing pad, since implicit calls are already turned off there.
if ((kind & IR::BailOutOnArrayAccessHelperCall) == 0 && !this->currentBlock->IsLandingPad())
{
kind += IR::BailOutOnImplicitCallsPreOp;
instr->SetBailOutKind(kind);
}
}
}
// We currently only try to eliminate these bailout kinds; the others are required in cases
// where we don't necessarily have live/hoisted fields.
const bool mayNeedBailOnImplicitCall = BailOutInfo::IsBailOutOnImplicitCalls(instr->GetBailOutKind());
if (!mayNeedBailOnImplicitCall)
{
const IR::BailOutKind kind = instr->GetBailOutKind();
if (kind & IR::BailOutMarkTempObject)
{
if (kind == IR::BailOutMarkTempObject)
{
// Landing pad does not need per-instr implicit call bailouts.
Assert(this->currentBlock->IsLandingPad());
instr->ClearBailOutInfo();
if (this->preOpBailOutInstrToProcess == instr)
{
this->preOpBailOutInstrToProcess = nullptr;
}
}
else
{
// Mark temp object bit is not needed after dead store pass
instr->SetBailOutKind(kind & ~IR::BailOutMarkTempObject);
}
}
return;
}
// We have an implicit call bailout in the code, and we want to make sure that it's required.
// Do this now, because only in the dead store pass do we have complete forward and backward liveness info.
bool needsBailOutOnImplicitCall = this->IsImplicitCallBailOutCurrentlyNeeded(instr, mayNeedBailOnImplicitCall, needsLazyBailOut, hasLiveFields);
if(!UpdateImplicitCallBailOutKind(instr, needsBailOutOnImplicitCall, needsLazyBailOut))
{
instr->ClearBailOutInfo();
if (preOpBailOutInstrToProcess == instr)
{
preOpBailOutInstrToProcess = nullptr;
}
#if DBG
if (this->DoMarkTempObjectVerify())
{
this->currentBlock->tempObjectVerifyTracker->NotifyBailOutRemoval(instr, this);
}
#endif
}
}
bool
BackwardPass::UpdateImplicitCallBailOutKind(IR::Instr *const instr, bool needsBailOutOnImplicitCall, bool needsLazyBailOut)
{
Assert(instr);
Assert(instr->HasBailOutInfo());
Assert(BailOutInfo::IsBailOutOnImplicitCalls(instr->GetBailOutKind()));
AssertMsg(
needsLazyBailOut || instr->GetBailOutKind() == BailOutInfo::WithoutLazyBailOut(instr->GetBailOutKind()),
"We should have removed all lazy bailout bit at this point if we decided that we wouldn't need it"
);
AssertMsg(
!needsLazyBailOut || instr->GetBailOutKind() == BailOutInfo::WithLazyBailOut(instr->GetBailOutKind()),
"The lazy bailout bit should be present at this point. We might have removed it incorrectly."
);
const IR::BailOutKind bailOutKindWithBits = instr->GetBailOutKind();
const bool hasMarkTempObject = bailOutKindWithBits & IR::BailOutMarkTempObject;
// Firstly, we remove the mark temp object bit, as it is not needed after the dead store pass.
// We will later skip removing BailOutOnImplicitCalls when there is a mark temp object bit regardless
// of `needsBailOutOnImplicitCall`.
if (hasMarkTempObject)
{
instr->SetBailOutKind(bailOutKindWithBits & ~IR::BailOutMarkTempObject);
}
if (needsBailOutOnImplicitCall)
{
// We decided that BailOutOnImplicitCall is needed. So lazy bailout is unnecessary
// because we are already protected from potential side effects unless the operation
// itself can change fields' values (StFld/StElem).
if (needsLazyBailOut && !instr->CanChangeFieldValueWithoutImplicitCall())
{
instr->ClearLazyBailOut();
}
return true;
}
else
{
// `needsBailOutOnImplicitCall` also captures our intention to keep BailOutOnImplicitCalls
// because we want to do the fixed-field lazy bailout optimization. So if we don't need the
// implicit call bailout, just remove our lazy bailout.
instr->ClearLazyBailOut();
if (!instr->HasBailOutInfo())
{
return true;
}
}
const IR::BailOutKind bailOutKindWithoutBits = instr->GetBailOutKindNoBits();
if (hasMarkTempObject)
{
// Don't remove the implicit call pre op bailout for mark temp object.
Assert(bailOutKindWithoutBits == IR::BailOutOnImplicitCallsPreOp);
return true;
}
// At this point, we don't need the bail on implicit calls.
// Simply use the bailout kind bits as our new bailout kind.
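// (Illustrative: (BailOutOnImplicitCallsPreOp | BailOutOnArrayAccessHelperCall) minus its main kind
// BailOutOnImplicitCallsPreOp leaves just the BailOutOnArrayAccessHelperCall bit as the new kind.)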
IR::BailOutKind newBailOutKind = bailOutKindWithBits - bailOutKindWithoutBits;
if (newBailOutKind == IR::BailOutInvalid)
{
return false;
}
instr->SetBailOutKind(newBailOutKind);
return true;
}
bool
BackwardPass::NeedBailOutOnImplicitCallsForTypedArrayStore(IR::Instr* instr)
{
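// Storing to a typed array coerces the stored value to a number; if the value might be a non-primitive object,
// that coercion can run user code (valueOf/toString), i.e. an implicit call. Illustrative source-level example:
//   ta[i] = obj;   // obj.valueOf() may run during this store
// Primitive values and int/float-specialized or constant syms cannot trigger such a call.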
if ((instr->m_opcode == Js::OpCode::StElemI_A || instr->m_opcode == Js::OpCode::StElemI_A_Strict) &&
instr->GetDst()->IsIndirOpnd() &&
instr->GetDst()->AsIndirOpnd()->GetBaseOpnd()->GetValueType().IsLikelyTypedArray())
{
IR::Opnd * opnd = instr->GetSrc1();
if (opnd->IsRegOpnd())
{
return !opnd->AsRegOpnd()->GetValueType().IsPrimitive() &&
!opnd->AsRegOpnd()->m_sym->IsInt32() &&
!opnd->AsRegOpnd()->m_sym->IsFloat64() &&
!opnd->AsRegOpnd()->m_sym->IsFloatConst() &&
!opnd->AsRegOpnd()->m_sym->IsIntConst();
}
else
{
Assert(opnd->IsIntConstOpnd() || opnd->IsInt64ConstOpnd() || opnd->IsFloat32ConstOpnd() || opnd->IsFloatConstOpnd() || opnd->IsAddrOpnd());
}
}
return false;
}
IR::Instr*
BackwardPass::ProcessPendingPreOpBailOutInfo(IR::Instr *const currentInstr)
{
Assert(!IsCollectionPass());
if(!preOpBailOutInstrToProcess)
{
return currentInstr->m_prev;
}
Assert(preOpBailOutInstrToProcess == currentInstr);
if (!this->IsPrePass())
{
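// Walk backward over the adjacent ByteCodeUses instructions that this pre-op bailout can aggregate and process
// (and remove) them now, so the byte-code registers they reference are accounted for before the bailout info is processed below.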
IR::Instr* prev = preOpBailOutInstrToProcess->m_prev;
while (prev && preOpBailOutInstrToProcess->CanAggregateByteCodeUsesAcrossInstr(prev))
{
IR::Instr* instr = prev;
prev = prev->m_prev;
if (instr->IsByteCodeUsesInstrFor(preOpBailOutInstrToProcess))
{
// If instr is a ByteCodeUsesInstr, ProcessByteCodeUsesInstr will remove it
ProcessByteCodeUsesInstr(instr);
}
}
}
// A pre-op bailout instruction was saved for bailout info processing after the instruction and relevant ByteCodeUses
// instructions before it have been processed. We can process the bailout info for that instruction now.
BailOutInfo *const bailOutInfo = preOpBailOutInstrToProcess->GetBailOutInfo();
Assert(bailOutInfo->bailOutInstr == preOpBailOutInstrToProcess);
<